Merge tag 'drm-intel-next-2020-01-14' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
Final drm/i915 features for v5.6:

- DP MST fixes (José)
- Fix intel_bw_state memory leak (Pankaj Bharadiya)
- Switch context id allocation to xarray (Tvrtko)
- ICL/EHL/TGL workarounds (Matt Roper, Tvrtko)
- Debugfs for LMEM details (Lukasz Fiedorowicz)
- Prefer platform acronyms over codenames in symbols (Lucas)
- Tiled and port sync mode fixes for fbdev and DP (Manasi)
- DSI panel and backlight enable GPIO fixes (Hans de Goede)
- Relax audio min CDCLK requirements on non-GLK (Kai Vehmanen)
- Plane alignment and dimension check fixes (Imre)
- Fix state checks for PSR (José)
- Remove ICL+ clock gating programming (José)
- Static checker fixes around bool usage (Ma Feng)
- Bring back tests for self-contained headers in i915 (Masahiro Yamada)
- Fix DP MST disable sequence (Ville)
- Start converting i915 to the new drm device based logging macros (Wambui Karuga)
- Add DSI VBT I2C sequence execution (Vivek Kasireddy)
- Start using function pointers and ops structs in uc code (Michal)
- Fix PMU names to not use colons or dashes (Tvrtko)
- TGL media decompression support (DK, Imre)
- Split i915_gem_gtt.[ch] to more manageable chunks (Matthew Auld)
- Create dumb buffers in LMEM where available (Ram)
- Extend mmap support for LMEM (Abdiel)
- Selftest updates (Chris)
- Hack bump up CDCLK on TGL to avoid underruns (Stan)
- Use intel_encoder and intel_connector more instead of drm counterparts (Ville)
- Build error fixes (Zhang Xiaoxu)
- Fixes related to GPU and engine initialization/resume (Chris)
- Support for prefaulting discontiguous objects (Abdiel)
- Support discontiguous LMEM object maps (Chris)
- Various GEM and GT improvements and fixes (Chris)
- Merge pinctrl dependencies branch for the DSI GPIO updates (Jani)
- Backmerge drm-next for new logging macros (Jani)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87sgkil0v9.fsf@intel.com
@@ -201,7 +201,7 @@ static unsigned long pin_highz_conf[] = {
 };
 
 /* Pin control settings */
-static struct pinctrl_map __initdata u300_pinmux_map[] = {
+static const struct pinctrl_map u300_pinmux_map[] = {
 	/* anonymous maps for chip power and EMIFs */
 	PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-u300", NULL, "power"),
 	PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-u300", NULL, "emif0"),
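The hunk above drops __initdata and makes the map const, which matches how such tables are handed to the pinctrl core: pinctrl_register_mappings() takes a const struct pinctrl_map array. A minimal sketch of that registration pattern, using a hypothetical "pinctrl-example" device name rather than the real u300 board code:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pinctrl/machine.h>

/* Hypothetical board map; mirrors the hog entries in the diff above. */
static const struct pinctrl_map example_pinmux_map[] = {
	PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-example", NULL, "power"),
	PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-example", NULL, "emif0"),
};

static int __init example_pinmux_init(void)
{
	/* pinctrl_register_mappings() expects a const map array. */
	return pinctrl_register_mappings(example_pinmux_map,
					 ARRAY_SIZE(example_pinmux_map));
}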
@@ -114,6 +114,33 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc)
 	return NULL;
 }
 
+static struct drm_display_mode *
+drm_connector_get_tiled_mode(struct drm_connector *connector)
+{
+	struct drm_display_mode *mode;
+
+	list_for_each_entry(mode, &connector->modes, head) {
+		if (mode->hdisplay == connector->tile_h_size &&
+		    mode->vdisplay == connector->tile_v_size)
+			return mode;
+	}
+	return NULL;
+}
+
+static struct drm_display_mode *
+drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
+{
+	struct drm_display_mode *mode;
+
+	list_for_each_entry(mode, &connector->modes, head) {
+		if (mode->hdisplay == connector->tile_h_size &&
+		    mode->vdisplay == connector->tile_v_size)
+			continue;
+		return mode;
+	}
+	return NULL;
+}
+
 static struct drm_display_mode *
 drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int height)
 {
@@ -348,8 +375,15 @@ static bool drm_client_target_preferred(struct drm_connector **connectors,
 	struct drm_connector *connector;
 	u64 conn_configured = 0;
 	int tile_pass = 0;
+	int num_tiled_conns = 0;
 	int i;
 
+	for (i = 0; i < connector_count; i++) {
+		if (connectors[i]->has_tile &&
+		    connectors[i]->status == connector_status_connected)
+			num_tiled_conns++;
+	}
+
 retry:
 	for (i = 0; i < connector_count; i++) {
 		connector = connectors[i];
@@ -399,6 +433,28 @@ retry:
 			list_for_each_entry(modes[i], &connector->modes, head)
 				break;
 		}
+		/*
+		 * In case of tiled mode if all tiles not present fallback to
+		 * first available non tiled mode.
+		 * After all tiles are present, try to find the tiled mode
+		 * for all and if tiled mode not present due to fbcon size
+		 * limitations, use first non tiled mode only for
+		 * tile 0,0 and set to no mode for all other tiles.
+		 */
+		if (connector->has_tile) {
+			if (num_tiled_conns <
+			    connector->num_h_tile * connector->num_v_tile ||
+			    (connector->tile_h_loc == 0 &&
+			     connector->tile_v_loc == 0 &&
+			     !drm_connector_get_tiled_mode(connector))) {
+				DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
+					      connector->base.id);
+				modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+			} else {
+				modes[i] = drm_connector_get_tiled_mode(connector);
+			}
+		}
+
 		DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
 			      "none");
 		conn_configured |= BIT_ULL(i);
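The comment in the hunk above encodes a three-way policy: missing tiles force a non-tiled fallback, and even with all tiles present, tile (0,0) falls back if fbcon could not fit a mode matching the tile size. A self-contained sketch of just that decision, with stand-in types for the drm_connector fields used above:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the drm_connector tile fields referenced in the diff. */
struct tile_conn {
	bool has_tile;
	int num_h_tile, num_v_tile;	/* tile grid dimensions */
	int tile_h_loc, tile_v_loc;	/* this connector's position */
	bool has_tiled_mode;		/* drm_connector_get_tiled_mode() != NULL */
};

/* Mirrors the branch in drm_client_target_preferred(). */
static bool use_fallback_mode(const struct tile_conn *c, int num_tiled_conns)
{
	if (!c->has_tile)
		return false;
	if (num_tiled_conns < c->num_h_tile * c->num_v_tile)
		return true;
	return c->tile_h_loc == 0 && c->tile_v_loc == 0 && !c->has_tiled_mode;
}

int main(void)
{
	struct tile_conn tile00 = { true, 2, 2, 0, 0, false };

	/* 2x2 tiled display, only 3 tiles connected: fall back. */
	printf("3 of 4 tiles: fallback=%d\n", use_fallback_mode(&tile00, 3));
	/* All 4 present but tile 0,0 lacks a tiled mode: fall back too. */
	printf("4 of 4 tiles: fallback=%d\n", use_fallback_mode(&tile00, 4));
	return 0;
}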
@@ -515,6 +571,7 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
 	bool fallback = true, ret = true;
 	int num_connectors_enabled = 0;
 	int num_connectors_detected = 0;
+	int num_tiled_conns = 0;
 	struct drm_modeset_acquire_ctx ctx;
 
 	if (!drm_drv_uses_atomic_modeset(dev))
@@ -532,6 +589,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
 	memcpy(save_enabled, enabled, count);
 	mask = GENMASK(count - 1, 0);
 	conn_configured = 0;
+	for (i = 0; i < count; i++) {
+		if (connectors[i]->has_tile &&
+		    connectors[i]->status == connector_status_connected)
+			num_tiled_conns++;
+	}
retry:
 	conn_seq = conn_configured;
 	for (i = 0; i < count; i++) {
@@ -631,6 +693,16 @@ retry:
 				      connector->name);
 			modes[i] = &connector->state->crtc->mode;
 		}
+		/*
+		 * In case of tiled modes, if all tiles are not present
+		 * then fallback to a non tiled mode.
+		 */
+		if (connector->has_tile &&
+		    num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
+			DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
+				      connector->base.id);
+			modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+		}
 		crtcs[i] = new_crtc;
 
 		DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
@@ -1561,7 +1561,9 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 		for (j = 0; j < mode_set->num_connectors; j++) {
 			struct drm_connector *connector = mode_set->connectors[j];
 
-			if (connector->has_tile) {
+			if (connector->has_tile &&
+			    desired_mode->hdisplay == connector->tile_h_size &&
+			    desired_mode->vdisplay == connector->tile_v_size) {
 				lasth = (connector->tile_h_loc == (connector->num_h_tile - 1));
 				lastv = (connector->tile_v_loc == (connector->num_v_tile - 1));
 				/* cloning to multiple tiles is just crazy-talk, so: */
@@ -0,0 +1 @@
+*.hdrtest
@@ -31,9 +31,6 @@ CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init)
 subdir-ccflags-y += \
 	$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
 
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
-
 subdir-ccflags-y += -I$(srctree)/$(src)
 
 # Please keep these build lists sorted!
@@ -73,11 +70,12 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o display/intel_pipe_crc.o
 i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
 
 # "Graphics Technology" (aka we talk to the gpu)
-obj-y += gt/
 gt-y += \
 	gt/debugfs_engines.o \
 	gt/debugfs_gt.o \
 	gt/debugfs_gt_pm.o \
+	gt/gen6_ppgtt.o \
+	gt/gen8_ppgtt.o \
 	gt/intel_breadcrumbs.o \
 	gt/intel_context.o \
 	gt/intel_engine_cs.o \
@@ -85,14 +83,17 @@ gt-y += \
 	gt/intel_engine_pm.o \
 	gt/intel_engine_pool.o \
 	gt/intel_engine_user.o \
+	gt/intel_ggtt.o \
 	gt/intel_gt.o \
 	gt/intel_gt_irq.o \
 	gt/intel_gt_pm.o \
 	gt/intel_gt_pm_irq.o \
 	gt/intel_gt_requests.o \
+	gt/intel_gtt.o \
 	gt/intel_llc.o \
 	gt/intel_lrc.o \
 	gt/intel_mocs.o \
+	gt/intel_ppgtt.o \
 	gt/intel_rc6.o \
 	gt/intel_renderstate.o \
 	gt/intel_reset.o \
@@ -111,7 +112,6 @@ gt-y += \
 i915-y += $(gt-y)
 
 # GEM (Graphics Execution Management) code
-obj-y += gem/
 gem-y += \
 	gem/i915_gem_busy.o \
 	gem/i915_gem_clflush.o \
@@ -157,7 +157,6 @@ i915-y += \
 	intel_wopcm.o
 
 # general-purpose microcontroller (GuC) support
-obj-y += gt/uc/
 i915-y += gt/uc/intel_uc.o \
 	  gt/uc/intel_uc_fw.o \
 	  gt/uc/intel_guc.o \
@@ -170,7 +169,6 @@ i915-y += gt/uc/intel_uc.o \
 	  gt/uc/intel_huc_fw.o
 
 # modesetting core code
-obj-y += display/
 i915-y += \
 	display/intel_atomic.o \
 	display/intel_atomic_plane.o \
@@ -235,7 +233,6 @@ i915-y += \
 	display/vlv_dsi_pll.o
 
 # perf code
-obj-y += oa/
 i915-y += \
 	oa/i915_oa_hsw.o \
 	oa/i915_oa_bdw.o \
@@ -260,6 +257,7 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
 	gem/selftests/igt_gem_utils.o \
 	selftests/i915_random.o \
 	selftests/i915_selftest.o \
 	selftests/igt_atomic.o \
 	selftests/igt_flush_test.o \
 	selftests/igt_live_test.o \
+	selftests/igt_mmap.o \
@@ -276,3 +274,27 @@ endif
 
 obj-$(CONFIG_DRM_I915) += i915.o
 obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
+
+# header test
+
+# exclude some broken headers from the test coverage
+no-header-test := \
+	display/intel_vbt_defs.h \
+	gvt/execlist.h \
+	gvt/fb_decoder.h \
+	gvt/gtt.h \
+	gvt/gvt.h \
+	gvt/interrupt.h \
+	gvt/mmio_context.h \
+	gvt/mpt.h \
+	gvt/scheduler.h
+
+extra-$(CONFIG_DRM_I915_WERROR) += \
+	$(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
+		$(shell cd $(srctree)/$(src) && find * -name '*.h')))
+
+quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
+      cmd_hdrtest = $(CC) $(c_flags) -S -o /dev/null -x c /dev/null -include $<; touch $@
+
+$(obj)/%.hdrtest: $(src)/%.h FORCE
+	$(call if_changed_dep,hdrtest)
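The hdrtest rule above compiles each header on its own: `-x c /dev/null -include $<` builds an empty translation unit that includes just that header, so a header only passes if it is self-contained. A hypothetical pair of headers showing what the test catches (names are illustrative, not from the tree):

/* bad_region.h -- would fail HDRTEST: 'u32' is undeclared because the
 * header relies on some other file having included <linux/types.h>
 * before it. */
struct bad_region {
	u32 start;
	u32 size;
};

/* good_region.h -- passes: the header includes what it uses and so
 * compiles standalone. */
#include <linux/types.h>

struct good_region {
	u32 start;
	u32 size;
};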
@@ -1,6 +0,0 @@
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
-header-test- := intel_vbt_defs.h
@@ -77,7 +77,7 @@ static enum transcoder dsi_port_to_transcoder(enum port port)
 static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	struct mipi_dsi_device *dsi;
 	enum port port;
 	enum transcoder dsi_trans;
@@ -202,7 +202,7 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host,
 static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum phy phy;
 	u32 tmp;
 	int lane;
@@ -267,7 +267,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
 				     const struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	u32 dss_ctl1;
 
 	dss_ctl1 = I915_READ(DSS_CTL1);
@@ -306,7 +306,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
 static int afe_clk(struct intel_encoder *encoder,
 		   const struct intel_crtc_state *crtc_state)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	int bpp;
 
 	if (crtc_state->dsc.compression_enable)
@@ -321,7 +321,7 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
 					  const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	int afe_clk_khz;
 	u32 esc_clk_div_m;
@@ -360,7 +360,7 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
 static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	u32 tmp;
 
@@ -376,7 +376,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
 static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum phy phy;
 
 	for_each_dsi_phy(phy, intel_dsi->phys)
@@ -387,7 +387,7 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
 static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum phy phy;
 	u32 tmp;
 	int lane;
@@ -436,7 +436,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
 static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	u32 tmp;
 	enum phy phy;
 
@@ -488,7 +488,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
 static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	u32 tmp;
 	enum port port;
 
@@ -509,7 +509,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
 			     const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	u32 tmp;
 	enum port port;
 	enum phy phy;
@@ -575,7 +575,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
 static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	u32 tmp;
 	enum phy phy;
 
@@ -591,7 +591,7 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
 static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	u32 tmp;
 	enum phy phy;
 
@@ -608,7 +608,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
 			      const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 	enum phy phy;
 	u32 val;
@@ -640,7 +640,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
 			       const struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	enum pipe pipe = intel_crtc->pipe;
 	u32 tmp;
@@ -789,7 +789,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
 				 const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->hw.adjusted_mode;
 	enum port port;
@@ -923,7 +923,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
 static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	enum transcoder dsi_trans;
 	u32 tmp;
@@ -945,7 +945,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
 				     const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	enum transcoder dsi_trans;
 	u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
@@ -1026,7 +1026,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
 static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	struct mipi_dsi_device *dsi;
 	enum port port;
 	enum transcoder dsi_trans;
@@ -1077,7 +1077,7 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
 				 const struct intel_crtc_state *pipe_config,
 				 const struct drm_connector_state *conn_state)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
 	/* step3b */
 	gen11_dsi_map_pll(encoder, pipe_config);
@@ -1104,7 +1104,7 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
 static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	enum transcoder dsi_trans;
 	u32 tmp;
@@ -1126,7 +1126,7 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
 
 static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
@@ -1139,7 +1139,7 @@ static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
 static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	enum transcoder dsi_trans;
 	u32 tmp;
@@ -1180,7 +1180,7 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
 static void gen11_dsi_disable_port(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	u32 tmp;
 	enum port port;
 
@@ -1202,7 +1202,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
 static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 	u32 tmp;
 
@@ -1229,7 +1229,7 @@ static void gen11_dsi_disable(struct intel_encoder *encoder,
 			      const struct intel_crtc_state *old_crtc_state,
 			      const struct drm_connector_state *old_conn_state)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
 	/* step1: turn off backlight */
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
@@ -1259,7 +1259,7 @@ static void gen11_dsi_post_disable(struct intel_encoder *encoder,
 
 	intel_dsc_disable(old_crtc_state);
 
-	skylake_scaler_disable(old_crtc_state);
+	skl_scaler_disable(old_crtc_state);
 }
 
 static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
@@ -1272,7 +1272,7 @@ static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
 static void gen11_dsi_get_timings(struct intel_encoder *encoder,
 				  struct intel_crtc_state *pipe_config)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	struct drm_display_mode *adjusted_mode =
 		&pipe_config->hw.adjusted_mode;
 
@@ -1313,7 +1313,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
 	intel_dsc_get_config(encoder, pipe_config);
 
@@ -1417,7 +1417,8 @@ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 
-	get_dsi_io_power_domains(i915, enc_to_intel_dsi(&encoder->base));
+	get_dsi_io_power_domains(i915,
+				 enc_to_intel_dsi(encoder));
 
 	if (crtc_state->dsc.compression_enable)
 		intel_display_power_get(i915,
@@ -1428,7 +1429,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
 				   enum pipe *pipe)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum transcoder dsi_trans;
 	intel_wakeref_t wakeref;
 	enum port port;
@@ -37,6 +37,7 @@
 #include "intel_atomic.h"
 #include "intel_display_types.h"
 #include "intel_hdcp.h"
+#include "intel_psr.h"
 #include "intel_sprite.h"
 
 /**
@@ -129,6 +130,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
 	struct drm_crtc_state *crtc_state;
 
 	intel_hdcp_atomic_check(conn, old_state, new_state);
+	intel_psr_atomic_check(conn, old_state, new_state);
 
 	if (!new_state->crtc)
 		return 0;
@@ -174,6 +176,38 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector)
 	return &state->base;
 }
 
+/**
+ * intel_connector_needs_modeset - check if connector needs a modeset
+ */
+bool
+intel_connector_needs_modeset(struct intel_atomic_state *state,
+			      struct drm_connector *connector)
+{
+	const struct drm_connector_state *old_conn_state, *new_conn_state;
+
+	old_conn_state = drm_atomic_get_old_connector_state(&state->base, connector);
+	new_conn_state = drm_atomic_get_new_connector_state(&state->base, connector);
+
+	return old_conn_state->crtc != new_conn_state->crtc ||
+	       (new_conn_state->crtc &&
+		drm_atomic_crtc_needs_modeset(drm_atomic_get_new_crtc_state(&state->base,
+									    new_conn_state->crtc)));
+}
+
+struct intel_digital_connector_state *
+intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
+					 struct intel_connector *connector)
+{
+	struct drm_connector_state *conn_state;
+
+	conn_state = drm_atomic_get_connector_state(&state->base,
+						    &connector->base);
+	if (IS_ERR(conn_state))
+		return ERR_CAST(conn_state);
+
+	return to_intel_digital_connector_state(conn_state);
+}
+
 /**
  * intel_crtc_duplicate_state - duplicate crtc state
  * @crtc: drm crtc
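The new helper above answers one question: does a connector need a full modeset? It does if it changes CRTC, or if the CRTC it ends up on itself needs a modeset. A minimal standalone model of that logic (types are stand-ins for the drm structures):

#include <stdbool.h>
#include <stdio.h>

struct crtc_model { int id; bool needs_modeset; };

/* Mirrors intel_connector_needs_modeset(): CRTC changed, or the new
 * CRTC requires a full modeset. */
static bool connector_needs_modeset(const struct crtc_model *old_crtc,
				    const struct crtc_model *new_crtc)
{
	return old_crtc != new_crtc || (new_crtc && new_crtc->needs_modeset);
}

int main(void)
{
	struct crtc_model a = { 1, false };
	struct crtc_model b = { 2, true };

	printf("%d\n", connector_needs_modeset(&a, &a));   /* 0: same CRTC, fastset */
	printf("%d\n", connector_needs_modeset(&a, &b));   /* 1: moved to another CRTC */
	printf("%d\n", connector_needs_modeset(&a, NULL)); /* 1: being disabled */
	return 0;
}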
@@ -17,6 +17,7 @@ struct drm_device;
 struct drm_i915_private;
 struct drm_property;
 struct intel_atomic_state;
+struct intel_connector;
 struct intel_crtc;
 struct intel_crtc_state;
 
@@ -32,6 +33,11 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
 					 struct drm_atomic_state *state);
 struct drm_connector_state *
 intel_digital_connector_duplicate_state(struct drm_connector *connector);
+bool intel_connector_needs_modeset(struct intel_atomic_state *state,
+				   struct drm_connector *connector);
+struct intel_digital_connector_state *
+intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
+					 struct intel_connector *connector);
 
 struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
 void intel_crtc_destroy_state(struct drm_crtc *crtc,
@@ -707,8 +707,8 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
 	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
 			 connector->base.id,
 			 connector->name,
-			 connector->encoder->base.id,
-			 connector->encoder->name);
+			 encoder->base.base.id,
+			 encoder->base.name);
 
 	connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
 
@@ -856,7 +856,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
 	}
 
 	/* Force CDCLK to 2*BCLK as long as we need audio powered. */
-	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+	if (IS_GEMINILAKE(dev_priv))
 		glk_force_audio_cdclk(dev_priv, true);
 
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -875,7 +875,7 @@ static void i915_audio_component_put_power(struct device *kdev,
 
 	/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
 	if (--dev_priv->audio_power_refcount == 0)
-		if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+		if (IS_GEMINILAKE(dev_priv))
			glk_force_audio_cdclk(dev_priv, false);
 
 	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
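The two hunks above restrict the "CDCLK >= 2*BCLK" workaround to Geminilake, matching the changelog's "Relax audio min CDCLK requirements on non-GLK". A toy model of the refcounted force around audio power, assuming (as the put_power hunk shows) that the force is gated on the first get and last put of audio_power_refcount; names are stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct dev_model { int audio_power_refcount; bool is_glk; bool cdclk_forced; };

static void get_power(struct dev_model *d)
{
	/* First audio user on GLK: force CDCLK up. */
	if (d->audio_power_refcount++ == 0 && d->is_glk)
		d->cdclk_forced = true;   /* glk_force_audio_cdclk(dev, true) */
}

static void put_power(struct dev_model *d)
{
	/* Last audio user gone: stop forcing. */
	if (--d->audio_power_refcount == 0 && d->is_glk)
		d->cdclk_forced = false;  /* glk_force_audio_cdclk(dev, false) */
}

int main(void)
{
	struct dev_model glk = { 0, true, false };

	get_power(&glk);
	printf("forced while audio active: %d\n", glk.cdclk_forced);
	put_power(&glk);
	printf("forced after release: %d\n", glk.cdclk_forced);
	return 0;
}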
@@ -486,3 +486,8 @@ int intel_bw_init(struct drm_i915_private *dev_priv)
 
 	return 0;
 }
+
+void intel_bw_cleanup(struct drm_i915_private *dev_priv)
+{
+	drm_atomic_private_obj_fini(&dev_priv->bw_obj);
+}
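This is the intel_bw_state memory-leak fix from the changelog: intel_bw_init() registers bw_obj as an atomic private object, and the new intel_bw_cleanup() gives it the matching fini. A sketch of the pairing, assuming the drm private-object API and an intel_bw_funcs state-funcs table (error paths omitted):

int example_bw_init(struct drm_i915_private *dev_priv)
{
	struct intel_bw_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return -ENOMEM;

	/* Hands ownership of the duplicated state to the private obj. */
	drm_atomic_private_obj_init(&dev_priv->drm, &dev_priv->bw_obj,
				    &state->base, &intel_bw_funcs);
	return 0;
}

void example_bw_cleanup(struct drm_i915_private *dev_priv)
{
	/* Frees the tracked state; without this it leaked on unload. */
	drm_atomic_private_obj_fini(&dev_priv->bw_obj);
}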
@@ -25,6 +25,7 @@ struct intel_bw_state {
 
 void intel_bw_init_hw(struct drm_i915_private *dev_priv);
 int intel_bw_init(struct drm_i915_private *dev_priv);
+void intel_bw_cleanup(struct drm_i915_private *dev_priv);
 int intel_bw_atomic_check(struct intel_atomic_state *state);
 void intel_bw_crtc_update(struct intel_bw_state *bw_state,
 			  const struct intel_crtc_state *crtc_state);
@@ -2004,6 +2004,18 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
 	/* Account for additional needs from the planes */
 	min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk);
 
+	/*
+	 * HACK. Currently for TGL platforms we calculate
+	 * min_cdclk initially based on pixel_rate divided
+	 * by 2, accounting for also plane requirements,
+	 * however in some cases the lowest possible CDCLK
+	 * doesn't work and causing the underruns.
+	 * Explicitly stating here that this seems to be currently
+	 * rather a Hack, than final solution.
+	 */
+	if (IS_TIGERLAKE(dev_priv))
+		min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
+
 	if (min_cdclk > dev_priv->max_cdclk_freq) {
 		DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
			      min_cdclk, dev_priv->max_cdclk_freq);
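To see what the clamp above changes, take a hypothetical TGL 4K mode with pixel_rate = 533250 kHz (an assumed figure, not from the patch): per the comment, min_cdclk otherwise starts from pixel_rate / 2, while the hack raises the floor back to the full pixel rate to avoid underruns.

#include <stdio.h>

int main(void)
{
	/* Hypothetical 4K@60 pixel rate in kHz; illustrative only. */
	int pixel_rate = 533250;

	/* TGL normally allows CDCLK as low as half the pixel rate. */
	int min_cdclk = pixel_rate / 2;		/* 266625 kHz */

	/* The hack clamps it back up to the full pixel rate. */
	if (min_cdclk < pixel_rate)
		min_cdclk = pixel_rate;		/* 533250 kHz */

	printf("min_cdclk = %d kHz\n", min_cdclk);
	return 0;
}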
@@ -65,7 +65,7 @@ static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
 	return container_of(encoder, struct intel_crt, base);
 }
 
-static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+static struct intel_crt *intel_attached_crt(struct intel_connector *connector)
 {
 	return intel_encoder_to_crt(intel_attached_encoder(connector));
 }
@@ -247,7 +247,7 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
 
 	intel_ddi_disable_transcoder_func(old_crtc_state);
 
-	ironlake_pfit_disable(old_crtc_state);
+	ilk_pfit_disable(old_crtc_state);
 
 	intel_ddi_disable_pipe_clock(old_crtc_state);
 
@@ -351,7 +351,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
 
 	/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
 	if (HAS_PCH_LPT(dev_priv) &&
-	    (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+	    ilk_get_lanes_required(mode->clock, 270000, 24) > 2)
 		return MODE_CLOCK_HIGH;
 
 	/* HSW/BDW FDI limited to 4k */
@@ -427,10 +427,10 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
 	return 0;
 }
 
-static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
+static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_crt *crt = intel_attached_crt(connector);
+	struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 adpa;
 	bool ret;
@@ -440,7 +440,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 		bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
 		u32 save_adpa;
 
-		crt->force_hotplug_required = 0;
+		crt->force_hotplug_required = false;
 
 		save_adpa = adpa = I915_READ(crt->adpa_reg);
 		DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
@@ -477,7 +477,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_crt *crt = intel_attached_crt(connector);
+	struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	bool reenable_hpd;
 	u32 adpa;
@@ -535,7 +535,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 	int i, tries = 0;
 
 	if (HAS_PCH_SPLIT(dev_priv))
-		return intel_ironlake_crt_detect_hotplug(connector);
+		return ilk_crt_detect_hotplug(connector);
 
 	if (IS_VALLEYVIEW(dev_priv))
 		return valleyview_crt_detect_hotplug(connector);
@@ -609,7 +609,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
 
 static bool intel_crt_detect_ddc(struct drm_connector *connector)
 {
-	struct intel_crt *crt = intel_attached_crt(connector);
+	struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
 	struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
 	struct edid *edid;
 	struct i2c_adapter *i2c;
@@ -795,7 +795,7 @@ intel_crt_detect(struct drm_connector *connector,
 		 bool force)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	struct intel_crt *crt = intel_attached_crt(connector);
+	struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
 	struct intel_encoder *intel_encoder = &crt->base;
 	intel_wakeref_t wakeref;
 	int status, ret;
@@ -886,7 +886,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crt *crt = intel_attached_crt(connector);
+	struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
 	struct intel_encoder *intel_encoder = &crt->base;
 	intel_wakeref_t wakeref;
 	struct i2c_adapter *i2c;
@@ -925,7 +925,7 @@ void intel_crt_reset(struct drm_encoder *encoder)
 		POSTING_READ(crt->adpa_reg);
 
 		DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
-		crt->force_hotplug_required = 1;
+		crt->force_hotplug_required = true;
 	}
 
 }
@@ -1063,7 +1063,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 	/*
 	 * Configure the automatic hotplug detection stuff
 	 */
-	crt->force_hotplug_required = 0;
+	crt->force_hotplug_required = false;
 
 	/*
	 * TODO: find a proper way to discover whether we need to set the the
@@ -34,6 +34,7 @@
 #include "intel_ddi.h"
 #include "intel_display_types.h"
 #include "intel_dp.h"
+#include "intel_dp_mst.h"
 #include "intel_dp_link_training.h"
 #include "intel_dpio_phy.h"
 #include "intel_dsi.h"
@@ -1237,9 +1238,9 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
 
 static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct intel_digital_port *intel_dig_port =
-		enc_to_dig_port(&encoder->base);
+		enc_to_dig_port(encoder);
 
 	intel_dp->DP = intel_dig_port->saved_port_bits |
		       DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
@@ -1899,8 +1900,13 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
 		temp |= TRANS_DDI_MODE_SELECT_DP_MST;
 		temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
 
-		if (INTEL_GEN(dev_priv) >= 12)
-			temp |= TRANS_DDI_MST_TRANSPORT_SELECT(crtc_state->cpu_transcoder);
+		if (INTEL_GEN(dev_priv) >= 12) {
+			enum transcoder master;
+
+			master = crtc_state->mst_master_transcoder;
+			WARN_ON(master == INVALID_TRANSCODER);
+			temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
+		}
 	} else {
 		temp |= TRANS_DDI_MODE_SELECT_DP_SST;
 		temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
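The hunk above is part of the DP MST disable-sequence rework: on gen12, TRANS_DDI_MST_TRANSPORT_SELECT must be programmed with the MST master transcoder rather than each transcoder's own id, so every stream of the link points at the same master. A toy illustration of the difference (the register layout is a stand-in, not the real bitfield):

#include <stdio.h>

enum transcoder { TRANS_A, TRANS_B };

int main(void)
{
	/* Two MST streams: transcoder A is the master, B is a slave. */
	enum transcoder master = TRANS_A;
	enum transcoder streams[] = { TRANS_A, TRANS_B };

	for (int i = 0; i < 2; i++) {
		/* The old code selected streams[i] (its own transcoder);
		 * the new code selects the shared master for both. */
		printf("transcoder %c -> transport select %c\n",
		       'A' + streams[i], 'A' + master);
	}
	return 0;
}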
@@ -1944,17 +1950,18 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-	i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
-	u32 val = I915_READ(reg);
+	u32 val;
+
+	val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+	val &= ~TRANS_DDI_FUNC_ENABLE;
 
 	if (INTEL_GEN(dev_priv) >= 12) {
-		val &= ~(TRANS_DDI_FUNC_ENABLE | TGL_TRANS_DDI_PORT_MASK |
-			 TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+		if (!intel_dp_mst_is_master_trans(crtc_state))
+			val &= ~TGL_TRANS_DDI_PORT_MASK;
 	} else {
-		val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK |
-			 TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+		val &= ~TRANS_DDI_PORT_MASK;
 	}
-	I915_WRITE(reg, val);
+	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
 
 	if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -2217,7 +2224,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
 	if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
 		return;
 
-	dig_port = enc_to_dig_port(&encoder->base);
+	dig_port = enc_to_dig_port(encoder);
 	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
 	/*
@@ -2287,7 +2294,7 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
 static void skl_ddi_set_iboost(struct intel_encoder *encoder,
 			       int level, enum intel_output_type type)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = encoder->port;
 	u8 iboost;
@@ -2358,7 +2365,7 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
 u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	enum port port = encoder->port;
 	enum phy phy = intel_port_to_phy(dev_priv, port);
 	int n_entries;
@@ -2497,7 +2504,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
 		width = 4;
 		rate = 0; /* Rate is always < than 6GHz for HDMI */
 	} else {
-		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 		width = intel_dp->lane_count;
 		rate = intel_dp->link_rate;
@@ -2623,7 +2630,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 		width = 4;
 		/* Rate is always < than 6GHz for HDMI */
 	} else {
-		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 		width = intel_dp->lane_count;
 		rate = intel_dp->link_rate;
@@ -3160,57 +3167,6 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
 	}
 }
 
-static void
-icl_phy_set_clock_gating(struct intel_digital_port *dig_port, bool enable)
-{
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
-	u32 val, bits;
-	int ln;
-
-	if (tc_port == PORT_TC_NONE)
-		return;
-
-	bits = MG_DP_MODE_CFG_TR2PWR_GATING | MG_DP_MODE_CFG_TRPWR_GATING |
-	       MG_DP_MODE_CFG_CLNPWR_GATING | MG_DP_MODE_CFG_DIGPWR_GATING |
-	       MG_DP_MODE_CFG_GAONPWR_GATING;
-
-	for (ln = 0; ln < 2; ln++) {
-		if (INTEL_GEN(dev_priv) >= 12) {
-			I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
-			val = I915_READ(DKL_DP_MODE(tc_port));
-		} else {
-			val = I915_READ(MG_DP_MODE(ln, tc_port));
-		}
-
-		if (enable)
-			val |= bits;
-		else
-			val &= ~bits;
-
-		if (INTEL_GEN(dev_priv) >= 12)
-			I915_WRITE(DKL_DP_MODE(tc_port), val);
-		else
-			I915_WRITE(MG_DP_MODE(ln, tc_port), val);
-	}
-
-	if (INTEL_GEN(dev_priv) == 11) {
-		bits = MG_MISC_SUS0_CFG_TR2PWR_GATING |
-		       MG_MISC_SUS0_CFG_CL2PWR_GATING |
-		       MG_MISC_SUS0_CFG_GAONPWR_GATING |
-		       MG_MISC_SUS0_CFG_TRPWR_GATING |
-		       MG_MISC_SUS0_CFG_CL1PWR_GATING |
-		       MG_MISC_SUS0_CFG_DGPWR_GATING;
-
-		val = I915_READ(MG_MISC_SUS0(tc_port));
-		if (enable)
-			val |= (bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3));
-		else
-			val &= ~(bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK);
-		I915_WRITE(MG_MISC_SUS0(tc_port), val);
-	}
-}
-
 static void
 icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
 		       const struct intel_crtc_state *crtc_state)
@@ -3317,7 +3273,7 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
 	if (!crtc_state->fec_enable)
 		return;
 
-	intel_dp = enc_to_intel_dp(&encoder->base);
+	intel_dp = enc_to_intel_dp(encoder);
 	val = I915_READ(intel_dp->regs.dp_tp_ctl);
 	val |= DP_TP_CTL_FEC_ENABLE;
 	I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
@@ -3337,7 +3293,7 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
 	if (!crtc_state->fec_enable)
 		return;
 
-	intel_dp = enc_to_intel_dp(&encoder->base);
+	intel_dp = enc_to_intel_dp(encoder);
 	val = I915_READ(intel_dp->regs.dp_tp_ctl);
 	val &= ~DP_TP_CTL_FEC_ENABLE;
 	I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
@@ -3428,10 +3384,10 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *crtc_state,
 				  const struct drm_connector_state *conn_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
 	int level = intel_ddi_dp_level(intel_dp);
 	enum transcoder transcoder = crtc_state->cpu_transcoder;
@@ -3458,14 +3414,14 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	 * (DFLEXDPSP.DPX4TXLATC)
 	 *
	 * This was done before tgl_ddi_pre_enable_dp by
-	 * haswell_crtc_enable()->intel_encoders_pre_pll_enable().
+	 * hsw_crtc_enable()->intel_encoders_pre_pll_enable().
 	 */
 
 	/*
 	 * 4. Enable the port PLL.
 	 *
 	 * The PLL enabling itself was already done before this function by
-	 * haswell_crtc_enable()->intel_enable_shared_dpll(). We need only
+	 * hsw_crtc_enable()->intel_enable_shared_dpll(). We need only
 	 * configure the PLL to port mapping here.
 	 */
 	intel_ddi_clk_select(encoder, crtc_state);
@@ -3509,12 +3465,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	 * down this function.
 	 */
 
-	/*
-	 * 7.d Type C with DP alternate or fixed/legacy/static connection -
-	 * Disable PHY clock gating per Type-C DDI Buffer page
-	 */
-	icl_phy_set_clock_gating(dig_port, false);
-
 	/* 7.e Configure voltage swing and related IO settings */
 	tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level,
				encoder->type);
@@ -3566,15 +3516,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	if (!is_trans_port_sync_mode(crtc_state))
 		intel_dp_stop_link_train(intel_dp);
 
-	/*
-	 * TODO: enable clock gating
-	 *
-	 * It is not written in DP enabling sequence but "PHY Clockgating
-	 * programming" states that clock gating should be enabled after the
-	 * link training but doing so causes all the following trainings to fail
-	 * so not enabling it for now.
-	 */
-
 	/* 7.l Configure and enable FEC if needed */
 	intel_ddi_enable_fec(encoder, crtc_state);
 	intel_dsc_enable(encoder, crtc_state);
@@ -3584,15 +3525,18 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *crtc_state,
 				  const struct drm_connector_state *conn_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = encoder->port;
 	enum phy phy = intel_port_to_phy(dev_priv, port);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
 	int level = intel_ddi_dp_level(intel_dp);
 
-	WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+	if (INTEL_GEN(dev_priv) < 11)
+		WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+	else
+		WARN_ON(is_mst && port == PORT_A);
 
 	intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
				 crtc_state->lane_count, is_mst);
@@ -3610,7 +3554,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
					 dig_port->ddi_io_power_domain);
 
 	icl_program_mg_dp_mode(dig_port, crtc_state);
-	icl_phy_set_clock_gating(dig_port, false);
 
 	if (INTEL_GEN(dev_priv) >= 11)
		icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
@@ -3644,8 +3587,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
 
 	intel_ddi_enable_fec(encoder, crtc_state);
 
-	icl_phy_set_clock_gating(dig_port, true);
-
 	if (!is_mst)
		intel_ddi_enable_pipe_clock(crtc_state);
 
@@ -3674,12 +3615,12 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
				      const struct intel_crtc_state *crtc_state,
				      const struct drm_connector_state *conn_state)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = encoder->port;
 	int level = intel_ddi_hdmi_level(dev_priv, port);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 
 	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
 	intel_ddi_clk_select(encoder, crtc_state);
@@ -3687,7 +3628,6 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
 	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
 	icl_program_mg_dp_mode(dig_port, crtc_state);
-	icl_phy_set_clock_gating(dig_port, false);
 
 	if (INTEL_GEN(dev_priv) >= 12)
		tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
@@ -3702,8 +3642,6 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
 	else
		intel_prepare_hdmi_ddi_buffers(encoder, level);
 
-	icl_phy_set_clock_gating(dig_port, true);
-
 	if (IS_GEN9_BC(dev_priv))
		skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
 
@@ -3746,12 +3684,12 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
		intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
 	} else {
		struct intel_lspcon *lspcon =
-				enc_to_intel_lspcon(&encoder->base);
+				enc_to_intel_lspcon(encoder);
 
		intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
		if (lspcon->active) {
			struct intel_digital_port *dig_port =
-					enc_to_dig_port(&encoder->base);
+					enc_to_dig_port(encoder);
 
			dig_port->set_infoframes(encoder,
						 crtc_state->has_infoframe,
@@ -3776,7 +3714,7 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
 	}
 
 	if (intel_crtc_has_dp_encoder(crtc_state)) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
		val = I915_READ(intel_dp->regs.dp_tp_ctl);
		val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
@@ -3796,7 +3734,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
				      const struct drm_connector_state *old_conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct intel_dp *intel_dp = &dig_port->dp;
 	bool is_mst = intel_crtc_has_type(old_crtc_state,
					  INTEL_OUTPUT_DP_MST);
@@ -3808,8 +3746,19 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
 	 */
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 
-	if (INTEL_GEN(dev_priv) < 12 && !is_mst)
-		intel_ddi_disable_pipe_clock(old_crtc_state);
+	if (INTEL_GEN(dev_priv) >= 12) {
+		if (is_mst) {
+			enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+			u32 val;
+
+			val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+			val &= ~TGL_TRANS_DDI_PORT_MASK;
+			I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+		}
+	} else {
+		if (!is_mst)
+			intel_ddi_disable_pipe_clock(old_crtc_state);
+	}
 
 	intel_disable_ddi_buf(encoder, old_crtc_state);
 
@@ -3838,7 +3787,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
					const struct drm_connector_state *old_conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
 
 	dig_port->set_infoframes(encoder, false,
@@ -3860,8 +3809,6 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	i915_reg_t reg;
-	u32 trans_ddi_func_ctl2_val;
 
 	if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
		return;
@@ -3869,10 +3816,7 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
 	DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
		      transcoder_name(old_crtc_state->cpu_transcoder));
 
-	reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
-	trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE |
-				    PORT_SYNC_MODE_MASTER_SELECT_MASK);
-	I915_WRITE(reg, trans_ddi_func_ctl2_val);
+	I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
 }
 
 static void intel_ddi_post_disable(struct intel_encoder *encoder,
@@ -3880,25 +3824,27 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
				   const struct drm_connector_state *old_conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
 	bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
 
-	intel_crtc_vblank_off(old_crtc_state);
+	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
+		intel_crtc_vblank_off(old_crtc_state);
 
-	intel_disable_pipe(old_crtc_state);
+		intel_disable_pipe(old_crtc_state);
 
-	if (INTEL_GEN(dev_priv) >= 11)
-		icl_disable_transcoder_port_sync(old_crtc_state);
+		if (INTEL_GEN(dev_priv) >= 11)
+			icl_disable_transcoder_port_sync(old_crtc_state);
 
-	intel_ddi_disable_transcoder_func(old_crtc_state);
+		intel_ddi_disable_transcoder_func(old_crtc_state);
 
-	intel_dsc_disable(old_crtc_state);
+		intel_dsc_disable(old_crtc_state);
 
-	if (INTEL_GEN(dev_priv) >= 9)
-		skylake_scaler_disable(old_crtc_state);
-	else
-		ironlake_pfit_disable(old_crtc_state);
+		if (INTEL_GEN(dev_priv) >= 9)
+			skl_scaler_disable(old_crtc_state);
+		else
+			ilk_pfit_disable(old_crtc_state);
+	}
 
 	/*
	 * When called from DP MST code:
@@ -3970,7 +3916,7 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
				const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	enum port port = encoder->port;
 
 	if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
@@ -4011,7 +3957,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
				  const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct drm_connector *connector = conn_state->connector;
 	enum port port = encoder->port;
 
@@ -4088,7 +4034,7 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
				 const struct intel_crtc_state *old_crtc_state,
				 const struct drm_connector_state *old_conn_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 	intel_dp->link_trained = false;
 
@@ -4136,7 +4082,7 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
				     const struct intel_crtc_state *crtc_state,
				     const struct drm_connector_state *conn_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 	intel_ddi_set_dp_msa(crtc_state, conn_state);
 
@@ -4200,7 +4146,8 @@ intel_ddi_update_prepare(struct intel_atomic_state *state,
 
 	WARN_ON(crtc && crtc->active);
 
-	intel_tc_port_get_link(enc_to_dig_port(&encoder->base), required_lanes);
+	intel_tc_port_get_link(enc_to_dig_port(encoder),
			       required_lanes);
 	if (crtc_state && crtc_state->hw.active)
		intel_update_active_dpll(state, crtc, encoder);
 }
@@ -4210,7 +4157,7 @@ intel_ddi_update_complete(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  struct intel_crtc *crtc)
 {
-	intel_tc_port_put_link(enc_to_dig_port(&encoder->base));
+	intel_tc_port_put_link(enc_to_dig_port(encoder));
 }
 
 static void
@@ -4219,7 +4166,7 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
			 const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
 	bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
 
@@ -4405,6 +4352,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
		pipe_config->lane_count =
			((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+
+		if (INTEL_GEN(dev_priv) >= 12)
+			pipe_config->mst_master_transcoder =
+				REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp);
+
		intel_dp_get_m_n(intel_crtc, pipe_config);
		break;
	default:
@@ -4518,7 +4470,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
 
 static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
 {
-	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
 
 	intel_dp_encoder_flush_work(encoder);
 
@@ -4585,7 +4537,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
				 struct drm_modeset_acquire_ctx *ctx)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_hdmi *hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
 	struct intel_connector *connector = hdmi->attached_connector;
 	struct i2c_adapter *adapter =
		intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
@@ -4657,7 +4609,7 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
		  struct intel_connector *connector,
		  bool irq_received)
 {
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct drm_modeset_acquire_ctx ctx;
 	enum intel_hotplug_state state;
 	int ret;
File diff suppressed because it is too large
@@ -474,6 +474,7 @@ void intel_link_compute_m_n(u16 bpp, int nlanes,
                             struct intel_link_m_n *m_n,
                             bool constant_n, bool fec_enable);
 bool is_ccs_modifier(u64 modifier);
+int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane);
 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
                               u32 pixel_format, u64 modifier);

@@ -521,7 +522,7 @@ int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
 void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);

-int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                          struct intel_digital_port *dport,
                          unsigned int expected_mask);

@@ -578,8 +579,8 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,

 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
-void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state);
-void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
+void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
+void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
                         const struct intel_plane_state *plane_state);
 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
@@ -514,7 +514,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
                 if (encoder->type == INTEL_OUTPUT_DP_MST)
                         continue;

-                dig_port = enc_to_dig_port(&encoder->base);
+                dig_port = enc_to_dig_port(encoder);
                 if (WARN_ON(!dig_port))
                         continue;

@@ -1664,8 +1664,8 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
         struct i915_power_domains *power_domains = &dev_priv->power_domains;
-        enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
-        enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+        enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
+        enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));

         mutex_lock(&power_domains->lock);
@@ -90,8 +90,8 @@ struct intel_framebuffer {
         /* for each plane in the normal GTT view */
         struct {
                 unsigned int x, y;
-        } normal[2];
-        /* for each plane in the rotated GTT view */
+        } normal[4];
+        /* for each plane in the rotated GTT view for no-CCS formats */
         struct {
                 unsigned int x, y;
                 unsigned int pitch; /* pixels */

@@ -555,7 +555,7 @@ struct intel_plane_state {
                  */
                 u32 stride;
                 int x, y;
-        } color_plane[2];
+        } color_plane[4];

         /* plane control register */
         u32 ctl;

@@ -1054,6 +1054,9 @@ struct intel_crtc_state {

         /* Bitmask to indicate slaves attached */
         u8 sync_mode_slaves_mask;
+
+        /* Only valid on TGL+ */
+        enum transcoder mst_master_transcoder;
 };

 struct intel_crtc {

@@ -1435,9 +1438,9 @@ struct intel_load_detect_pipe {
 };

 static inline struct intel_encoder *
-intel_attached_encoder(struct drm_connector *connector)
+intel_attached_encoder(struct intel_connector *connector)
 {
-        return to_intel_connector(connector)->encoder;
+        return connector->encoder;
 }

 static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)

@@ -1454,12 +1457,12 @@ static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
 }

 static inline struct intel_digital_port *
-enc_to_dig_port(struct drm_encoder *encoder)
+enc_to_dig_port(struct intel_encoder *encoder)
 {
-        struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+        struct intel_encoder *intel_encoder = encoder;

         if (intel_encoder_is_dig_port(intel_encoder))
-                return container_of(encoder, struct intel_digital_port,
+                return container_of(&encoder->base, struct intel_digital_port,
                                     base.base);
         else
                 return NULL;

@@ -1468,16 +1471,17 @@ enc_to_dig_port(struct drm_encoder *encoder)
 static inline struct intel_digital_port *
 conn_to_dig_port(struct intel_connector *connector)
 {
-        return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
+        return enc_to_dig_port(intel_attached_encoder(connector));
 }

 static inline struct intel_dp_mst_encoder *
-enc_to_mst(struct drm_encoder *encoder)
+enc_to_mst(struct intel_encoder *encoder)
 {
-        return container_of(encoder, struct intel_dp_mst_encoder, base.base);
+        return container_of(&encoder->base, struct intel_dp_mst_encoder,
+                            base.base);
 }

-static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder)
 {
         return &enc_to_dig_port(encoder)->dp;
 }

@@ -1490,14 +1494,14 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
                 return true;
         case INTEL_OUTPUT_DDI:
                 /* Skip pure HDMI/DVI DDI encoders */
-                return i915_mmio_reg_valid(enc_to_intel_dp(&encoder->base)->output_reg);
+                return i915_mmio_reg_valid(enc_to_intel_dp(encoder)->output_reg);
         default:
                 return false;
         }
 }

 static inline struct intel_lspcon *
-enc_to_intel_lspcon(struct drm_encoder *encoder)
+enc_to_intel_lspcon(struct intel_encoder *encoder)
 {
         return &enc_to_dig_port(encoder)->lspcon;
 }
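The hunks above are the hub of this series: every enc_to_*() helper now takes struct intel_encoder * instead of struct drm_encoder *, so the drm-to-i915 conversion happens once, at the few call sites that still receive the drm type from core callbacks. As a sketch only (stand-in types modelling the embedding, not driver code), the container_of pattern these helpers rely on looks like this self-contained toy:

    #include <stddef.h>
    #include <stdio.h>

    /* Toy model: an i915 encoder embeds the drm encoder, and a digital
     * port embeds the i915 encoder; helpers walk back up the embedding. */
    struct drm_encoder { int id; };
    struct intel_encoder { struct drm_encoder base; };
    struct intel_digital_port { struct intel_encoder base; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct intel_encoder *to_intel_encoder(struct drm_encoder *e)
    {
            return container_of(e, struct intel_encoder, base);
    }

    /* New-style helper: takes the i915 type, as in the hunks above. */
    static struct intel_digital_port *enc_to_dig_port(struct intel_encoder *e)
    {
            return container_of(e, struct intel_digital_port, base);
    }

    int main(void)
    {
            struct intel_digital_port port = { .base.base.id = 7 };
            struct drm_encoder *drm = &port.base.base; /* what drm core hands us */

            /* Convert once at the drm boundary, then stay in i915 types. */
            struct intel_digital_port *p = enc_to_dig_port(to_intel_encoder(drm));
            printf("port id = %d\n", p->base.base.id); /* prints 7 */
            return 0;
    }

Previously each helper took the drm type and converted internally, which hid the boundary and invited &encoder->base round trips at every call site.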
@@ -146,9 +146,9 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
 }

-static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
+static struct intel_dp *intel_attached_dp(struct intel_connector *connector)
 {
-        return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+        return enc_to_intel_dp(intel_attached_encoder(connector));
 }

 static void intel_dp_link_down(struct intel_encoder *encoder,

@@ -614,7 +614,7 @@ static enum drm_mode_status
 intel_dp_mode_valid(struct drm_connector *connector,
                     struct drm_display_mode *mode)
 {
-        struct intel_dp *intel_dp = intel_attached_dp(connector);
+        struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
         struct intel_connector *intel_connector = to_intel_connector(connector);
         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
         struct drm_i915_private *dev_priv = to_i915(connector->dev);

@@ -834,7 +834,7 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
          * Pick one that's not used by other ports.
          */
         for_each_intel_dp(&dev_priv->drm, encoder) {
-                struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

                 if (encoder->type == INTEL_OUTPUT_EDP) {
                         WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&

@@ -1031,7 +1031,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
          */

         for_each_intel_dp(&dev_priv->drm, encoder) {
-                struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

                 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

@@ -2034,7 +2034,7 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
                                        struct intel_crtc_state *crtc_state)
 {
-        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
         struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
         u8 line_buf_depth;
         int ret;

@@ -2205,7 +2205,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
                              struct drm_connector_state *conn_state)
 {
         struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
-        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
         struct link_config_limits limits;
         int common_len;
         int ret;

@@ -2366,8 +2366,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
         struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
-        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-        struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+        struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
         enum port port = encoder->port;
         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
         struct intel_connector *intel_connector = intel_dp->attached_connector;

@@ -2482,7 +2482,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
                              const struct intel_crtc_state *pipe_config)
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
         enum port port = encoder->port;
         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
         const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;

@@ -2509,7 +2509,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
          *
          * CPT PCH is quite different, having many bits moved
          * to the TRANS_DP_CTL register instead. That
-         * configuration happens (oddly) in ironlake_pch_enable
+         * configuration happens (oddly) in ilk_pch_enable
          */

         /* Preserve the BIOS-computed detected bit. This is

@@ -2653,7 +2653,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
  * is locked
  */

-static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
+static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
 {
         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
         u32 control;

@@ -2703,7 +2703,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
         if (!edp_have_panel_power(intel_dp))
                 wait_panel_power_cycle(intel_dp);

-        pp = ironlake_get_pp_control(intel_dp);
+        pp = ilk_get_pp_control(intel_dp);
         pp |= EDP_FORCE_VDD;

         pp_stat_reg = _pp_stat_reg(intel_dp);

@@ -2768,7 +2768,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
                       intel_dig_port->base.base.base.id,
                       intel_dig_port->base.base.name);

-        pp = ironlake_get_pp_control(intel_dp);
+        pp = ilk_get_pp_control(intel_dp);
         pp &= ~EDP_FORCE_VDD;

         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

@@ -2864,7 +2864,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
         wait_panel_power_cycle(intel_dp);

         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-        pp = ironlake_get_pp_control(intel_dp);
+        pp = ilk_get_pp_control(intel_dp);
         if (IS_GEN(dev_priv, 5)) {
                 /* ILK workaround: disable reset around power sequence */
                 pp &= ~PANEL_POWER_RESET;

@@ -2919,7 +2919,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
         WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
              dig_port->base.base.base.id, dig_port->base.base.name);

-        pp = ironlake_get_pp_control(intel_dp);
+        pp = ilk_get_pp_control(intel_dp);
         /* We need to switch off panel power _and_ force vdd, for otherwise some
          * panels get very unhappy and cease to work. */
         pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |

@@ -2968,7 +2968,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
         i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
         u32 pp;

-        pp = ironlake_get_pp_control(intel_dp);
+        pp = ilk_get_pp_control(intel_dp);
         pp |= EDP_BLC_ENABLE;

         I915_WRITE(pp_ctrl_reg, pp);

@@ -2980,7 +2980,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
                             const struct drm_connector_state *conn_state)
 {
-        struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
+        struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));

         if (!intel_dp_is_edp(intel_dp))
                 return;

@@ -3004,7 +3004,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
         i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
         u32 pp;

-        pp = ironlake_get_pp_control(intel_dp);
+        pp = ilk_get_pp_control(intel_dp);
         pp &= ~EDP_BLC_ENABLE;

         I915_WRITE(pp_ctrl_reg, pp);

@@ -3018,7 +3018,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
 /* Disable backlight PP control and backlight PWM. */
 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
 {
-        struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
+        struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));

         if (!intel_dp_is_edp(intel_dp))
                 return;

@@ -3036,13 +3036,13 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
 static void intel_edp_backlight_power(struct intel_connector *connector,
                                       bool enable)
 {
-        struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+        struct intel_dp *intel_dp = intel_attached_dp(connector);
         intel_wakeref_t wakeref;
         bool is_enabled;

         is_enabled = false;
         with_pps_lock(intel_dp, wakeref)
-                is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
+                is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
         if (is_enabled == enable)
                 return;

@@ -3079,13 +3079,13 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)

-static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
-                                const struct intel_crtc_state *pipe_config)
+static void ilk_edp_pll_on(struct intel_dp *intel_dp,
+                           const struct intel_crtc_state *pipe_config)
 {
         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

-        assert_pipe_disabled(dev_priv, crtc->pipe);
+        assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
         assert_dp_port_disabled(intel_dp);
         assert_edp_pll_disabled(dev_priv);

@@ -3119,13 +3119,13 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
         udelay(200);
 }

-static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
-                                 const struct intel_crtc_state *old_crtc_state)
+static void ilk_edp_pll_off(struct intel_dp *intel_dp,
+                            const struct intel_crtc_state *old_crtc_state)
 {
         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

-        assert_pipe_disabled(dev_priv, crtc->pipe);
+        assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
         assert_dp_port_disabled(intel_dp);
         assert_edp_pll_enabled(dev_priv);

@@ -3258,7 +3258,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe)
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
         intel_wakeref_t wakeref;
         bool ret;

@@ -3279,7 +3279,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
                                 struct intel_crtc_state *pipe_config)
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
         u32 tmp, flags = 0;
         enum port port = encoder->port;
         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);

@@ -3363,7 +3363,7 @@ static void intel_disable_dp(struct intel_encoder *encoder,
                              const struct intel_crtc_state *old_crtc_state,
                              const struct drm_connector_state *old_conn_state)
 {
-        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

         intel_dp->link_trained = false;

@@ -3397,7 +3397,7 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder,
                                 const struct intel_crtc_state *old_crtc_state,
                                 const struct drm_connector_state *old_conn_state)
 {
-        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
         enum port port = encoder->port;

         /*

@@ -3410,7 +3410,7 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder,

         /* Only ilk+ has port A */
         if (port == PORT_A)
-                ironlake_edp_pll_off(intel_dp, old_crtc_state);
+                ilk_edp_pll_off(intel_dp, old_crtc_state);
 }

 static void vlv_post_disable_dp(struct intel_encoder *encoder,

@@ -3548,7 +3548,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
                             const struct drm_connector_state *conn_state)
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
         u32 dp_reg = I915_READ(intel_dp->output_reg);
         enum pipe pipe = crtc->pipe;

@@ -3608,14 +3608,14 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder,
                               const struct intel_crtc_state *pipe_config,
                               const struct drm_connector_state *conn_state)
 {
-        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
         enum port port = encoder->port;

         intel_dp_prepare(encoder, pipe_config);

         /* Only ilk+ has port A */
         if (port == PORT_A)
-                ironlake_edp_pll_on(intel_dp, pipe_config);
+                ilk_edp_pll_on(intel_dp, pipe_config);
 }

 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)

@@ -3658,7 +3658,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
         lockdep_assert_held(&dev_priv->pps_mutex);

         for_each_intel_dp(&dev_priv->drm, encoder) {
-                struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

                 WARN(intel_dp->active_pipe == pipe,
                      "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",

@@ -3681,7 +3681,7 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
                                            const struct intel_crtc_state *crtc_state)
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

         lockdep_assert_held(&dev_priv->pps_mutex);

@@ -4203,7 +4203,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
                    const struct intel_crtc_state *old_crtc_state)
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
         enum port port = encoder->port;
         u32 DP = intel_dp->DP;

@@ -4903,7 +4903,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
         intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
         intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
         /* Set test active flag here so userspace doesn't interrupt things */
-        intel_dp->compliance.test_active = 1;
+        intel_dp->compliance.test_active = true;

         return DP_TEST_ACK;
 }

@@ -4947,7 +4947,7 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
         }

         /* Set test active flag here so userspace doesn't interrupt things */
-        intel_dp->compliance.test_active = 1;
+        intel_dp->compliance.test_active = true;

         return test_result;
 }

@@ -5096,7 +5096,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
                           struct drm_modeset_acquire_ctx *ctx)
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
         struct intel_connector *connector = intel_dp->attached_connector;
         struct drm_connector_state *conn_state;
         struct intel_crtc_state *crtc_state;

@@ -5536,7 +5536,7 @@ static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
 static bool icp_digital_port_connected(struct intel_encoder *encoder)
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-        struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+        struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
         enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

         if (intel_phy_is_combo(dev_priv, phy))

@@ -5651,7 +5651,7 @@ intel_dp_detect(struct drm_connector *connector,
                 bool force)
 {
         struct drm_i915_private *dev_priv = to_i915(connector->dev);
-        struct intel_dp *intel_dp = intel_attached_dp(connector);
+        struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
         struct intel_encoder *encoder = &dig_port->base;
         enum drm_connector_status status;

@@ -5755,7 +5755,7 @@ out:
 static void
 intel_dp_force(struct drm_connector *connector)
 {
-        struct intel_dp *intel_dp = intel_attached_dp(connector);
+        struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
         struct intel_encoder *intel_encoder = &dig_port->base;
         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);

@@ -5790,7 +5790,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
         }

         /* if eDP has no EDID, fall back to fixed mode */
-        if (intel_dp_is_edp(intel_attached_dp(connector)) &&
+        if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) &&
             intel_connector->panel.fixed_mode) {
                 struct drm_display_mode *mode;

@@ -5808,7 +5808,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
 static int
 intel_dp_connector_register(struct drm_connector *connector)
 {
-        struct intel_dp *intel_dp = intel_attached_dp(connector);
+        struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
         int ret;

         ret = intel_connector_register(connector);

@@ -5830,7 +5830,7 @@ intel_dp_connector_register(struct drm_connector *connector)
 static void
 intel_dp_connector_unregister(struct drm_connector *connector)
 {
-        struct intel_dp *intel_dp = intel_attached_dp(connector);
+        struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

         drm_dp_cec_unregister_connector(&intel_dp->aux);
         drm_dp_aux_unregister(&intel_dp->aux);

@@ -5839,7 +5839,7 @@ intel_dp_connector_unregister(struct drm_connector *connector)

 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
 {
-        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+        struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder));
         struct intel_dp *intel_dp = &intel_dig_port->dp;

         intel_dp_mst_encoder_cleanup(intel_dig_port);

@@ -5868,12 +5868,12 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
         intel_dp_encoder_flush_work(encoder);

         drm_encoder_cleanup(encoder);
-        kfree(enc_to_dig_port(encoder));
+        kfree(enc_to_dig_port(to_intel_encoder(encoder)));
 }

 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
 {
-        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
         intel_wakeref_t wakeref;

         if (!intel_dp_is_edp(intel_dp))

@@ -5904,7 +5904,7 @@ static
 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
                                 u8 *an)
 {
-        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
         static const struct drm_dp_aux_msg msg = {
                 .request = DP_AUX_NATIVE_WRITE,
                 .address = DP_AUX_HDCP_AKSV,

@@ -6514,7 +6514,7 @@ static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
 void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+        struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
         struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
         intel_wakeref_t wakeref;

@@ -6693,7 +6693,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)

         intel_pps_get_registers(intel_dp, &regs);

-        pp_ctl = ironlake_get_pp_control(intel_dp);
+        pp_ctl = ilk_get_pp_control(intel_dp);

         /* Ensure PPS is unlocked */
         if (!HAS_DDI(dev_priv))

@@ -6863,7 +6863,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
          * soon as the new power sequencer gets initialized.
          */
         if (force_disable_vdd) {
-                u32 pp = ironlake_get_pp_control(intel_dp);
+                u32 pp = ilk_get_pp_control(intel_dp);

                 WARN(pp & PANEL_POWER_ON, "Panel power already on\n");

@@ -7660,7 +7660,7 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
                 if (encoder->type != INTEL_OUTPUT_DDI)
                         continue;

-                intel_dp = enc_to_intel_dp(&encoder->base);
+                intel_dp = enc_to_intel_dp(encoder);

                 if (!intel_dp->can_mst)
                         continue;

@@ -7681,7 +7681,7 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
                 if (encoder->type != INTEL_OUTPUT_DDI)
                         continue;

-                intel_dp = enc_to_intel_dp(&encoder->base);
+                intel_dp = enc_to_intel_dp(encoder);

                 if (!intel_dp->can_mst)
                         continue;
@@ -57,7 +57,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
  */
 static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
 {
-        struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
         u8 read_val[2] = { 0x0 };
         u16 level = 0;

@@ -82,7 +82,7 @@ static void
 intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level)
 {
         struct intel_connector *connector = to_intel_connector(conn_state->connector);
-        struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
         u8 vals[2] = { 0x0 };

         vals[0] = level;

@@ -110,7 +110,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
 static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
 {
         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-        struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
         int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
         u8 pn, pn_min, pn_max;

@@ -178,7 +178,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
                                           const struct drm_connector_state *conn_state)
 {
         struct intel_connector *connector = to_intel_connector(conn_state->connector);
-        struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
         u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;

         if (drm_dp_dpcd_readb(&intel_dp->aux,

@@ -222,13 +222,14 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st

 static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state)
 {
-        set_aux_backlight_enable(enc_to_intel_dp(old_conn_state->best_encoder), false);
+        set_aux_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)),
+                                 false);
 }

 static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
                                         enum pipe pipe)
 {
-        struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
         struct intel_panel *panel = &connector->panel;

         if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)

@@ -247,7 +248,7 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
 static bool
 intel_dp_aux_display_control_capable(struct intel_connector *connector)
 {
-        struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);

         /* Check the eDP Display control capabilities registers to determine if
          * the panel can support backlight control over the aux channel
@@ -43,7 +43,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
                                             struct link_config_limits *limits)
 {
         struct drm_atomic_state *state = crtc_state->uapi.state;
-        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
         struct intel_dp *intel_dp = &intel_mst->primary->dp;
         struct intel_connector *connector =
                 to_intel_connector(conn_state->connector);

@@ -88,12 +88,58 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
         return 0;
 }

+/*
+ * Iterate over all connectors and return the smallest transcoder in the MST
+ * stream
+ */
+static enum transcoder
+intel_dp_mst_master_trans_compute(struct intel_atomic_state *state,
+                                  struct intel_dp *mst_port)
+{
+        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+        struct intel_digital_connector_state *conn_state;
+        struct intel_connector *connector;
+        enum pipe ret = I915_MAX_PIPES;
+        int i;
+
+        if (INTEL_GEN(dev_priv) < 12)
+                return INVALID_TRANSCODER;
+
+        for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+                struct intel_crtc_state *crtc_state;
+                struct intel_crtc *crtc;
+
+                if (connector->mst_port != mst_port || !conn_state->base.crtc)
+                        continue;
+
+                crtc = to_intel_crtc(conn_state->base.crtc);
+                crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+                if (!crtc_state->uapi.active)
+                        continue;
+
+                /*
+                 * Using crtc->pipe because crtc_state->cpu_transcoder is
+                 * computed, so others CRTCs could have non-computed
+                 * cpu_transcoder
+                 */
+                if (crtc->pipe < ret)
+                        ret = crtc->pipe;
+        }
+
+        if (ret == I915_MAX_PIPES)
+                return INVALID_TRANSCODER;
+
+        /* Simple cast works because TGL don't have a eDP transcoder */
+        return (enum transcoder)ret;
+}
+
 static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
                                        struct intel_crtc_state *pipe_config,
                                        struct drm_connector_state *conn_state)
 {
+        struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
         struct intel_dp *intel_dp = &intel_mst->primary->dp;
         struct intel_connector *connector =
                 to_intel_connector(conn_state->connector);
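To make intel_dp_mst_master_trans_compute() above concrete: on a gen12 (TGL) MST stream driving pipes B and C, the loop settles on the smallest active pipe, B, and the final cast maps pipe B to transcoder B; the comment about TGL lacking an eDP transcoder is exactly what makes that one-to-one cast valid. A standalone toy model of just the selection step (plain integers standing in for the kernel enums; not driver code):

    #include <stdio.h>

    enum { PIPE_A, PIPE_B, PIPE_C, PIPE_D, MAX_PIPES, INVALID_TRANS = -1 };

    /* Toy version of the selection: the smallest active pipe in the stream
     * becomes the master transcoder; no active pipe means no master. */
    static int toy_master_trans_compute(const int *stream_pipes, int n)
    {
            int ret = MAX_PIPES;

            for (int i = 0; i < n; i++)
                    if (stream_pipes[i] < ret)
                            ret = stream_pipes[i];

            return ret == MAX_PIPES ? INVALID_TRANS : ret;
    }

    int main(void)
    {
            int pipes[] = { PIPE_C, PIPE_B };  /* MST stream on pipes B and C */

            /* Prints 1, i.e. pipe/transcoder B. */
            printf("master transcoder = %d\n",
                   toy_master_trans_compute(pipes, 2));
            return 0;
    }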
@@ -155,24 +201,91 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,

         intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);

+        pipe_config->mst_master_transcoder = intel_dp_mst_master_trans_compute(state, intel_dp);
+
         return 0;
 }

+/*
+ * If one of the connectors in a MST stream needs a modeset, mark all CRTCs
+ * that shares the same MST stream as mode changed,
+ * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do
+ * a fastset when possible.
+ */
+static int
+intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
+                                       struct intel_atomic_state *state)
+{
+        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+        struct drm_connector_list_iter connector_list_iter;
+        struct intel_connector *connector_iter;
+
+        if (INTEL_GEN(dev_priv) < 12)
+                return 0;
+
+        if (!intel_connector_needs_modeset(state, &connector->base))
+                return 0;
+
+        drm_connector_list_iter_begin(&dev_priv->drm, &connector_list_iter);
+        for_each_intel_connector_iter(connector_iter, &connector_list_iter) {
+                struct intel_digital_connector_state *conn_iter_state;
+                struct intel_crtc_state *crtc_state;
+                struct intel_crtc *crtc;
+                int ret;
+
+                if (connector_iter->mst_port != connector->mst_port ||
+                    connector_iter == connector)
+                        continue;
+
+                conn_iter_state = intel_atomic_get_digital_connector_state(state,
+                                                                           connector_iter);
+                if (IS_ERR(conn_iter_state)) {
+                        drm_connector_list_iter_end(&connector_list_iter);
+                        return PTR_ERR(conn_iter_state);
+                }
+
+                if (!conn_iter_state->base.crtc)
+                        continue;
+
+                crtc = to_intel_crtc(conn_iter_state->base.crtc);
+                crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+                if (IS_ERR(crtc_state)) {
+                        drm_connector_list_iter_end(&connector_list_iter);
+                        return PTR_ERR(crtc_state);
+                }
+
+                ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+                if (ret) {
+                        drm_connector_list_iter_end(&connector_list_iter);
+                        return ret;
+                }
+                crtc_state->uapi.mode_changed = true;
+        }
+        drm_connector_list_iter_end(&connector_list_iter);
+
+        return 0;
+}
+
 static int
 intel_dp_mst_atomic_check(struct drm_connector *connector,
-                          struct drm_atomic_state *state)
+                          struct drm_atomic_state *_state)
 {
+        struct intel_atomic_state *state = to_intel_atomic_state(_state);
         struct drm_connector_state *new_conn_state =
-                drm_atomic_get_new_connector_state(state, connector);
+                drm_atomic_get_new_connector_state(&state->base, connector);
         struct drm_connector_state *old_conn_state =
-                drm_atomic_get_old_connector_state(state, connector);
+                drm_atomic_get_old_connector_state(&state->base, connector);
         struct intel_connector *intel_connector =
                 to_intel_connector(connector);
         struct drm_crtc *new_crtc = new_conn_state->crtc;
         struct drm_dp_mst_topology_mgr *mgr;
         int ret;

-        ret = intel_digital_connector_atomic_check(connector, state);
+        ret = intel_digital_connector_atomic_check(connector, &state->base);
         if (ret)
                 return ret;

+        ret = intel_dp_mst_atomic_master_trans_check(intel_connector, state);
+        if (ret)
+                return ret;
+
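A detail in the two new helpers that is easy to misread, and which is standard DRM atomic-helper behaviour rather than something this patch introduces: the compute helper uses intel_atomic_get_new_crtc_state(), a plain lookup of a CRTC already in the commit, while the check helper uses intel_atomic_get_crtc_state(), which pulls a CRTC into the commit (taking its locks) and can therefore fail. A schematic fragment restating the two calls from the hunks above with that distinction annotated (not new code, just the semantics):

    /* Lookup form: the CRTC must already be part of the atomic state;
     * returns NULL otherwise, so no error handling is needed here. */
    crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

    /* Acquiring form: adds the CRTC to the state if needed and may fail,
     * e.g. with ERR_PTR(-EDEADLK), which the caller must propagate. */
    crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
    if (IS_ERR(crtc_state))
            return PTR_ERR(crtc_state);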
@@ -183,12 +296,9 @@ intel_dp_mst_atomic_check
          * connector
          */
         if (new_crtc) {
-                struct intel_atomic_state *intel_state =
-                        to_intel_atomic_state(state);
                 struct intel_crtc *intel_crtc = to_intel_crtc(new_crtc);
                 struct intel_crtc_state *crtc_state =
-                        intel_atomic_get_new_crtc_state(intel_state,
-                                                        intel_crtc);
+                        intel_atomic_get_new_crtc_state(state, intel_crtc);

                 if (!crtc_state ||
                     !drm_atomic_crtc_needs_modeset(&crtc_state->uapi) ||

@@ -196,8 +306,8 @@ intel_dp_mst_atomic_check
                 return 0;
         }

-        mgr = &enc_to_mst(old_conn_state->best_encoder)->primary->dp.mst_mgr;
-        ret = drm_dp_atomic_release_vcpi_slots(state, mgr,
+        mgr = &enc_to_mst(to_intel_encoder(old_conn_state->best_encoder))->primary->dp.mst_mgr;
+        ret = drm_dp_atomic_release_vcpi_slots(&state->base, mgr,
                                                intel_connector->port);

         return ret;

@@ -207,7 +317,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *old_crtc_state,
                                  const struct drm_connector_state *old_conn_state)
 {
-        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
         struct intel_digital_port *intel_dig_port = intel_mst->primary;
         struct intel_dp *intel_dp = &intel_dig_port->dp;
         struct intel_connector *connector =

@@ -231,28 +341,50 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
                                       const struct intel_crtc_state *old_crtc_state,
                                       const struct drm_connector_state *old_conn_state)
 {
-        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
         struct intel_digital_port *intel_dig_port = intel_mst->primary;
         struct intel_dp *intel_dp = &intel_dig_port->dp;
         struct intel_connector *connector =
                 to_intel_connector(old_conn_state->connector);
+        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+        bool last_mst_stream;
+        u32 val;

         intel_dp->active_mst_links--;
+        last_mst_stream = intel_dp->active_mst_links == 0;
+        WARN_ON(INTEL_GEN(dev_priv) >= 12 && last_mst_stream &&
+                !intel_dp_mst_is_master_trans(old_crtc_state));
+
+        intel_crtc_vblank_off(old_crtc_state);
+
+        intel_disable_pipe(old_crtc_state);
+
+        drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+
+        val = I915_READ(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
+        val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+        I915_WRITE(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), val);
+
+        if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
+                                  DP_TP_STATUS_ACT_SENT, 1))
+                DRM_ERROR("Timed out waiting for ACT sent when disabling\n");
+        drm_dp_check_act_status(&intel_dp->mst_mgr);
+
+        drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
+
+        intel_ddi_disable_transcoder_func(old_crtc_state);

         if (INTEL_GEN(dev_priv) >= 9)
-                skylake_scaler_disable(old_crtc_state);
+                skl_scaler_disable(old_crtc_state);
         else
-                ironlake_pfit_disable(old_crtc_state);
+                ilk_pfit_disable(old_crtc_state);

+        /*
+         * Power down mst path before disabling the port, otherwise we end
+         * up getting interrupts from the sink upon detecting link loss.
+         */
+        drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
+                                     false);
+
         /*
          * From TGL spec: "If multi-stream slave transcoder: Configure
          * Transcoder Clock Select to direct no clock to the transcoder"

@@ -263,19 +395,6 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
         if (INTEL_GEN(dev_priv) < 12 || !last_mst_stream)
                 intel_ddi_disable_pipe_clock(old_crtc_state);

-        /* this can fail */
-        drm_dp_check_act_status(&intel_dp->mst_mgr);
-        /* and this can also fail */
-        drm_dp_update_payload_part2(&intel_dp->mst_mgr);
-
-        drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
-
-        /*
-         * Power down mst path before disabling the port, otherwise we end
-         * up getting interrupts from the sink upon detecting link loss.
-         */
-        drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
-                                     false);
-
         intel_mst->connector = NULL;
         if (last_mst_stream)

@@ -289,7 +408,7 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
                                         const struct intel_crtc_state *pipe_config,
                                         const struct drm_connector_state *conn_state)
 {
-        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
         struct intel_digital_port *intel_dig_port = intel_mst->primary;
         struct intel_dp *intel_dp = &intel_dig_port->dp;

@@ -302,7 +421,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
                                     const struct intel_crtc_state *pipe_config,
                                     const struct drm_connector_state *conn_state)
 {
-        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
         struct intel_digital_port *intel_dig_port = intel_mst->primary;
         struct intel_dp *intel_dp = &intel_dig_port->dp;
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

@@ -318,6 +437,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
         connector->encoder = encoder;
         intel_mst->connector = connector;
         first_mst_stream = intel_dp->active_mst_links == 0;
+        WARN_ON(INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
+                !intel_dp_mst_is_master_trans(pipe_config));

         DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);

@@ -360,7 +481,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
                                 const struct intel_crtc_state *pipe_config,
                                 const struct drm_connector_state *conn_state)
 {
-        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
         struct intel_digital_port *intel_dig_port = intel_mst->primary;
         struct intel_dp *intel_dp = &intel_dig_port->dp;
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

@@ -381,7 +502,7 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
                                           enum pipe *pipe)
 {
-        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
         *pipe = intel_mst->pipe;
         if (intel_mst->connector)
                 return true;

@@ -391,7 +512,7 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
                                         struct intel_crtc_state *pipe_config)
 {
-        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
         struct intel_digital_port *intel_dig_port = intel_mst->primary;

         intel_ddi_get_config(&intel_dig_port->base, pipe_config);

@@ -499,7 +620,7 @@ static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_fun

 static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
 {
-        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder));

         drm_encoder_cleanup(encoder);
         kfree(intel_mst);

@@ -723,3 +844,14 @@ intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
         drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
         /* encoders will get killed by normal cleanup */
 }
+
+bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
+{
+        return crtc_state->mst_master_transcoder == crtc_state->cpu_transcoder;
+}
+
+bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
+{
+        return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
+               crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
+}
@@ -6,10 +6,15 @@
 #ifndef __INTEL_DP_MST_H__
 #define __INTEL_DP_MST_H__

+#include <linux/types.h>
+
 struct intel_digital_port;
+struct intel_crtc_state;

 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
 void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
 int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port);
+bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
+bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);

 #endif /* __INTEL_DP_MST_H__ */
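The two predicates exported above partition the crtc_states of a gen12 MST stream: exactly one active stream member (the smallest pipe, per the compute helper) satisfies intel_dp_mst_is_master_trans(), the remaining members satisfy intel_dp_mst_is_slave_trans(), and non-MST states, where mst_master_transcoder stays INVALID_TRANSCODER, satisfy neither. A self-contained toy model (transcoders as small ints, -1 for invalid; not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_state { int cpu_trans, mst_master_trans; }; /* -1 = invalid */

    static bool is_master(const struct toy_state *s)
    {
            return s->mst_master_trans == s->cpu_trans;
    }

    static bool is_slave(const struct toy_state *s)
    {
            return s->mst_master_trans != -1 &&
                   s->mst_master_trans != s->cpu_trans;
    }

    int main(void)
    {
            struct toy_state b = { 1, 1 };  /* transcoder B, master is B */
            struct toy_state c = { 2, 1 };  /* transcoder C, master is B */
            struct toy_state e = { 0, -1 }; /* non-MST state */

            printf("B: master=%d slave=%d\n", is_master(&b), is_slave(&b)); /* 1 0 */
            printf("C: master=%d slave=%d\n", is_master(&c), is_slave(&c)); /* 0 1 */
            printf("non-MST: master=%d slave=%d\n",
                   is_master(&e), is_slave(&e));                            /* 0 0 */
            return 0;
    }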
@@ -642,7 +642,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
                               bool uniq_trans_scale)
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+        struct intel_digital_port *dport = enc_to_dig_port(encoder);
         struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
         enum dpio_channel ch = vlv_dport_to_channel(dport);
         enum pipe pipe = intel_crtc->pipe;

@@ -738,7 +738,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
                               bool reset)
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-        enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+        enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
         enum pipe pipe = crtc->pipe;
         u32 val;

@@ -781,7 +781,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
 void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
                             const struct intel_crtc_state *crtc_state)
 {
-        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+        struct intel_digital_port *dport = enc_to_dig_port(encoder);
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
         enum dpio_channel ch = vlv_dport_to_channel(dport);

@@ -861,7 +861,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
 void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
                                 const struct intel_crtc_state *crtc_state)
 {
-        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

@@ -940,7 +940,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,

 void chv_phy_release_cl2_override(struct intel_encoder *encoder)
 {
-        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+        struct intel_digital_port *dport = enc_to_dig_port(encoder);
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

         if (dport->release_cl2_override) {

@@ -989,7 +989,7 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
         struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+        struct intel_digital_port *dport = enc_to_dig_port(encoder);
         enum dpio_channel port = vlv_dport_to_channel(dport);
         enum pipe pipe = intel_crtc->pipe;

@@ -1014,7 +1014,7 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
 void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
                             const struct intel_crtc_state *crtc_state)
 {
-        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+        struct intel_digital_port *dport = enc_to_dig_port(encoder);
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
         enum dpio_channel port = vlv_dport_to_channel(dport);

@@ -1043,7 +1043,7 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
 void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
                                 const struct intel_crtc_state *crtc_state)
 {
-        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

@@ -1073,7 +1073,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
 void vlv_phy_reset_lanes(struct intel_encoder *encoder,
                          const struct intel_crtc_state *old_crtc_state)
 {
-        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+        struct intel_digital_port *dport = enc_to_dig_port(encoder);
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
         enum dpio_channel port = vlv_dport_to_channel(dport);
@@ -2972,8 +2972,8 @@ static void icl_update_active_dpll(struct intel_atomic_state *state,
         enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;

         primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
-                enc_to_mst(&encoder->base)->primary :
-                enc_to_dig_port(&encoder->base);
+                enc_to_mst(encoder)->primary :
+                enc_to_dig_port(encoder);

         if (primary_port &&
             (primary_port->tc_mode == TC_PORT_DP_ALT ||
@@ -45,8 +45,9 @@ struct intel_dsi {
         struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
         intel_wakeref_t io_wakeref[I915_MAX_PORTS];

-        /* GPIO Desc for CRC based Panel control */
+        /* GPIO Desc for panel and backlight control */
         struct gpio_desc *gpio_panel;
+        struct gpio_desc *gpio_backlight;

         struct intel_connector *attached_connector;

@@ -68,6 +69,9 @@ struct intel_dsi {
         /* number of DSI lanes */
         unsigned int lane_count;

+        /* i2c bus associated with the slave device */
+        int i2c_bus_num;
+
         /*
          * video mode pixel format
          *

@@ -141,9 +145,9 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
 #define for_each_dsi_phy(__phy, __phys_mask) \
         for_each_phy_masked(__phy, __phys_mask)

-static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
+static inline struct intel_dsi *enc_to_intel_dsi(struct intel_encoder *encoder)
 {
-        return container_of(encoder, struct intel_dsi, base.base);
+        return container_of(&encoder->base, struct intel_dsi, base.base);
 }

 static inline bool is_vid_mode(struct intel_dsi *intel_dsi)

@@ -158,7 +162,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)

 static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
 {
-        return enc_to_intel_dsi(&encoder->base)->ports;
+        return enc_to_intel_dsi(encoder)->ports;
 }

 /* icl_dsi.c */

@@ -203,6 +207,8 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);

 /* intel_dsi_vbt.c */
 bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
+void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
+void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
 void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
                                  enum mipi_seq seq_id);
 void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
@@ -46,7 +46,7 @@
 static u32 dcs_get_backlight(struct intel_connector *connector)
 {
 	struct intel_encoder *encoder = connector->encoder;
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	struct mipi_dsi_device *dsi_device;
 	u8 data = 0;
 	enum port port;
@@ -64,7 +64,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector)
 
 static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 level)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
 	struct mipi_dsi_device *dsi_device;
 	u8 data = level;
 	enum port port;
@@ -79,7 +79,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
 
 static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
 	struct mipi_dsi_device *dsi_device;
 	enum port port;
 
@@ -113,7 +113,7 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
 static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
 				 const struct drm_connector_state *conn_state)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
 	struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
 	struct mipi_dsi_device *dsi_device;
 	enum port port;

@@ -25,7 +25,10 @@
  */
 
 #include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
 #include <linux/mfd/intel_soc_pmic.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
 #include <linux/slab.h>
 
 #include <asm/intel-mid.h>
@@ -83,6 +86,12 @@ static struct gpio_map vlv_gpio_table[] = {
 	{ VLV_GPIO_NC_11_PANEL1_BKLTCTL },
 };
 
+struct i2c_adapter_lookup {
+	u16 slave_addr;
+	struct intel_dsi *intel_dsi;
+	acpi_handle dev_handle;
+};
+
 #define CHV_GPIO_IDX_START_N		0
 #define CHV_GPIO_IDX_START_E		73
 #define CHV_GPIO_IDX_START_SW		100
@@ -375,11 +384,98 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 	return data;
 }
 
+static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
+{
+	struct i2c_adapter_lookup *lookup = data;
+	struct intel_dsi *intel_dsi = lookup->intel_dsi;
+	struct acpi_resource_i2c_serialbus *sb;
+	struct i2c_adapter *adapter;
+	acpi_handle adapter_handle;
+	acpi_status status;
+
+	if (intel_dsi->i2c_bus_num >= 0 ||
+	    !i2c_acpi_get_i2c_resource(ares, &sb))
+		return 1;
+
+	if (lookup->slave_addr != sb->slave_address)
+		return 1;
+
+	status = acpi_get_handle(lookup->dev_handle,
+				 sb->resource_source.string_ptr,
+				 &adapter_handle);
+	if (ACPI_FAILURE(status))
+		return 1;
+
+	adapter = i2c_acpi_find_adapter_by_handle(adapter_handle);
+	if (adapter)
+		intel_dsi->i2c_bus_num = adapter->nr;
+
+	return 1;
+}
+
 static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
 {
-	DRM_DEBUG_KMS("Skipping I2C element execution\n");
+	struct drm_device *drm_dev = intel_dsi->base.base.dev;
+	struct device *dev = &drm_dev->pdev->dev;
+	struct i2c_adapter *adapter;
+	struct acpi_device *acpi_dev;
+	struct list_head resource_list;
+	struct i2c_adapter_lookup lookup;
+	struct i2c_msg msg;
+	int ret;
+	u8 vbt_i2c_bus_num = *(data + 2);
+	u16 slave_addr = *(u16 *)(data + 3);
+	u8 reg_offset = *(data + 5);
+	u8 payload_size = *(data + 6);
+	u8 *payload_data;
 
-	return data + *(data + 6) + 7;
+	if (intel_dsi->i2c_bus_num < 0) {
+		intel_dsi->i2c_bus_num = vbt_i2c_bus_num;
+
+		acpi_dev = ACPI_COMPANION(dev);
+		if (acpi_dev) {
+			memset(&lookup, 0, sizeof(lookup));
+			lookup.slave_addr = slave_addr;
+			lookup.intel_dsi = intel_dsi;
+			lookup.dev_handle = acpi_device_handle(acpi_dev);
+
+			INIT_LIST_HEAD(&resource_list);
+			acpi_dev_get_resources(acpi_dev, &resource_list,
+					       i2c_adapter_lookup,
+					       &lookup);
+			acpi_dev_free_resource_list(&resource_list);
+		}
+	}
+
+	adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
+	if (!adapter) {
+		DRM_DEV_ERROR(dev, "Cannot find a valid i2c bus for xfer\n");
+		goto err_bus;
+	}
+
+	payload_data = kzalloc(payload_size + 1, GFP_KERNEL);
+	if (!payload_data)
+		goto err_alloc;
+
+	payload_data[0] = reg_offset;
+	memcpy(&payload_data[1], (data + 7), payload_size);
+
+	msg.addr = slave_addr;
+	msg.flags = 0;
+	msg.len = payload_size + 1;
+	msg.buf = payload_data;
+
+	ret = i2c_transfer(adapter, &msg, 1);
+	if (ret < 0)
+		DRM_DEV_ERROR(dev,
+			      "Failed to xfer payload of size (%u) to reg (%u)\n",
+			      payload_size, reg_offset);
+
+	kfree(payload_data);
+err_alloc:
+	i2c_put_adapter(adapter);
+err_bus:
+	return data + payload_size + 7;
+}
 
 static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
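
For reference, the byte offsets the new mipi_exec_i2c() consumes can be exercised standalone. This sketch mirrors only the offset math visible above (bus number at byte 2, little-endian slave address at bytes 3-4, register offset at byte 5, payload size at byte 6, payload from byte 7, next element at payload_size + 7); the example blob is made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct i2c_element {
	uint8_t bus_num;
	uint16_t slave_addr;
	uint8_t reg_offset;
	uint8_t payload_size;
	const uint8_t *payload;
};

static const uint8_t *parse_i2c_element(const uint8_t *data,
					struct i2c_element *out)
{
	out->bus_num = data[2];
	memcpy(&out->slave_addr, data + 3, sizeof(out->slave_addr)); /* little-endian host assumed */
	out->reg_offset = data[5];
	out->payload_size = data[6];
	out->payload = data + 7;

	return data + out->payload_size + 7; /* next sequence element */
}

int main(void)
{
	/* Hypothetical element: bus 4, slave 0x2c, reg 0x10, 2-byte payload. */
	const uint8_t blob[] = { 0, 0, 4, 0x2c, 0x00, 0x10, 2, 0xab, 0xcd };
	struct i2c_element el;
	const uint8_t *next = parse_i2c_element(blob, &el);

	printf("bus %u addr 0x%02x reg 0x%02x len %u, consumed %zu bytes\n",
	       el.bus_num, el.slave_addr, el.reg_offset, el.payload_size,
	       (size_t)(next - blob));
	return 0;
}
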
@@ -453,8 +549,8 @@ static const char *sequence_name(enum mipi_seq seq_id)
 		return "(unknown)";
 }
 
-void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
-				 enum mipi_seq seq_id)
+static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
+			       enum mipi_seq seq_id)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
 	const u8 *data;
@@ -519,6 +615,22 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
 	}
 }
 
+void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
+				 enum mipi_seq seq_id)
+{
+	if (seq_id == MIPI_SEQ_POWER_ON && intel_dsi->gpio_panel)
+		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
+	if (seq_id == MIPI_SEQ_BACKLIGHT_ON && intel_dsi->gpio_backlight)
+		gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 1);
+
+	intel_dsi_vbt_exec(intel_dsi, seq_id);
+
+	if (seq_id == MIPI_SEQ_POWER_OFF && intel_dsi->gpio_panel)
+		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
+	if (seq_id == MIPI_SEQ_BACKLIGHT_OFF && intel_dsi->gpio_backlight)
+		gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 0);
+}
+
 void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
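
The wrapper above moves the panel/backlight GPIO toggling that vlv_dsi.c used to do by hand into the sequence executor itself, so the GPIOs always bracket the corresponding VBT sequences. A toy model of that ordering guarantee, with hypothetical names in plain userspace C:

#include <stdbool.h>
#include <stdio.h>

enum mipi_seq { SEQ_POWER_ON, SEQ_BACKLIGHT_ON, SEQ_BACKLIGHT_OFF, SEQ_POWER_OFF };

static bool gpio_panel, gpio_backlight;

static void exec_vbt_sequence(enum mipi_seq seq)
{
	printf("exec VBT sequence %d (panel=%d backlight=%d)\n",
	       seq, gpio_panel, gpio_backlight);
}

/* Mirrors the new wrapper: raise GPIOs before "on" sequences, drop them after "off". */
static void exec_sequence(enum mipi_seq seq)
{
	if (seq == SEQ_POWER_ON)
		gpio_panel = true;
	if (seq == SEQ_BACKLIGHT_ON)
		gpio_backlight = true;

	exec_vbt_sequence(seq);

	if (seq == SEQ_POWER_OFF)
		gpio_panel = false;
	if (seq == SEQ_BACKLIGHT_OFF)
		gpio_backlight = false;
}

int main(void)
{
	exec_sequence(SEQ_POWER_ON);
	exec_sequence(SEQ_BACKLIGHT_ON);
	exec_sequence(SEQ_BACKLIGHT_OFF);
	exec_sequence(SEQ_POWER_OFF);
	return 0;
}
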
@@ -664,6 +776,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 	intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
 	intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
 
+	intel_dsi->i2c_bus_num = -1;
+
 	/* a regular driver would get the device in probe */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device);
@@ -671,3 +785,110 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 
 	return true;
 }
+
+/*
+ * On some BYT/CHT devs some sequences are incomplete and we need to manually
+ * control some GPIOs. We need to add a GPIO lookup table before we get these.
+ * If the GOP did not initialize the panel (HDMI inserted) we may need to also
+ * change the pinmux for the SoC's PWM0 pin from GPIO to PWM.
+ */
+static struct gpiod_lookup_table pmic_panel_gpio_table = {
+	/* Intel GFX is consumer */
+	.dev_id = "0000:00:02.0",
+	.table = {
+		/* Panel EN/DISABLE */
+		GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH),
+		{ }
+	},
+};
+
+static struct gpiod_lookup_table soc_panel_gpio_table = {
+	.dev_id = "0000:00:02.0",
+	.table = {
+		GPIO_LOOKUP("INT33FC:01", 10, "backlight", GPIO_ACTIVE_HIGH),
+		GPIO_LOOKUP("INT33FC:01", 11, "panel", GPIO_ACTIVE_HIGH),
+		{ }
+	},
+};
+
+static const struct pinctrl_map soc_pwm_pinctrl_map[] = {
+	PIN_MAP_MUX_GROUP("0000:00:02.0", "soc_pwm0", "INT33FC:00",
+			  "pwm0_grp", "pwm"),
+};
+
+void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
+{
+	struct drm_device *dev = intel_dsi->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+	enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+	bool want_backlight_gpio = false;
+	bool want_panel_gpio = false;
+	struct pinctrl *pinctrl;
+	int ret;
+
+	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	    mipi_config->pwm_blc == PPS_BLC_PMIC) {
+		gpiod_add_lookup_table(&pmic_panel_gpio_table);
+		want_panel_gpio = true;
+	}
+
+	if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+		gpiod_add_lookup_table(&soc_panel_gpio_table);
+		want_panel_gpio = true;
+		want_backlight_gpio = true;
+
+		/* Ensure PWM0 pin is muxed as PWM instead of GPIO */
+		ret = pinctrl_register_mappings(soc_pwm_pinctrl_map,
+						ARRAY_SIZE(soc_pwm_pinctrl_map));
+		if (ret)
+			DRM_ERROR("Failed to register pwm0 pinmux mapping\n");
+
+		pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0");
+		if (IS_ERR(pinctrl))
+			DRM_ERROR("Failed to set pinmux to PWM\n");
+	}
+
+	if (want_panel_gpio) {
+		intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
+		if (IS_ERR(intel_dsi->gpio_panel)) {
+			DRM_ERROR("Failed to own gpio for panel control\n");
+			intel_dsi->gpio_panel = NULL;
+		}
+	}
+
+	if (want_backlight_gpio) {
+		intel_dsi->gpio_backlight =
+			gpiod_get(dev->dev, "backlight", flags);
+		if (IS_ERR(intel_dsi->gpio_backlight)) {
+			DRM_ERROR("Failed to own gpio for backlight control\n");
+			intel_dsi->gpio_backlight = NULL;
+		}
+	}
+}
+
+void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
+{
+	struct drm_device *dev = intel_dsi->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+
+	if (intel_dsi->gpio_panel) {
+		gpiod_put(intel_dsi->gpio_panel);
+		intel_dsi->gpio_panel = NULL;
+	}
+
+	if (intel_dsi->gpio_backlight) {
+		gpiod_put(intel_dsi->gpio_backlight);
+		intel_dsi->gpio_backlight = NULL;
+	}
+
+	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	    mipi_config->pwm_blc == PPS_BLC_PMIC)
+		gpiod_remove_lookup_table(&pmic_panel_gpio_table);
+
+	if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+		pinctrl_unregister_mappings(soc_pwm_pinctrl_map);
+		gpiod_remove_lookup_table(&soc_panel_gpio_table);
+	}
+}

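
Worth noting about the pair above: gpiod_get() can only resolve the "panel"/"backlight" con_ids because the lookup tables are registered first, and cleanup must drop them again or a driver rebind would register duplicates. A condensed, hedged sketch of that consumer-side pattern using the same gpiod APIs; the chip label and pin number here are hypothetical:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h>

static struct gpiod_lookup_table demo_gpio_table = {
	.dev_id = "0000:00:02.0",	/* consumer device */
	.table = {
		GPIO_LOOKUP("demo-gpiochip", 11, "panel", GPIO_ACTIVE_HIGH),
		{ }
	},
};

/* Register the mapping first, then gpiod_get() can resolve "panel". */
static struct gpio_desc *demo_acquire_panel_gpio(struct device *dev, bool is_on)
{
	struct gpio_desc *desc;

	gpiod_add_lookup_table(&demo_gpio_table);
	desc = gpiod_get(dev, "panel", is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
	if (IS_ERR(desc)) {
		gpiod_remove_lookup_table(&demo_gpio_table);
		return NULL;
	}
	return desc;
}

/* Mirror image of acquire: release the descriptor, then the table. */
static void demo_release_panel_gpio(struct gpio_desc *desc)
{
	if (desc)
		gpiod_put(desc);
	gpiod_remove_lookup_table(&demo_gpio_table);
}
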
@@ -125,7 +125,7 @@ static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
 	return container_of(encoder, struct intel_dvo, base);
 }
 
-static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
+static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector)
 {
 	return enc_to_dvo(intel_attached_encoder(connector));
 }
@@ -134,7 +134,7 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
 	u32 tmp;
 
 	tmp = I915_READ(intel_dvo->dev.dvo_reg);
@@ -220,7 +220,7 @@ static enum drm_mode_status
 intel_dvo_mode_valid(struct drm_connector *connector,
 		     struct drm_display_mode *mode)
 {
-	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+	struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
 	const struct drm_display_mode *fixed_mode =
 		to_intel_connector(connector)->panel.fixed_mode;
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -311,7 +311,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
 static enum drm_connector_status
 intel_dvo_detect(struct drm_connector *connector, bool force)
 {
-	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+	struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 		      connector->base.id, connector->name);
 	return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);

@@ -126,8 +126,8 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
 	}
 }
 
-static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
-						 enum pipe pipe, bool enable)
+static void ilk_set_fifo_underrun_reporting(struct drm_device *dev,
+					    enum pipe pipe, bool enable)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 bit = (pipe == PIPE_A) ?
@@ -139,7 +139,7 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
 		ilk_disable_display_irq(dev_priv, bit);
 }
 
-static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
+static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
@@ -157,9 +157,9 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
 		DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
 }
 
-static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
-						  enum pipe pipe,
-						  bool enable, bool old)
+static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
+					    enum pipe pipe, bool enable,
+					    bool old)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	if (enable) {
@@ -180,8 +180,8 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
 	}
 }
 
-static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
-						  enum pipe pipe, bool enable)
+static void bdw_set_fifo_underrun_reporting(struct drm_device *dev,
+					    enum pipe pipe, bool enable)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
@@ -264,11 +264,11 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
 	if (HAS_GMCH(dev_priv))
 		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
 	else if (IS_GEN_RANGE(dev_priv, 5, 6))
-		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
+		ilk_set_fifo_underrun_reporting(dev, pipe, enable);
 	else if (IS_GEN(dev_priv, 7))
-		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
+		ivb_set_fifo_underrun_reporting(dev, pipe, enable, old);
 	else if (INTEL_GEN(dev_priv) >= 8)
-		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
+		bdw_set_fifo_underrun_reporting(dev, pipe, enable);
 
 	return old;
 }
@@ -427,7 +427,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
 		if (HAS_GMCH(dev_priv))
 			i9xx_check_fifo_underruns(crtc);
 		else if (IS_GEN(dev_priv, 7))
-			ivybridge_check_fifo_underruns(crtc);
+			ivb_check_fifo_underruns(crtc);
 	}
 
 	spin_unlock_irq(&dev_priv->irq_lock);

@@ -85,16 +85,17 @@ assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
 		     "HDMI transcoder function enabled, expecting disabled\n");
 }
 
-struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder)
 {
 	struct intel_digital_port *intel_dig_port =
-		container_of(encoder, struct intel_digital_port, base.base);
+		container_of(&encoder->base, struct intel_digital_port,
+			     base.base);
 	return &intel_dig_port->hdmi;
 }
 
-static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
+static struct intel_hdmi *intel_attached_hdmi(struct intel_connector *connector)
 {
-	return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
+	return enc_to_intel_hdmi(intel_attached_encoder(connector));
 }
 
 static u32 g4x_infoframe_index(unsigned int type)
@@ -602,7 +603,7 @@ u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	u32 val, ret = 0;
 	int i;
 
@@ -646,7 +647,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
 				  enum hdmi_infoframe_type type,
 				  const union hdmi_infoframe *frame)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	u8 buffer[VIDEO_DIP_DATA_SIZE];
 	ssize_t len;
 
@@ -675,7 +676,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
 			  enum hdmi_infoframe_type type,
 			  union hdmi_infoframe *frame)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	u8 buffer[VIDEO_DIP_DATA_SIZE];
 	int ret;
 
@@ -855,7 +856,7 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
 			       const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
 	i915_reg_t reg = VIDEO_DIP_CTL;
 	u32 val = I915_READ(reg);
@@ -1038,7 +1039,7 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
@@ -1097,7 +1098,7 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 
@@ -1146,7 +1147,7 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 	u32 port = VIDEO_DIP_PORT(encoder->port);
@@ -1737,7 +1738,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
 	u32 hdmi_val;
 
@@ -1774,7 +1775,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
 				    enum pipe *pipe)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	intel_wakeref_t wakeref;
 	bool ret;
 
@@ -1793,7 +1794,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
 static void intel_hdmi_get_config(struct intel_encoder *encoder,
 				  struct intel_crtc_state *pipe_config)
 {
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 tmp, flags = 0;
@@ -1874,7 +1875,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	u32 temp;
 
 	temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -1896,7 +1897,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	u32 temp;
 
 	temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -1947,7 +1948,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	enum pipe pipe = crtc->pipe;
 	u32 temp;
 
@@ -2007,7 +2008,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	struct intel_digital_port *intel_dig_port =
 		hdmi_to_dig_port(intel_hdmi);
 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
@@ -2160,7 +2161,7 @@ static enum drm_mode_status
 intel_hdmi_mode_valid(struct drm_connector *connector,
 		      struct drm_display_mode *mode)
 {
-	struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+	struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
 	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum drm_mode_status status;
@@ -2316,7 +2317,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
 				  struct intel_crtc_state *crtc_state,
 				  int clock, bool force_dvi)
 {
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	int bpc;
 
 	for (bpc = 12; bpc >= 10; bpc -= 2) {
@@ -2334,7 +2335,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
 				    struct intel_crtc_state *crtc_state,
 				    bool force_dvi)
 {
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->hw.adjusted_mode;
 	int bpc, clock = adjusted_mode->crtc_clock;
@@ -2404,7 +2405,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
 			      struct intel_crtc_state *pipe_config,
 			      struct drm_connector_state *conn_state)
 {
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 	struct drm_connector *connector = conn_state->connector;
@@ -2496,7 +2497,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
 static void
 intel_hdmi_unset_edid(struct drm_connector *connector)
 {
-	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
 
 	intel_hdmi->has_hdmi_sink = false;
 	intel_hdmi->has_audio = false;
@@ -2512,7 +2513,7 @@ static void
 intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+	struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
 	enum port port = hdmi_to_dig_port(hdmi)->base.port;
 	struct i2c_adapter *adapter =
 		intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
@@ -2559,7 +2560,7 @@ static bool
 intel_hdmi_set_edid(struct drm_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
 	intel_wakeref_t wakeref;
 	struct edid *edid;
 	bool connected = false;
@@ -2600,7 +2601,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
 	enum drm_connector_status status = connector_status_disconnected;
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
 	struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
 	intel_wakeref_t wakeref;
 
@@ -2663,7 +2664,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
 				  const struct drm_connector_state *conn_state)
 {
 	struct intel_digital_port *intel_dig_port =
-		enc_to_dig_port(&encoder->base);
+		enc_to_dig_port(encoder);
 
 	intel_hdmi_prepare(encoder, pipe_config);
 
@@ -2676,7 +2677,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
 				const struct intel_crtc_state *pipe_config,
 				const struct drm_connector_state *conn_state)
 {
-	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dport = enc_to_dig_port(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
 	vlv_phy_pre_encoder_enable(encoder, pipe_config);
@@ -2746,7 +2747,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
 				const struct intel_crtc_state *pipe_config,
 				const struct drm_connector_state *conn_state)
 {
-	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dport = enc_to_dig_port(encoder);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
@@ -2772,7 +2773,7 @@ static struct i2c_adapter *
 intel_hdmi_get_i2c_adapter(struct drm_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
 
 	return intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
 }
@@ -2816,7 +2817,7 @@ intel_hdmi_connector_register(struct drm_connector *connector)
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
 {
-	struct cec_notifier *n = intel_attached_hdmi(connector)->cec_notifier;
+	struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier;
 
 	cec_notifier_conn_unregister(n);
 
@@ -2906,7 +2907,7 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
 				       bool scrambling)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	struct drm_scrambling *sink_scrambling =
 		&connector->display_info.hdmi.scdc.scrambling;
 	struct i2c_adapter *adapter =

@@ -29,7 +29,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
 		     enum port port);
 void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 			       struct intel_connector *intel_connector);
-struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder);
 int intel_hdmi_compute_config(struct intel_encoder *encoder,
 			      struct intel_crtc_state *pipe_config,
 			      struct drm_connector_state *conn_state);

@@ -302,7 +302,7 @@ intel_encoder_hotplug(struct intel_encoder *encoder,
 static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
 {
 	return intel_encoder_is_dig_port(encoder) &&
-		enc_to_dig_port(&encoder->base)->hpd_pulse != NULL;
+		enc_to_dig_port(encoder)->hpd_pulse != NULL;
 }
 
 static void i915_digport_work_func(struct work_struct *work)
@@ -335,7 +335,7 @@ static void i915_digport_work_func(struct work_struct *work)
 		if (!long_hpd && !short_hpd)
 			continue;
 
-		dig_port = enc_to_dig_port(&encoder->base);
+		dig_port = enc_to_dig_port(encoder);
 
 		ret = dig_port->hpd_pulse(dig_port, long_hpd);
 		if (ret == IRQ_NONE) {

@@ -434,8 +434,8 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
 			    const void *frame, ssize_t len)
 {
 	bool ret;
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-	struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
 
 	/* LSPCON only needs AVI IF */
 	if (type != HDMI_INFOFRAME_TYPE_AVI)
@@ -472,7 +472,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
 	ssize_t ret;
 	union hdmi_infoframe frame;
 	u8 buf[VIDEO_DIP_DATA_SIZE];
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct intel_lspcon *lspcon = &dig_port->lspcon;
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->hw.adjusted_mode;
@@ -522,7 +522,7 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
 			      const struct intel_crtc_state *pipe_config)
 {
 	/* FIXME actually read this from the hw */
-	return enc_to_intel_lspcon(&encoder->base)->active;
+	return enc_to_intel_lspcon(encoder)->active;
 }
 
 void lspcon_resume(struct intel_lspcon *lspcon)

@@ -98,7 +98,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
 		break;
 	case INTEL_OUTPUT_DP:
 	case INTEL_OUTPUT_EDP:
-		dig_port = enc_to_dig_port(&encoder->base);
+		dig_port = enc_to_dig_port(encoder);
 		switch (dig_port->base.port) {
 		case PORT_B:
 			*source = INTEL_PIPE_CRC_SOURCE_DP_B;

@@ -1523,3 +1523,27 @@ bool intel_psr_enabled(struct intel_dp *intel_dp)
 
 	return ret;
 }
+
+void intel_psr_atomic_check(struct drm_connector *connector,
+			    struct drm_connector_state *old_state,
+			    struct drm_connector_state *new_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct intel_connector *intel_connector;
+	struct intel_digital_port *dig_port;
+	struct drm_crtc_state *crtc_state;
+
+	if (!CAN_PSR(dev_priv) || !new_state->crtc ||
+	    dev_priv->psr.initially_probed)
+		return;
+
+	intel_connector = to_intel_connector(connector);
+	dig_port = enc_to_dig_port(intel_connector->encoder);
+	if (dev_priv->psr.dp != &dig_port->dp)
+		return;
+
+	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+						   new_state->crtc);
+	crtc_state->mode_changed = true;
+	dev_priv->psr.initially_probed = true;
+}

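
The new intel_psr_atomic_check() is a one-shot: the first commit after boot that touches the PSR-capable connector gets upgraded to a full modeset so PSR state is fully probed, and initially_probed keeps it from firing again. Restated generically as a hedged kernel-context sketch of the same DRM pattern (demo_force_initial_modeset and the once flag are hypothetical, not additional i915 API):

#include <drm/drm_atomic.h>

/* A connector's atomic_check can force a full modeset once by
 * flagging the new CRTC state. */
static void demo_force_initial_modeset(struct drm_connector_state *new_state,
				       bool *once)
{
	struct drm_crtc_state *crtc_state;

	if (*once || !new_state->crtc)
		return;

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	if (crtc_state) {
		crtc_state->mode_changed = true; /* triggers a full modeset */
		*once = true;
	}
}
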
@@ -8,6 +8,8 @@
 
 #include "intel_frontbuffer.h"
 
+struct drm_connector;
+struct drm_connector_state;
 struct drm_i915_private;
 struct intel_crtc_state;
 struct intel_dp;
@@ -35,5 +37,8 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp);
 int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
 			    u32 *out_value);
 bool intel_psr_enabled(struct intel_dp *intel_dp);
+void intel_psr_atomic_check(struct drm_connector *connector,
+			    struct drm_connector_state *old_state,
+			    struct drm_connector_state *new_state);
 
 #endif /* __INTEL_PSR_H__ */

@@ -180,7 +180,7 @@ static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
 	return container_of(encoder, struct intel_sdvo, base);
 }
 
-static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+static struct intel_sdvo *intel_attached_sdvo(struct intel_connector *connector)
 {
 	return to_sdvo(intel_attached_encoder(connector));
 }
@@ -1551,7 +1551,7 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
 {
 	struct intel_sdvo_connector *intel_sdvo_connector =
 		to_intel_sdvo_connector(&connector->base);
-	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 	u16 active_outputs = 0;
 
 	intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
@@ -1823,7 +1823,7 @@ static enum drm_mode_status
 intel_sdvo_mode_valid(struct drm_connector *connector,
 		      struct drm_display_mode *mode)
 {
-	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
 	struct intel_sdvo_connector *intel_sdvo_connector =
 		to_intel_sdvo_connector(connector);
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -1941,7 +1941,7 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
 static struct edid *
 intel_sdvo_get_edid(struct drm_connector *connector)
 {
-	struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
 	return drm_get_edid(connector, &sdvo->ddc);
 }
 
@@ -1959,7 +1959,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
 static enum drm_connector_status
 intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
 {
-	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
 	struct intel_sdvo_connector *intel_sdvo_connector =
 		to_intel_sdvo_connector(connector);
 	enum drm_connector_status status;
@@ -2028,7 +2028,7 @@ static enum drm_connector_status
 intel_sdvo_detect(struct drm_connector *connector, bool force)
 {
 	u16 response;
-	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
 	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 	enum drm_connector_status ret;
 
@@ -2175,7 +2175,7 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
 
 static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 {
-	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
 	const struct drm_connector_state *conn_state = connector->state;
 	struct intel_sdvo_sdtv_resolution_request tv_res;
 	u32 reply = 0, format_map = 0;
@@ -2215,7 +2215,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 
 static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
-	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct drm_display_mode *newmode;
 
@@ -2379,7 +2379,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
 static int
 intel_sdvo_connector_register(struct drm_connector *connector)
 {
-	struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
 	int ret;
 
 	ret = intel_connector_register(connector);
@@ -2394,7 +2394,7 @@ intel_sdvo_connector_register(struct drm_connector *connector)
 static void
 intel_sdvo_connector_unregister(struct drm_connector *connector)
 {
-	struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
 
 	sysfs_remove_link(&connector->kdev->kobj,
 			  sdvo->ddc.dev.kobj.name);
@@ -2932,7 +2932,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
 
 	list_for_each_entry_safe(connector, tmp,
 				 &dev->mode_config.connector_list, head) {
-		if (intel_attached_encoder(connector) == &intel_sdvo->base) {
+		if (intel_attached_encoder(to_intel_connector(connector)) == &intel_sdvo->base) {
 			drm_connector_unregister(connector);
 			intel_connector_destroy(connector);
 		}

@@ -583,15 +583,16 @@ skl_program_plane(struct intel_plane *plane,
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
 	u32 surf_addr = plane_state->color_plane[color_plane].offset;
 	u32 stride = skl_plane_stride(plane_state, color_plane);
-	u32 aux_dist = plane_state->color_plane[1].offset - surf_addr;
-	u32 aux_stride = skl_plane_stride(plane_state, 1);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	int aux_plane = intel_main_to_aux_plane(fb, color_plane);
+	u32 aux_dist = plane_state->color_plane[aux_plane].offset - surf_addr;
+	u32 aux_stride = skl_plane_stride(plane_state, aux_plane);
 	int crtc_x = plane_state->uapi.dst.x1;
 	int crtc_y = plane_state->uapi.dst.y1;
 	u32 x = plane_state->color_plane[color_plane].x;
 	u32 y = plane_state->color_plane[color_plane].y;
 	u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
 	u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
-	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	u8 alpha = plane_state->hw.alpha >> 8;
 	u32 plane_color_ctl = 0;
 	unsigned long irqflags;
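
The hunk above stops hard-coding color plane 1 as the AUX surface: with the gen12 media-compressed layouts each main plane can have its own AUX/CCS companion, so the offset delta must be computed against the right plane. A standalone sketch of that arithmetic with a made-up four-plane layout (main_to_aux_plane and the offsets are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: planes 0/1 are Y/UV content, planes 2/3 their
 * compression-control surfaces. */
static int main_to_aux_plane(int color_plane)
{
	return color_plane + 2;
}

int main(void)
{
	const uint32_t offset[] = { 0x0000, 0x100000, 0x180000, 0x1a0000 };

	for (int color_plane = 0; color_plane < 2; color_plane++) {
		int aux = main_to_aux_plane(color_plane);
		uint32_t surf_addr = offset[color_plane];
		uint32_t aux_dist = offset[aux] - surf_addr;

		printf("plane %d: aux plane %d, aux_dist 0x%x\n",
		       color_plane, aux, aux_dist);
	}
	return 0;
}
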
@@ -2106,7 +2107,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
 	     fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
 	     fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
 	     fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
-	     fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS)) {
+	     fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+	     fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) {
 		DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
 		return -EINVAL;
 	}
@@ -2578,7 +2580,16 @@ static const u64 skl_plane_format_modifiers_ccs[] = {
 	DRM_FORMAT_MOD_INVALID
 };
 
-static const u64 gen12_plane_format_modifiers_ccs[] = {
+static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
+	I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
+	I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+	I915_FORMAT_MOD_Y_TILED,
+	I915_FORMAT_MOD_X_TILED,
+	DRM_FORMAT_MOD_LINEAR,
+	DRM_FORMAT_MOD_INVALID
+};
+
+static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
 	I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
 	I915_FORMAT_MOD_Y_TILED,
 	I915_FORMAT_MOD_X_TILED,
@@ -2743,10 +2754,21 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
 	}
 }
 
+static bool gen12_plane_supports_mc_ccs(enum plane_id plane_id)
+{
+	return plane_id < PLANE_SPRITE4;
+}
+
 static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
 					     u32 format, u64 modifier)
 {
+	struct intel_plane *plane = to_intel_plane(_plane);
+
 	switch (modifier) {
+	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+		if (!gen12_plane_supports_mc_ccs(plane->id))
+			return false;
+		/* fall through */
 	case DRM_FORMAT_MOD_LINEAR:
 	case I915_FORMAT_MOD_X_TILED:
 	case I915_FORMAT_MOD_Y_TILED:
@@ -2764,11 +2786,6 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
 		if (is_ccs_modifier(modifier))
 			return true;
 		/* fall through */
-	case DRM_FORMAT_RGB565:
-	case DRM_FORMAT_XRGB2101010:
-	case DRM_FORMAT_XBGR2101010:
-	case DRM_FORMAT_ARGB2101010:
-	case DRM_FORMAT_ABGR2101010:
 	case DRM_FORMAT_YUYV:
 	case DRM_FORMAT_YVYU:
 	case DRM_FORMAT_UYVY:
@@ -2777,6 +2794,14 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
 	case DRM_FORMAT_P010:
 	case DRM_FORMAT_P012:
 	case DRM_FORMAT_P016:
+		if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)
+			return true;
+		/* fall through */
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
 	case DRM_FORMAT_XVYU2101010:
 	case DRM_FORMAT_C8:
 	case DRM_FORMAT_XBGR16161616F:
@@ -2910,6 +2935,14 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
 	}
 }
 
+static const u64 *gen12_get_plane_modifiers(enum plane_id plane_id)
+{
+	if (gen12_plane_supports_mc_ccs(plane_id))
+		return gen12_plane_format_modifiers_mc_ccs;
+	else
+		return gen12_plane_format_modifiers_rc_ccs;
+}
+
 static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
 			      enum pipe pipe, enum plane_id plane_id)
 {
@@ -2975,7 +3008,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
 
 	plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
 	if (INTEL_GEN(dev_priv) >= 12) {
-		modifiers = gen12_plane_format_modifiers_ccs;
+		modifiers = gen12_get_plane_modifiers(plane_id);
 		plane_funcs = &gen12_plane_funcs;
 	} else {
 		if (plane->has_ccs)

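
gen12_get_plane_modifiers() is a capability gate: planes that can feed the media decompression path advertise the MC CCS modifier list, the rest fall back to the RC-only list. The same selection logic, reduced to a runnable sketch with placeholder modifier codes (the real values live in drm_fourcc.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MOD_LINEAR	0ull
#define MOD_X_TILED	1ull
#define MOD_Y_TILED	2ull
#define MOD_RC_CCS	3ull
#define MOD_MC_CCS	4ull
#define MOD_INVALID	~0ull

static const uint64_t mods_mc_ccs[] = { MOD_MC_CCS, MOD_RC_CCS, MOD_Y_TILED,
					MOD_X_TILED, MOD_LINEAR, MOD_INVALID };
static const uint64_t mods_rc_ccs[] = { MOD_RC_CCS, MOD_Y_TILED, MOD_X_TILED,
					MOD_LINEAR, MOD_INVALID };

/* Mirrors gen12_plane_supports_mc_ccs(): only the first few planes qualify. */
static bool plane_supports_mc_ccs(int plane_id)
{
	return plane_id < 4;	/* stand-in for the PLANE_SPRITE4 cutoff */
}

static const uint64_t *get_plane_modifiers(int plane_id)
{
	return plane_supports_mc_ccs(plane_id) ? mods_mc_ccs : mods_rc_ccs;
}

int main(void)
{
	for (int id = 0; id < 6; id++)
		printf("plane %d advertises %s list\n", id,
		       get_plane_modifiers(id) == mods_mc_ccs ? "MC CCS" : "RC CCS");
	return 0;
}
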
@@ -898,7 +898,7 @@ static struct intel_tv *enc_to_tv(struct intel_encoder *encoder)
 	return container_of(encoder, struct intel_tv, base);
 }
 
-static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+static struct intel_tv *intel_attached_tv(struct intel_connector *connector)
 {
 	return enc_to_tv(intel_attached_encoder(connector));
 }
@@ -1527,7 +1527,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
 		   ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
 		    (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
 
-	assert_pipe_disabled(dev_priv, intel_crtc->pipe);
+	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
 
 	/* Filter ctl must be set before TV_WIN_SIZE */
 	tv_filter_ctl = TV_AUTO_SCALE;
@@ -1662,7 +1662,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
  */
 static void intel_tv_find_better_format(struct drm_connector *connector)
 {
-	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
 	const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
 	int i;
 
@@ -1689,7 +1689,7 @@ intel_tv_detect(struct drm_connector *connector,
 		struct drm_modeset_acquire_ctx *ctx,
 		bool force)
 {
-	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
 	enum drm_connector_status status;
 	int type;
 

@@ -943,7 +943,7 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
 				    const struct intel_crtc_state *crtc_state)
 {
 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	struct mipi_dsi_device *dsi;
 	struct drm_dsc_picture_parameter_set pps;
 	enum port port;
@@ -961,7 +961,7 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
 static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
 				   const struct intel_crtc_state *crtc_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
 	struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;

@ -23,7 +23,6 @@
|
|||
* Author: Jani Nikula <jani.nikula@intel.com>
|
||||
*/
|
||||
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
|
@ -319,7 +318,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
|
|||
static bool glk_dsi_enable_io(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
|
||||
enum port port;
|
||||
u32 tmp;
|
||||
bool cold_boot = false;
|
||||
|
@ -367,7 +366,7 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
|
|||
static void glk_dsi_device_ready(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
|
||||
enum port port;
|
||||
u32 val;
|
||||
|
||||
|
@ -438,7 +437,7 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
|
|||
static void bxt_dsi_device_ready(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
|
||||
enum port port;
|
||||
u32 val;
|
||||
|
||||
|
@ -465,7 +464,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
|
|||
static void vlv_dsi_device_ready(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
|
||||
enum port port;
|
||||
u32 val;
|
||||
|
||||
|
@ -516,7 +515,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
|
|||
static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
|
||||
enum port port;
|
||||
u32 val;
|
||||
|
||||
|
@ -546,7 +545,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
|
|||
static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
|
||||
enum port port;
|
||||
u32 tmp;
|
||||
|
||||
|
@ -579,7 +578,7 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
|
|||
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
|
||||
enum port port;
|
||||
|
||||
DRM_DEBUG_KMS("\n");
|
||||
|
@ -625,7 +624,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
|
|||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
|
||||
enum port port;
|
||||
|
||||
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
|
||||
|
@ -681,7 +680,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
|
|||
{
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
|
||||
enum port port;
|
||||
|
||||
for_each_dsi_port(port, intel_dsi->ports) {
|
||||
|
@ -745,7 +744,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
|
|||
const struct intel_crtc_state *pipe_config,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
|
||||
struct drm_crtc *crtc = pipe_config->uapi.crtc;
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
|
@ -793,9 +792,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
|
|||
if (!IS_GEMINILAKE(dev_priv))
|
||||
	intel_dsi_prepare(encoder, pipe_config);

	/* Power on, try both CRC pmic gpio and VBT */
	if (intel_dsi->gpio_panel)
		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
	intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);

@@ -850,7 +846,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
			      const struct intel_crtc_state *old_crtc_state,
			      const struct drm_connector_state *old_conn_state)
{
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
	enum port port;

	DRM_DEBUG_KMS("\n");

@@ -886,7 +882,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
				   const struct drm_connector_state *old_conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
	enum port port;
	u32 val;

@@ -895,7 +891,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
	if (IS_GEN9_LP(dev_priv)) {
		intel_crtc_vblank_off(old_crtc_state);

		skylake_scaler_disable(old_crtc_state);
		skl_scaler_disable(old_crtc_state);
	}

	if (is_vid_mode(intel_dsi)) {

@@ -945,11 +941,8 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
	/* Assert reset */
	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);

	/* Power off, try both CRC pmic gpio and VBT */
	intel_dsi_msleep(intel_dsi, intel_dsi->panel_off_delay);
	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
	if (intel_dsi->gpio_panel)
		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);

	/*
	 * FIXME As we do with eDP, just make a note of the time here

@@ -962,7 +955,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
				   enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
	intel_wakeref_t wakeref;
	enum port port;
	bool active = false;

@@ -1041,7 +1034,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
		&pipe_config->hw.adjusted_mode;
	struct drm_display_mode *adjusted_mode_sw;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
	unsigned int lane_count = intel_dsi->lane_count;
	unsigned int bpp, fmt;
	enum port port;

@@ -1234,7 +1227,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
	enum port port;
	unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
	unsigned int lane_count = intel_dsi->lane_count;

@@ -1322,7 +1315,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	enum port port;
	unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);

@@ -1512,7 +1505,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
static void intel_dsi_unprepare(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
	enum port port;
	u32 val;

@@ -1539,12 +1532,9 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)

static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);

	/* dispose of the gpios */
	if (intel_dsi->gpio_panel)
		gpiod_put(intel_dsi->gpio_panel);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));

	intel_dsi_vbt_gpio_cleanup(intel_dsi);
	intel_encoder_destroy(encoder);
}

@@ -1825,6 +1815,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
	struct drm_connector *connector;
	struct drm_display_mode *current_mode, *fixed_mode;
	enum port port;
	enum pipe pipe;

	DRM_DEBUG_KMS("\n");

@@ -1923,20 +1914,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)

	vlv_dphy_param_init(intel_dsi);

	/*
	 * In case of BYT with CRC PMIC, we need to use GPIO for
	 * Panel control.
	 */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC)) {
		intel_dsi->gpio_panel =
			gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);

		if (IS_ERR(intel_dsi->gpio_panel)) {
			DRM_ERROR("Failed to own gpio for panel control\n");
			intel_dsi->gpio_panel = NULL;
		}
	}
	intel_dsi_vbt_gpio_init(intel_dsi,
				intel_dsi_get_hw_state(intel_encoder, &pipe));

	drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
			   DRM_MODE_CONNECTOR_DSI);
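With the DSI VBT sequence work, the open-coded BYT/CHV CRC-PMIC panel GPIO handling above is folded into intel_dsi_vbt_gpio_init()/intel_dsi_vbt_gpio_cleanup(); the current hw state is passed in so the GPIO can be requested without glitching a panel the BIOS left enabled. A rough sketch of what the init side amounts to; the helper names come from the diff, but this body is an illustrative assumption, not the driver's actual implementation:

/* Hypothetical reduction of intel_dsi_vbt_gpio_init(); illustrative only. */
static void panel_gpio_init_sketch(struct intel_dsi *intel_dsi,
				   struct device *dev, bool panel_is_on)
{
	/* Request the "panel" GPIO in a state matching the hardware */
	enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;

	intel_dsi->gpio_panel = gpiod_get(dev, "panel", flags);
	if (IS_ERR(intel_dsi->gpio_panel)) {
		DRM_ERROR("Failed to own gpio for panel control\n");
		intel_dsi->gpio_panel = NULL;
	}
}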
@@ -117,7 +117,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
			struct intel_crtc_state *config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
	int ret;
	u32 dsi_clk;

@@ -255,7 +255,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
		     struct intel_crtc_state *config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
	int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
	u32 dsi_clock, pclk;
	u32 pll_ctl, pll_div;

@@ -321,7 +321,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
	u32 pclk;
	u32 dsi_clk;
	u32 dsi_ratio;
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);

@@ -341,7 +341,7 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
	u32 temp;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);

	temp = I915_READ(MIPI_CTRL(port));
	temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;

@@ -455,7 +455,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
			struct intel_crtc_state *config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
	u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
	u32 dsi_clk;

@@ -503,7 +503,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
			const struct intel_crtc_state *config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
	enum port port;
	u32 val;

@@ -1,5 +0,0 @@
# For building individual subdir files on the command line
subdir-ccflags-y += -I$(srctree)/$(src)/..

# Extra header tests
header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
@@ -69,6 +69,7 @@

#include <drm/i915_drm.h>

#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"

@@ -705,7 +706,7 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
	if (HAS_FULL_PPGTT(i915)) {
		struct i915_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(i915);
		ppgtt = i915_ppgtt_create(&i915->gt);
		if (IS_ERR(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));

@@ -760,12 +761,6 @@ void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
	flush_work(&i915->gem.contexts.free_work);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	context_close(p);
	return 0;
}

static int vm_idr_cleanup(int id, void *p, void *data)
{
	i915_vm_put(p);

@@ -773,7 +768,8 @@ static int vm_idr_cleanup(int id, void *p, void *data)
}

static int gem_context_register(struct i915_gem_context *ctx,
				struct drm_i915_file_private *fpriv)
				struct drm_i915_file_private *fpriv,
				u32 *id)
{
	struct i915_address_space *vm;
	int ret;

@@ -791,14 +787,10 @@ static int gem_context_register(struct i915_gem_context *ctx,
		 current->comm, pid_nr(ctx->pid));

	/* And finally expose ourselves to userspace via the idr */
	mutex_lock(&fpriv->context_idr_lock);
	ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&fpriv->context_idr_lock);
	if (ret >= 0)
		goto out;
	ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
	if (ret)
		put_pid(fetch_and_zero(&ctx->pid));

	put_pid(fetch_and_zero(&ctx->pid));
out:
	return ret;
}

@@ -808,11 +800,11 @@ int i915_gem_context_open(struct drm_i915_private *i915,
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int err;
	u32 id;

	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);

	mutex_init(&file_priv->context_idr_lock);
	mutex_init(&file_priv->vm_idr_lock);

	idr_init(&file_priv->context_idr);
	idr_init_base(&file_priv->vm_idr, 1);

	ctx = i915_gem_create_context(i915, 0);

@@ -821,21 +813,19 @@ int i915_gem_context_open(struct drm_i915_private *i915,
		goto err;
	}

	err = gem_context_register(ctx, file_priv);
	err = gem_context_register(ctx, file_priv, &id);
	if (err < 0)
		goto err_ctx;

	GEM_BUG_ON(err > 0);

	GEM_BUG_ON(id);
	return 0;

err_ctx:
	context_close(ctx);
err:
	idr_destroy(&file_priv->vm_idr);
	idr_destroy(&file_priv->context_idr);
	xa_destroy(&file_priv->context_xa);
	mutex_destroy(&file_priv->vm_idr_lock);
	mutex_destroy(&file_priv->context_idr_lock);
	return err;
}

@@ -843,10 +833,12 @@ void i915_gem_context_close(struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_private *i915 = file_priv->dev_priv;
	struct i915_gem_context *ctx;
	unsigned long idx;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
	mutex_destroy(&file_priv->context_idr_lock);
	xa_for_each(&file_priv->context_xa, idx, ctx)
		context_close(ctx);
	xa_destroy(&file_priv->context_xa);

	idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
	idr_destroy(&file_priv->vm_idr);

@@ -870,7 +862,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
	if (args->flags)
		return -EINVAL;

	ppgtt = i915_ppgtt_create(i915);
	ppgtt = i915_ppgtt_create(&i915->gt);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);
@@ -1244,12 +1236,14 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
	 * image, or into the registers directory, does not stick). Pristine
	 * and idle contexts will be configured on pinning.
	 */
	if (!intel_context_is_pinned(ce))
	if (!intel_context_pin_if_active(ce))
		return 0;

	rq = intel_engine_create_kernel_request(ce->engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto out_unpin;
	}

	/* Serialise with the remote context */
	ret = intel_context_prepare_remote_request(ce, rq);

@@ -1257,6 +1251,8 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
		ret = gen8_emit_rpcs_config(rq, ce, sseu);

	i915_request_add(rq);
out_unpin:
	intel_context_unpin(ce);
	return ret;
}
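The gen8_modify_rpcs() change above swaps the bare pinned-state check for intel_context_pin_if_active(), which, judging by its use here, takes a pin only when the context is already active; every exit path then funnels through out_unpin so that conditional pin is always released. A minimal sketch of the idiom, with a hypothetical do_work() standing in for the kernel-request construction:

/* Sketch of the pin-if-active idiom; do_work() is a made-up stand-in. */
static int modify_if_active(struct intel_context *ce)
{
	int err;

	if (!intel_context_pin_if_active(ce))
		return 0; /* idle: state will be applied on the next pin */

	err = do_work(ce);

	intel_context_unpin(ce); /* drop the pin taken by the check above */
	return err;
}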
@@ -2187,6 +2183,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;
	u32 id;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

@@ -2218,11 +2215,11 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
		goto err_ctx;
	}

	ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
	ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
	if (ret < 0)
		goto err_ctx;

	args->ctx_id = ret;
	args->ctx_id = id;
	DRM_DEBUG("HW context %d created\n", args->ctx_id);

	return 0;

@@ -2245,11 +2242,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
	if (!args->ctx_id)
		return -ENOENT;

	if (mutex_lock_interruptible(&file_priv->context_idr_lock))
		return -EINTR;

	ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
	mutex_unlock(&file_priv->context_idr_lock);
	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
	if (!ctx)
		return -ENOENT;
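The conversion above replaces the context idr plus its dedicated mutex with an XArray, which embeds its own lock: xa_alloc() hands the new id back through a u32 * out-parameter (hence the extra argument to gem_context_register()), xa_erase() removes and returns an entry in one step, and xa_for_each() walks the survivors at file close. A minimal self-contained sketch of the same pattern, with my_file/my_register as illustrative names:

#include <linux/xarray.h>

struct my_file {
	struct xarray ctx_xa; /* xa_init_flags(&ctx_xa, XA_FLAGS_ALLOC) first */
};

static int my_register(struct my_file *f, void *ctx, u32 *id)
{
	/* Allocates the lowest free id in [0, U32_MAX]; locks internally */
	return xa_alloc(&f->ctx_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
}

static void *my_unregister(struct my_file *f, u32 id)
{
	/* Removes and returns the entry, or NULL if the id was unused */
	return xa_erase(&f->ctx_xa, id);
}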
@@ -13,7 +13,6 @@

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"

@@ -2173,7 +2173,7 @@ static int eb_submit(struct i915_execbuffer *eb)
	}

	if (intel_context_nopreempt(eb->context))
		eb->request->flags |= I915_REQUEST_NOPREEMPT;
		__set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);

	return 0;
}
@@ -16,46 +16,6 @@ const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
	.release = i915_gem_object_release_memory_region,
};

/* XXX: Time to vfunc your life up? */
void __iomem *
i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
				 unsigned long n)
{
	resource_size_t offset;

	offset = i915_gem_object_get_dma_address(obj, n);
	offset -= obj->mm.region->region.start;

	return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
}

void __iomem *
i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
					unsigned long n)
{
	resource_size_t offset;

	offset = i915_gem_object_get_dma_address(obj, n);
	offset -= obj->mm.region->region.start;

	return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
}

void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
			    unsigned long n,
			    unsigned long size)
{
	resource_size_t offset;

	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));

	offset = i915_gem_object_get_dma_address(obj, n);
	offset -= obj->mm.region->region.start;

	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
}

bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_lmem_obj_ops;

@@ -14,14 +14,6 @@ struct intel_memory_region;

extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;

void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
					  unsigned long n, unsigned long size);
void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
					       unsigned long n);
void __iomem *
i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
					unsigned long n);

bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);

struct drm_i915_gem_object *
@@ -4,6 +4,7 @@
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

@@ -212,6 +213,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
	case -EIO: /* shmemfs failure from swap device */
	case -EFAULT: /* purged object */
	case -ENODEV: /* bad object, how did you get here! */
	case -ENXIO: /* unable to access backing store (on device) */
		return VM_FAULT_SIGBUS;

	case -ENOSPC: /* shmemfs allocation failure */

@@ -236,42 +238,38 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	unsigned long i, size = area->vm_end - area->vm_start;
	bool write = area->vm_flags & VM_WRITE;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	resource_size_t iomap;
	int err;

	if (!i915_gem_object_has_struct_page(obj))
		return ret;

	/* Sanity check that we allow writing into this object */
	if (i915_gem_object_is_readonly(obj) && write)
		return ret;
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return i915_error_to_vmf_fault(err);
		goto out;

	/* PTEs are revoked in obj->ops->put_pages() */
	for (i = 0; i < size >> PAGE_SHIFT; i++) {
		struct page *page = i915_gem_object_get_page(obj, i);

		ret = vmf_insert_pfn(area,
				     (unsigned long)area->vm_start + i * PAGE_SIZE,
				     page_to_pfn(page));
		if (ret != VM_FAULT_NOPAGE)
			break;
	iomap = -1;
	if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;
	}

	if (write) {
	/* PTEs are revoked in obj->ops->put_pages() */
	err = remap_io_sg(area,
			  area->vm_start, area->vm_end - area->vm_start,
			  obj->mm.pages->sgl, iomap);

	if (area->vm_flags & VM_WRITE) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		obj->cache_dirty = true; /* XXX flush after PAT update? */
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);

	return ret;
out:
	return i915_error_to_vmf_fault(err);
}

static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)

@@ -560,7 +558,9 @@ __assign_mmap_offset(struct drm_file *file,
	}

	if (mmap_type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_has_struct_page(obj)) {
	    !i915_gem_object_type_has(obj,
				      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
				      I915_GEM_OBJECT_HAS_IOMEM)) {
		err = -ENODEV;
		goto out;
	}

@@ -694,6 +694,46 @@ static const struct vm_operations_struct vm_ops_cpu = {
	.close = vm_close,
};

static int singleton_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = file->private_data;

	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
	drm_dev_put(&i915->drm);

	return 0;
}

static const struct file_operations singleton_fops = {
	.owner = THIS_MODULE,
	.release = singleton_release,
};

static struct file *mmap_singleton(struct drm_i915_private *i915)
{
	struct file *file;

	rcu_read_lock();
	file = i915->gem.mmap_singleton;
	if (file && !get_file_rcu(file))
		file = NULL;
	rcu_read_unlock();
	if (file)
		return file;

	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
	if (IS_ERR(file))
		return file;

	/* Everyone shares a single global address space */
	file->f_mapping = i915->drm.anon_inode->i_mapping;

	smp_store_mb(i915->gem.mmap_singleton, file);
	drm_dev_get(&i915->drm);

	return file;
}

/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data. Since we need to

@@ -707,6 +747,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
	struct drm_device *dev = priv->minor->dev;
	struct i915_mmap_offset *mmo = NULL;
	struct drm_gem_object *obj = NULL;
	struct file *anon;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

@@ -755,9 +796,26 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	anon = mmap_singleton(to_i915(obj->dev));
	if (IS_ERR(anon)) {
		drm_gem_object_put_unlocked(obj);
		return PTR_ERR(anon);
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = mmo;

	/*
	 * We keep the ref on mmo->obj, not vm_file, but we require
	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
	 * Our userspace is accustomed to having per-file resource cleanup
	 * (i.e. contexts, objects and requests) on their close(fd), which
	 * requires avoiding extraneous references to their filp, hence why
	 * we prefer to use an anonymous file for their mmaps.
	 */
	fput(vma->vm_file);
	vma->vm_file = anon;

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		vma->vm_page_prot =
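mmap_singleton() above caches one anonymous file per device so that every GEM mmap shares a single f_mapping (making mapping-wide PTE revocation possible) without pinning the user's drm fd. The lookup is a lock-free publish/acquire. A minimal sketch of that pattern, assuming only the get_file_rcu() semantics visible in the diff:

#include <linux/fs.h>
#include <linux/rcupdate.h>

/* Try to acquire a reference on a published struct file, RCU-safely. */
static struct file *get_cached_file(struct file **slot)
{
	struct file *file;

	rcu_read_lock();
	file = *slot;
	if (file && !get_file_rcu(file)) /* refcount may have hit zero */
		file = NULL;
	rcu_read_unlock();

	return file; /* NULL: caller creates a new file and publishes it */
}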
@@ -16,6 +16,7 @@
#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_vma_types.h"

void i915_gem_init__objects(struct drm_i915_private *i915);

@@ -158,9 +158,7 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (i915_gem_object_is_lmem(obj))
		io_mapping_unmap((void __force __iomem *)ptr);
	else if (is_vmalloc_addr(ptr))
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
	else
		kunmap(kmap_to_page(ptr));

@@ -236,46 +234,44 @@ unlock:
	return err;
}

static inline pte_t iomap_pte(resource_size_t base,
			      dma_addr_t offset,
			      pgprot_t prot)
{
	return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot));
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	unsigned long n_pte = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->mm.pages;
	struct sgt_iter sgt_iter;
	struct page *page;
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
	unsigned long i = 0;
	pte_t *stack[32], **mem;
	struct vm_struct *area;
	pgprot_t pgprot;
	void *addr;

	if (i915_gem_object_is_lmem(obj)) {
		void __iomem *io;

		if (type != I915_MAP_WC)
			return NULL;

		io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
		return (void __force *)io;
	}
	if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
		return NULL;

	/* A single page can always be kmapped */
	if (n_pages == 1 && type == I915_MAP_WB)
	if (n_pte == 1 && type == I915_MAP_WB)
		return kmap(sg_page(sgt->sgl));

	if (n_pages > ARRAY_SIZE(stack_pages)) {
	mem = stack;
	if (n_pte > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return NULL;
	}

	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);
	area = alloc_vm_area(obj->base.size, mem);
	if (!area) {
		if (mem != stack)
			kvfree(mem);
		return NULL;
	}

	switch (type) {
	default:

@@ -288,12 +284,31 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	addr = vmap(pages, n_pages, 0, pgprot);

	if (pages != stack_pages)
		kvfree(pages);
	if (i915_gem_object_has_struct_page(obj)) {
		struct sgt_iter iter;
		struct page *page;
		pte_t **ptes = mem;

	return addr;
		for_each_sgt_page(page, iter, sgt)
			**ptes++ = mk_pte(page, pgprot);
	} else {
		resource_size_t iomap;
		struct sgt_iter iter;
		pte_t **ptes = mem;
		dma_addr_t addr;

		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;

		for_each_sgt_daddr(addr, iter, sgt)
			**ptes++ = iomap_pte(iomap, addr, pgprot);
	}

	if (mem != stack)
		kvfree(mem);

	return area->addr;
}

/* get, pin, and map the pages of the object into kernel space */
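The i915_gem_object_map() rewrite above stops collecting struct page pointers for vmap(), which LMEM cannot provide, and instead reserves kernel address space with alloc_vm_area(), which returns one PTE slot per page for the caller to fill: mk_pte() for shmem pages, iomap_pte() for device addresses. A reduced sketch of the technique as it existed at the time, assuming a caller that already has an array of pages; error handling trimmed:

#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *map_pages_via_ptes(struct page **pages, unsigned long n)
{
	struct vm_struct *area;
	pte_t **slots;
	unsigned long i;
	void *addr = NULL;

	slots = kvmalloc_array(n, sizeof(*slots), GFP_KERNEL);
	if (!slots)
		return NULL;

	/* Reserve va space; one pte slot per page comes back in slots[] */
	area = alloc_vm_area(n * PAGE_SIZE, slots);
	if (area) {
		for (i = 0; i < n; i++)
			*slots[i] = mk_pte(pages[i], PAGE_KERNEL);
		addr = area->addr;
	}

	kvfree(slots);
	return addr;
}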
@@ -107,7 +107,10 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
{
	INIT_LIST_HEAD(&obj->mm.blocks);
	obj->mm.region = intel_memory_region_get(mem);

	obj->flags |= flags;
	if (obj->base.size <= mem->min_page_size)
		obj->flags |= I915_BO_ALLOC_CONTIGUOUS;

	mutex_lock(&mem->objects.lock);

@@ -594,6 +594,8 @@ static int init_shmem(struct intel_memory_region *mem)
			err);
	}

	intel_memory_region_set_name(mem, "system");

	return 0; /* Don't error, we can simply fallback to the kernel mnt */
}

@@ -645,6 +645,8 @@ i915_gem_object_create_stolen(struct drm_i915_private *i915,

static int init_stolen(struct intel_memory_region *mem)
{
	intel_memory_region_set_name(mem, "stolen");

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.

@@ -7,6 +7,12 @@
#ifndef __HUGE_GEM_OBJECT_H
#define __HUGE_GEM_OBJECT_H

#include <linux/types.h>

#include "gem/i915_gem_object_types.h"

struct drm_i915_private;

struct drm_i915_gem_object *
huge_gem_object(struct drm_i915_private *i915,
		phys_addr_t phys_size,
@@ -1017,38 +1017,33 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
	return err;
}

static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n;
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_wc_domain(obj, false);
	i915_gem_object_unlock(obj);
	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;
	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
		u32 __iomem *base;
		u32 read_val;

		base = i915_gem_object_lmem_io_map_page_atomic(obj, n);

		read_val = ioread32(base + dword);
		io_mapping_unmap_atomic(base);
		if (read_val != val) {
			pr_err("n=%lu base[%u]=%u, val=%u\n",
			       n, dword, read_val, val);
	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unpin_map(obj);
	return err;
}

@@ -1056,10 +1051,8 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	if (i915_gem_object_has_struct_page(obj))
		return __cpu_check_shmem(obj, dword, val);
	else if (i915_gem_object_is_lmem(obj))
		return __cpu_check_lmem(obj, dword, val);

	return -ENODEV;
	else
		return __cpu_check_vmap(obj, dword, val);
}

static int __igt_write_huge(struct intel_context *ce,

@@ -1872,7 +1865,7 @@ int i915_gem_huge_page_mock_selftests(void)
	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
	mkwrite_device_info(dev_priv)->ppgtt_size = 48;

	ppgtt = i915_ppgtt_create(dev_priv);
	ppgtt = i915_ppgtt_create(&dev_priv->gt);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
@@ -325,7 +325,10 @@ static int igt_gem_coherency(void *arg)
	values = offsets + ncachelines;

	ctx.engine = random_engine(i915, &prng);
	GEM_BUG_ON(!ctx.engine);
	if (!ctx.engine) {
		err = -ENODEV;
		goto out_free;
	}
	pr_info("%s: using %s\n", __func__, ctx.engine->name);
	intel_engine_pm_get(ctx.engine);

@@ -354,7 +357,7 @@ static int igt_gem_coherency(void *arg)
	ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(ctx.obj)) {
		err = PTR_ERR(ctx.obj);
		goto free;
		goto out_pm;
	}

	i915_random_reorder(offsets, ncachelines, &prng);

@@ -405,14 +408,15 @@ static int igt_gem_coherency(void *arg)
			}
		}
	}
free:
out_pm:
	intel_engine_pm_put(ctx.engine);
out_free:
	kfree(offsets);
	return err;

put_object:
	i915_gem_object_put(ctx.obj);
	goto free;
	goto out_pm;
}

int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
@@ -9,6 +9,7 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"

@@ -725,114 +726,359 @@ err_obj:
	goto out;
}

#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
static int igt_mmap(void *arg, enum i915_mmap_type type)
static int gtt_set(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_mmap_offset *mmo;
	struct vm_area_struct *area;
	unsigned long addr;
	void *vaddr;
	int err = 0, i;
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}
	memset(vaddr, POISON_INUSE, PAGE_SIZE);

	memset_io(map, POISON_INUSE, obj->base.size);
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int gtt_check(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int wc_set(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, POISON_INUSE, obj->base.size);
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo)) {
		err = PTR_ERR(mmo);
		goto out;
	return 0;
}

static int wc_check(struct drm_i915_gem_object *obj)
{
	void *vaddr;
	int err = 0;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_gem_object_unpin_map(obj);

	return err;
}

static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
	if (type == I915_MMAP_TYPE_GTT &&
	    !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
		return false;

	if (type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_type_has(obj,
				      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
				      I915_GEM_OBJECT_HAS_IOMEM))
		return false;

	return true;
}

#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
static int __igt_mmap(struct drm_i915_private *i915,
		      struct drm_i915_gem_object *obj,
		      enum i915_mmap_type type)
{
	struct i915_mmap_offset *mmo;
	struct vm_area_struct *area;
	unsigned long addr;
	int err, i;

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr)) {
		err = addr;
		goto out;
	}
	if (IS_ERR_VALUE(addr))
		return addr;

	pr_debug("igt_mmap() @ %lx\n", addr);
	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);

	area = find_vma(current->mm, addr);
	if (!area) {
		pr_err("Did not create a vm_area_struct for the mmap\n");
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	if (area->vm_private_data != mmo) {
		pr_err("vm_area_struct did not point back to our mmap_offset object!\n");
		pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) {
	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
		u32 x;

		if (get_user(x, ux)) {
			pr_err("Unable to read from mmap, offset:%zd\n",
			       i * sizeof(x));
			pr_err("%s: Unable to read from mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			break;
			goto out_unmap;
		}

		if (x != expand32(POISON_INUSE)) {
			pr_err("Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
			       obj->mm.region->name,
			       i * sizeof(x), x, expand32(POISON_INUSE));
			err = -EINVAL;
			break;
			goto out_unmap;
		}

		x = expand32(POISON_FREE);
		if (put_user(x, ux)) {
			pr_err("Unable to write to mmap, offset:%zd\n",
			       i * sizeof(x));
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			break;
			goto out_unmap;
		}
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(&i915->gt);

	err = wc_check(obj);
	if (err == -ENXIO)
		err = gtt_check(obj);
out_unmap:
	vm_munmap(addr, PAGE_SIZE);

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out;
	}
	if (err == 0 && memchr_inv(vaddr, POISON_FREE, PAGE_SIZE)) {
		pr_err("Write via mmap did not land in backing store\n");
		err = -EINVAL;
	}
	i915_gem_object_unpin_map(obj);

out:
	i915_gem_object_put(obj);
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_gtt(void *arg)
static int igt_mmap(void *arg)
{
	return igt_mmap(arg, I915_MMAP_TYPE_GTT);
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		unsigned long sizes[] = {
			PAGE_SIZE,
			mr->min_page_size,
			SZ_4M,
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
			struct drm_i915_gem_object *obj;
			int err;

			obj = i915_gem_object_create_region(mr, sizes[i], 0);
			if (obj == ERR_PTR(-ENODEV))
				continue;

			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);

			i915_gem_object_put(obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static int igt_mmap_cpu(void *arg)
static int __igt_mmap_gpu(struct drm_i915_private *i915,
			  struct drm_i915_gem_object *obj,
			  enum i915_mmap_type type)
{
	return igt_mmap(arg, I915_MMAP_TYPE_WC);
	struct intel_engine_cs *engine;
	struct i915_mmap_offset *mmo;
	unsigned long addr;
	u32 __user *ux;
	u32 bbe;
	int err;

	/*
	 * Verify that the mmap access into the backing store aligns with
	 * that of the GPU, i.e. that mmap is indeed writing into the same
	 * page as being read by the GPU.
	 */

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	ux = u64_to_user_ptr((u64)addr);
	bbe = MI_BATCH_BUFFER_END;
	if (put_user(bbe, ux)) {
		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
		err = -EFAULT;
		goto out_unmap;
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(&i915->gt);

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unmap;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			goto out_unmap;

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_unpin;
		}

		i915_vma_lock(vma);
		err = i915_request_await_object(rq, vma->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq, 0);
		i915_vma_unlock(vma);

		err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
		i915_request_get(rq);
		i915_request_add(rq);

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			struct drm_printer p =
				drm_info_printer(engine->i915->drm.dev);

			pr_err("%s(%s, %s): Failed to execute batch\n",
			       __func__, engine->name, obj->mm.region->name);
			intel_engine_dump(engine, &p,
					  "%s\n", engine->name);

			intel_gt_set_wedged(engine->gt);
			err = -EIO;
		}
		i915_request_put(rq);

out_unpin:
		i915_vma_unpin(vma);
		if (err)
			goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
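A quick worked example of the expand32() check used throughout the new helpers: POISON_INUSE is 0x5a, so a buffer memset with it must read back as 0x5a5a5a5a for every aligned u32, and POISON_FREE (0x6b) as 0x6b6b6b6b after the write-back pass:

#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))

_Static_assert(expand32(0x5a) == 0x5a5a5a5a, "POISON_INUSE replicated");
_Static_assert(expand32(0x6b) == 0x6b6b6b6b, "POISON_FREE replicated");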
@@ -887,32 +1133,24 @@ static int prefault_range(u64 start, u64 len)
	return __get_user(c, end - 1);
}

static int igt_mmap_revoke(void *arg, enum i915_mmap_type type)
static int __igt_mmap_revoke(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_mmap_offset *mmo;
	unsigned long addr;
	int err;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
	if (!can_mmap(obj, type))
		return 0;

	obj = i915_gem_object_create_internal(i915, SZ_4M);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo)) {
		err = PTR_ERR(mmo);
		goto out;
	}
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr)) {
		err = addr;
		goto out;
	}
	if (IS_ERR_VALUE(addr))
		return addr;

	err = prefault_range(addr, obj->base.size);
	if (err)

@@ -922,8 +1160,10 @@ static int igt_mmap_revoke(void *arg, enum i915_mmap_type type)
		 !atomic_read(&obj->bind_count));

	err = check_present(addr, obj->base.size);
	if (err)
	if (err) {
		pr_err("%s: was not present\n", obj->mm.region->name);
		goto out_unmap;
	}

	/*
	 * After unbinding the object from the GGTT, its address may be reused

@@ -947,24 +1187,43 @@ static int igt_mmap_revoke(void *arg, enum i915_mmap_type type)
	}

	err = check_absent(addr, obj->base.size);
	if (err)
	if (err) {
		pr_err("%s: was not absent\n", obj->mm.region->name);
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
out:
	i915_gem_object_put(obj);
	return err;
}

static int igt_mmap_gtt_revoke(void *arg)
static int igt_mmap_revoke(void *arg)
{
	return igt_mmap_revoke(arg, I915_MMAP_TYPE_GTT);
}
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

static int igt_mmap_cpu_revoke(void *arg)
{
	return igt_mmap_revoke(arg, I915_MMAP_TYPE_WC);
	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

int i915_gem_mman_live_selftests(struct drm_i915_private *i915)

@@ -973,10 +1232,9 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_smoke_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
		SUBTEST(igt_mmap_gtt),
		SUBTEST(igt_mmap_cpu),
		SUBTEST(igt_mmap_gtt_revoke),
		SUBTEST(igt_mmap_cpu_revoke),
		SUBTEST(igt_mmap),
		SUBTEST(igt_mmap_revoke),
		SUBTEST(igt_mmap_gpu),
	};

	return i915_subtests(tests, i915);
@@ -77,12 +77,13 @@ live_context(struct drm_i915_private *i915, struct file *file)
{
	struct i915_gem_context *ctx;
	int err;
	u32 id;

	ctx = i915_gem_create_context(i915, 0);
	if (IS_ERR(ctx))
		return ctx;

	err = gem_context_register(ctx, to_drm_file(file)->driver_priv);
	err = gem_context_register(ctx, to_drm_file(file)->driver_priv, &id);
	if (err < 0)
		goto err_ctx;

@@ -7,6 +7,8 @@
#ifndef __MOCK_GEM_OBJECT_H__
#define __MOCK_GEM_OBJECT_H__

#include "gem/i915_gem_object_types.h"

struct mock_object {
	struct drm_i915_gem_object base;
};

@@ -1,5 +0,0 @@
# For building individual subdir files on the command line
subdir-ccflags-y += -I$(srctree)/$(src)/..

# Extra header tests
header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
@ -0,0 +1,482 @@
|
|||
// SPDX-License-Identifier: MIT
|
||||
/*
|
||||
* Copyright © 2020 Intel Corporation
|
||||
*/
|
||||
|
||||
#include <linux/log2.h>
|
||||
|
||||
#include "gen6_ppgtt.h"
|
||||
#include "i915_scatterlist.h"
|
||||
#include "i915_trace.h"
|
||||
#include "i915_vgpu.h"
|
||||
#include "intel_gt.h"
|
||||
|
||||
/* Write pde (index) from the page directory @pd to the page table @pt */
|
||||
static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
|
||||
const unsigned int pde,
|
||||
const struct i915_page_table *pt)
|
||||
{
|
||||
/* Caller needs to make sure the write completes if necessary */
|
||||
iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
|
||||
ppgtt->pd_addr + pde);
|
||||
}
|
||||
|
||||
void gen7_ppgtt_enable(struct intel_gt *gt)
|
||||
{
|
||||
struct drm_i915_private *i915 = gt->i915;
|
||||
struct intel_uncore *uncore = gt->uncore;
|
||||
struct intel_engine_cs *engine;
|
||||
enum intel_engine_id id;
|
||||
u32 ecochk;
|
||||
|
||||
intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
|
||||
|
||||
ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
|
||||
if (IS_HASWELL(i915)) {
|
||||
ecochk |= ECOCHK_PPGTT_WB_HSW;
|
||||
} else {
|
||||
ecochk |= ECOCHK_PPGTT_LLC_IVB;
|
||||
ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
|
||||
}
|
||||
intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
|
||||
|
||||
for_each_engine(engine, gt, id) {
|
||||
/* GFX_MODE is per-ring on gen7+ */
|
||||
ENGINE_WRITE(engine,
|
||||
RING_MODE_GEN7,
|
||||
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
|
||||
}
|
||||
}
|
||||
|
||||
void gen6_ppgtt_enable(struct intel_gt *gt)
|
||||
{
|
||||
struct intel_uncore *uncore = gt->uncore;
|
||||
|
||||
intel_uncore_rmw(uncore,
|
||||
GAC_ECO_BITS,
|
||||
0,
|
||||
ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
|
||||
|
||||
intel_uncore_rmw(uncore,
|
||||
GAB_CTL,
|
||||
0,
|
||||
GAB_CTL_CONT_AFTER_PAGEFAULT);
|
||||
|
||||
intel_uncore_rmw(uncore,
|
||||
GAM_ECOCHK,
|
||||
0,
|
||||
ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
|
||||
|
||||
if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
|
||||
intel_uncore_write(uncore,
|
||||
GFX_MODE,
|
||||
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
|
||||
}
|
||||
|
||||
/* PPGTT support for Sandybdrige/Gen6 and later */
|
||||
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
|
||||
u64 start, u64 length)
|
||||
{
|
||||
struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
|
||||
const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
|
||||
const gen6_pte_t scratch_pte = vm->scratch[0].encode;
|
||||
unsigned int pde = first_entry / GEN6_PTES;
|
||||
unsigned int pte = first_entry % GEN6_PTES;
|
||||
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
|
||||
|
||||
while (num_entries) {
|
||||
struct i915_page_table * const pt =
|
||||
i915_pt_entry(ppgtt->base.pd, pde++);
|
||||
const unsigned int count = min(num_entries, GEN6_PTES - pte);
|
||||
gen6_pte_t *vaddr;
|
||||
|
||||
GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));
|
||||
|
||||
num_entries -= count;
|
||||
|
||||
GEM_BUG_ON(count > atomic_read(&pt->used));
|
||||
if (!atomic_sub_return(count, &pt->used))
|
||||
ppgtt->scan_for_unused_pt = true;
|
||||
|
||||
/*
|
||||
* Note that the hw doesn't support removing PDE on the fly
|
||||
* (they are cached inside the context with no means to
|
||||
* invalidate the cache), so we can only reset the PTE
|
||||
* entries back to scratch.
|
||||
*/
|
||||
|
||||
vaddr = kmap_atomic_px(pt);
|
||||
memset32(vaddr + pte, scratch_pte, count);
|
||||
kunmap_atomic(vaddr);
|
||||
|
||||
pte = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
|
||||
struct i915_vma *vma,
|
||||
enum i915_cache_level cache_level,
|
||||
u32 flags)
|
||||
{
|
||||
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
|
||||
struct i915_page_directory * const pd = ppgtt->pd;
|
||||
unsigned int first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
|
||||
unsigned int act_pt = first_entry / GEN6_PTES;
|
||||
unsigned int act_pte = first_entry % GEN6_PTES;
|
||||
const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
|
||||
struct sgt_dma iter = sgt_dma(vma);
|
||||
gen6_pte_t *vaddr;
|
||||
|
||||
GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);
|
||||
|
||||
vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
|
||||
do {
|
||||
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
|
||||
|
||||
iter.dma += I915_GTT_PAGE_SIZE;
|
||||
if (iter.dma == iter.max) {
|
||||
iter.sg = __sg_next(iter.sg);
|
||||
if (!iter.sg)
|
||||
break;
|
||||
|
||||
iter.dma = sg_dma_address(iter.sg);
|
||||
iter.max = iter.dma + iter.sg->length;
|
||||
}
|
||||
|
||||
if (++act_pte == GEN6_PTES) {
|
||||
kunmap_atomic(vaddr);
|
||||
vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
|
||||
act_pte = 0;
|
||||
}
|
||||
} while (1);
|
||||
kunmap_atomic(vaddr);
|
||||
|
||||
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
|
||||
}
|
||||
|
||||
static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
|
||||
{
|
||||
struct i915_page_directory * const pd = ppgtt->base.pd;
|
||||
struct i915_page_table *pt;
|
||||
unsigned int pde;
|
||||
|
||||
start = round_down(start, SZ_64K);
|
||||
end = round_up(end, SZ_64K) - start;
|
||||
|
||||
mutex_lock(&ppgtt->flush);
|
||||
|
||||
gen6_for_each_pde(pt, pd, start, end, pde)
|
||||
gen6_write_pde(ppgtt, pde, pt);
|
||||
|
||||
mb();
|
||||
ioread32(ppgtt->pd_addr + pde - 1);
|
||||
gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
|
||||
mb();
|
||||
|
||||
mutex_unlock(&ppgtt->flush);
|
||||
}
|
||||
|
||||
static int gen6_alloc_va_range(struct i915_address_space *vm,
|
||||
u64 start, u64 length)
|
||||
{
|
||||
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
|
||||
struct i915_page_directory * const pd = ppgtt->base.pd;
|
||||
struct i915_page_table *pt, *alloc = NULL;
|
||||
intel_wakeref_t wakeref;
|
||||
u64 from = start;
|
||||
unsigned int pde;
|
||||
int ret = 0;
|
||||
|
||||
wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
|
||||
|
||||
spin_lock(&pd->lock);
|
||||
gen6_for_each_pde(pt, pd, start, length, pde) {
|
||||
const unsigned int count = gen6_pte_count(start, length);
|
||||
|
||||
if (px_base(pt) == px_base(&vm->scratch[1])) {
|
||||
spin_unlock(&pd->lock);
|
||||
|
||||
pt = fetch_and_zero(&alloc);
|
||||
if (!pt)
|
||||
pt = alloc_pt(vm);
|
||||
if (IS_ERR(pt)) {
|
||||
ret = PTR_ERR(pt);
|
||||
goto unwind_out;
|
||||
}
|
||||
|
||||
fill32_px(pt, vm->scratch[0].encode);
|
||||
|
||||
spin_lock(&pd->lock);
|
||||
if (pd->entry[pde] == &vm->scratch[1]) {
|
||||
pd->entry[pde] = pt;
|
||||
} else {
|
||||
alloc = pt;
|
||||
pt = pd->entry[pde];
|
||||
}
|
||||
}
|
||||
|
||||
atomic_add(count, &pt->used);
|
||||
}
|
||||
spin_unlock(&pd->lock);
|
||||
|
||||
if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND))
|
||||
gen6_flush_pd(ppgtt, from, start);
|
||||
|
||||
goto out;
|
||||
|
||||
unwind_out:
|
||||
gen6_ppgtt_clear_range(vm, from, start - from);
|
||||
out:
|
||||
if (alloc)
|
||||
free_px(vm, alloc);
|
||||
intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
|
||||
{
|
||||
struct i915_address_space * const vm = &ppgtt->base.vm;
|
||||
struct i915_page_directory * const pd = ppgtt->base.pd;
|
||||
int ret;
|
||||
|
||||
ret = setup_scratch_page(vm, __GFP_HIGHMEM);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
vm->scratch[0].encode =
|
||||
vm->pte_encode(px_dma(&vm->scratch[0]),
|
||||
I915_CACHE_NONE, PTE_READ_ONLY);
|
||||
|
||||
if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) {
|
||||
cleanup_scratch_page(vm);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
	fill32_px(&vm->scratch[1], vm->scratch[0].encode);
	memset_p(pd->entry, &vm->scratch[1], I915_PDES);

	return 0;
}

static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
{
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_dma * const scratch =
		px_base(&ppgtt->base.vm.scratch[1]);
	struct i915_page_table *pt;
	u32 pde;

	gen6_for_all_pdes(pt, pd, pde)
		if (px_base(pt) != scratch)
			free_px(&ppgtt->base.vm, pt);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));

	__i915_vma_put(ppgtt->vma);

	gen6_ppgtt_free_pd(ppgtt);
	free_scratch(vm);

	mutex_destroy(&ppgtt->flush);
	mutex_destroy(&ppgtt->pin_mutex);
	kfree(ppgtt->base.pd);
}

static int pd_vma_set_pages(struct i915_vma *vma)
{
	vma->pages = ERR_PTR(-ENODEV);
	return 0;
}

static void pd_vma_clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);

	vma->pages = NULL;
}

static int pd_vma_bind(struct i915_vma *vma,
		       enum i915_cache_level cache_level,
		       u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
	struct gen6_ppgtt *ppgtt = vma->private;
	u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;

	px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;

	gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
	return 0;
}

static void pd_vma_unbind(struct i915_vma *vma)
{
	struct gen6_ppgtt *ppgtt = vma->private;
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_dma * const scratch =
		px_base(&ppgtt->base.vm.scratch[1]);
	struct i915_page_table *pt;
	unsigned int pde;

	if (!ppgtt->scan_for_unused_pt)
		return;

	/* Free all no longer used page tables */
	gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
		if (px_base(pt) == scratch || atomic_read(&pt->used))
			continue;

		free_px(&ppgtt->base.vm, pt);
		pd->entry[pde] = scratch;
	}

	ppgtt->scan_for_unused_pt = false;
}

static const struct i915_vma_ops pd_vma_ops = {
	.set_pages = pd_vma_set_pages,
	.clear_pages = pd_vma_clear_pages,
	.bind_vma = pd_vma_bind,
	.unbind_vma = pd_vma_unbind,
};

static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
{
	struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
	struct i915_vma *vma;

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(size > ggtt->vm.total);

	vma = i915_vma_alloc();
	if (!vma)
		return ERR_PTR(-ENOMEM);

	i915_active_init(&vma->active, NULL, NULL);

	kref_init(&vma->ref);
	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(&ggtt->vm);
	vma->ops = &pd_vma_ops;
	vma->private = ppgtt;

	vma->size = size;
	vma->fence_size = size;
	atomic_set(&vma->flags, I915_VMA_GGTT);
	vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */

	INIT_LIST_HEAD(&vma->obj_link);
	INIT_LIST_HEAD(&vma->closed_link);

	return vma;
}

int gen6_ppgtt_pin(struct i915_ppgtt *base)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
	int err;

	GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));

	/*
	 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
	 * which will be pinned into every active context.
	 * (When vma->pin_count becomes atomic, I expect we will naturally
	 * need a larger, unpacked, type and kill this redundancy.)
	 */
	if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
		return 0;

	if (mutex_lock_interruptible(&ppgtt->pin_mutex))
		return -EINTR;

	/*
	 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	err = 0;
	if (!atomic_read(&ppgtt->pin_count))
		err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH);
	if (!err)
		atomic_inc(&ppgtt->pin_count);
	mutex_unlock(&ppgtt->pin_mutex);

	return err;
}
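
Reviewer note (an illustrative aside, not part of the diff): the fast path above uses atomic_add_unless(&ppgtt->pin_count, 1, 0) so that only the very first pin has to take pin_mutex and bind the vma; every later pin just bumps the count if it is already non-zero. A minimal userspace model of that pattern, using C11 atomics instead of the kernel's atomic_t API (all names here are hypothetical):

/* Illustrative model only; not the kernel API. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pin_count;

/* Increment only if already non-zero (mirrors atomic_add_unless(v, 1, 0)). */
static bool pin_if_active(void)
{
	int old = atomic_load(&pin_count);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&pin_count, &old, old + 1))
			return true;
		/* on failure, old is reloaded and the loop re-checks it */
	}
	return false;
}

int main(void)
{
	printf("first try: %d\n", pin_if_active());	/* 0: slow path needed */
	atomic_store(&pin_count, 1);			/* slow path pinned once */
	printf("second try: %d\n", pin_if_active());	/* 1: fast path taken */
	printf("count: %d\n", atomic_load(&pin_count));	/* 2 */
	return 0;
}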

void gen6_ppgtt_unpin(struct i915_ppgtt *base)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);

	GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
	if (atomic_dec_and_test(&ppgtt->pin_count))
		i915_vma_unpin(ppgtt->vma);
}

void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);

	if (!atomic_read(&ppgtt->pin_count))
		return;

	i915_vma_unpin(ppgtt->vma);
	atomic_set(&ppgtt->pin_count, 0);
}

struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ggtt * const ggtt = gt->ggtt;
	struct gen6_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	mutex_init(&ppgtt->flush);
	mutex_init(&ppgtt->pin_mutex);

	ppgtt_init(&ppgtt->base, gt);
	ppgtt->base.vm.top = 1;

	ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;

	ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;

	ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
	if (!ppgtt->base.pd) {
		err = -ENOMEM;
		goto err_free;
	}

	err = gen6_ppgtt_init_scratch(ppgtt);
	if (err)
		goto err_pd;

	ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
	if (IS_ERR(ppgtt->vma)) {
		err = PTR_ERR(ppgtt->vma);
		goto err_scratch;
	}

	return &ppgtt->base;

err_scratch:
	free_scratch(&ppgtt->base.vm);
err_pd:
	kfree(ppgtt->base.pd);
err_free:
	mutex_destroy(&ppgtt->pin_mutex);
	kfree(ppgtt);
	return ERR_PTR(err);
}
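
Reviewer note (illustrative, not part of the diff): the lifecycle this file exposes is create, then pin per use, then unpin, then a final put. A sketch of a caller, assuming a valid struct intel_gt and abbreviating error handling; this is not code from the series:

/* Hypothetical caller sketch; assumes a valid gt. */
static int example_use_gen6_ppgtt(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = gen6_ppgtt_create(gt);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	err = gen6_ppgtt_pin(ppgtt);	/* binds the page directory into the GGTT */
	if (!err) {
		/* ... insert and use PTEs via ppgtt->vm ... */
		gen6_ppgtt_unpin(ppgtt);
	}

	i915_vm_put(&ppgtt->vm);	/* drop final ref; cleanup runs from the release worker */
	return err;
}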

@@ -0,0 +1,76 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 */

#ifndef __GEN6_PPGTT_H__
#define __GEN6_PPGTT_H__

#include "intel_gtt.h"

struct gen6_ppgtt {
	struct i915_ppgtt base;

	struct mutex flush;
	struct i915_vma *vma;
	gen6_pte_t __iomem *pd_addr;

	atomic_t pin_count;
	struct mutex pin_mutex;

	bool scan_for_unused_pt;
};

static inline u32 gen6_pte_index(u32 addr)
{
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

static inline u32 gen6_pte_count(u32 addr, u32 length)
{
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

static inline u32 gen6_pde_index(u32 addr)
{
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
}

#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)

static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
{
	BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base));
	return __to_gen6_ppgtt(base);
}

/*
 * gen6_for_each_pde() iterates over every pde from start until start+length.
 * If start and start+length are not perfectly divisible, the macro will round
 * down and up as needed. Start=0 and length=2G effectively iterates over
 * every PDE in the system. The macro modifies ALL its parameters except 'pd',
 * so each of the other parameters should preferably be a simple variable, or
 * at most an lvalue with no side-effects!
 */
#define gen6_for_each_pde(pt, pd, start, length, iter) \
	for (iter = gen6_pde_index(start); \
	     length > 0 && iter < I915_PDES && \
	     (pt = i915_pt_entry(pd, iter), true); \
	     ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
		temp = min(temp - start, length); \
		start += temp, length -= temp; }), ++iter)

#define gen6_for_all_pdes(pt, pd, iter) \
	for (iter = 0; \
	     iter < I915_PDES && \
	     (pt = i915_pt_entry(pd, iter), true); \
	     ++iter)

int gen6_ppgtt_pin(struct i915_ppgtt *base);
void gen6_ppgtt_unpin(struct i915_ppgtt *base);
void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
void gen6_ppgtt_enable(struct intel_gt *gt);
void gen7_ppgtt_enable(struct intel_gt *gt);
struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt);

#endif
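
Reviewer note (illustrative, not part of the diff): gen6_pte_index() and gen6_pde_index() split a GTT offset into a page-directory slot and a slot within that page table. A standalone model of the arithmetic, assuming 4 KiB pages and GEN6_PDE_SHIFT == 22; that constant is not defined in this hunk, so treat it as an assumption:

/* Standalone model; constants assumed, not taken from kernel headers. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PDE_SHIFT	22			/* assumed GEN6_PDE_SHIFT: 4 MiB per PDE */
#define PTES_PER_PT	(1u << (PDE_SHIFT - PAGE_SHIFT))	/* 1024 */

static uint32_t pde_index(uint32_t addr) { return addr >> PDE_SHIFT; }

static uint32_t pte_index(uint32_t addr)
{
	return (addr >> PAGE_SHIFT) & (PTES_PER_PT - 1);
}

int main(void)
{
	uint32_t addr = 0x12345000;	/* arbitrary page-aligned GTT address */

	/* prints "pde 72, pte 837" for this address */
	printf("pde %u, pte %u\n", pde_index(addr), pte_index(addr));
	return 0;
}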

@@ -0,0 +1,723 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"

static u64 gen8_pde_encode(const dma_addr_t addr,
			   const enum i915_cache_level level)
{
	u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;

	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;

	return pde;
}

static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
	enum vgt_g2v_type msg;
	int i;

	if (create)
		atomic_inc(px_used(ppgtt->pd)); /* never remove */
	else
		atomic_dec(px_used(ppgtt->pd));

	mutex_lock(&i915->vgpu.lock);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		const u64 daddr = px_dma(ppgtt->pd);

		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = create ?
			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].lo),
					   lower_32_bits(daddr));
			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].hi),
					   upper_32_bits(daddr));
		}

		msg = create ?
			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
	}

	/* g2v_notify atomically (via hv trap) consumes the message packet. */
	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

	mutex_unlock(&i915->vgpu.lock);
}

/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))

#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
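
Reviewer note (illustrative, not part of the diff): since GEN8_PDES is 4096 / 8 == 512, each level of the radix tree consumes ilog2(512) == 9 address bits above the 12-bit page offset, which is what __gen8_pte_index() extracts. A standalone model of that decomposition:

/* Standalone model of the gen8 radix walk; constants derived from the defines above. */
#include <stdint.h>
#include <stdio.h>

#define PTE_SHIFT	12
#define PD_BITS		9			/* ilog2(512) */

static unsigned int pte_index(uint64_t addr, int lvl)
{
	return (addr >> (PTE_SHIFT + PD_BITS * lvl)) & ((1u << PD_BITS) - 1);
}

int main(void)
{
	uint64_t addr = 0x0000123456789000ull;	/* arbitrary 48-bit address */
	int lvl;

	for (lvl = 3; lvl >= 0; lvl--)		/* pml4, pdp, pd, pt slots */
		printf("lvl %d -> index %u\n", lvl, pte_index(addr, lvl));
	return 0;
}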

static inline unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
	const int shift = gen8_pd_shift(lvl);
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	end += ~mask >> gen8_pd_shift(1);

	*idx = i915_pde_index(start, shift);
	if ((start ^ end) & mask)
		return GEN8_PDES - *idx;
	else
		return i915_pde_index(end, shift) - *idx;
}

static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	return (start ^ end) & mask && (start & ~mask) == 0;
}

static inline unsigned int gen8_pt_count(u64 start, u64 end)
{
	GEM_BUG_ON(start >= end);
	if ((start ^ end) >> gen8_pd_shift(1))
		return GEN8_PDES - (start & (GEN8_PDES - 1));
	else
		return end - start;
}

static inline unsigned int
gen8_pd_top_count(const struct i915_address_space *vm)
{
	unsigned int shift = __gen8_pte_shift(vm->top);
	return (vm->total + (1ull << shift) - 1) >> shift;
}

static inline struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

	if (vm->top == 2)
		return ppgtt->pd;
	else
		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}

static inline struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}

static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 int count, int lvl)
{
	if (lvl) {
		void **pde = pd->entry;

		do {
			if (!*pde)
				continue;

			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
		} while (pde++, --count);
	}

	free_px(vm, pd);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->i915))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
	free_scratch(vm);
}

static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 start, const u64 end, int lvl)
{
	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
		    gen8_pd_contains(start, end, lvl)) {
			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
			    __func__, vm, lvl + 1, idx, start, end);
			clear_pd_entry(pd, idx, scratch);
			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
			start += (u64)I915_PDES << gen8_pd_shift(lvl);
			continue;
		}

		if (lvl) {
			start = __gen8_ppgtt_clear(vm, as_pd(pt),
						   start, end, lvl);
		} else {
			unsigned int count;
			u64 *vaddr;

			count = gen8_pt_count(start, end);
			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
			    __func__, vm, lvl, start, end,
			    gen8_pd_index(start, 0), count,
			    atomic_read(&pt->used));
			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

			vaddr = kmap_atomic_px(pt);
			memset64(vaddr + gen8_pd_index(start, 0),
				 vm->scratch[0].encode,
				 count);
			kunmap_atomic(vaddr);

			atomic_sub(count, &pt->used);
			start += count;
		}

		if (release_pd_entry(pd, idx, pt, scratch))
			free_px(vm, pt);
	} while (idx++, --len);

	return start;
}

static void gen8_ppgtt_clear(struct i915_address_space *vm,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
			   start, start + length, vm->top);
}

static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 * const start, const u64 end, int lvl)
{
	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
	struct i915_page_table *alloc = NULL;
	unsigned int idx, len;
	int ret = 0;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(*start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, *start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

	spin_lock(&pd->lock);
	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (!pt) {
			spin_unlock(&pd->lock);

			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
			    __func__, vm, lvl + 1, idx);

			pt = fetch_and_zero(&alloc);
			if (lvl) {
				if (!pt) {
					pt = &alloc_pd(vm)->pt;
					if (IS_ERR(pt)) {
						ret = PTR_ERR(pt);
						goto out;
					}
				}

				fill_px(pt, vm->scratch[lvl].encode);
			} else {
				if (!pt) {
					pt = alloc_pt(vm);
					if (IS_ERR(pt)) {
						ret = PTR_ERR(pt);
						goto out;
					}
				}

				if (intel_vgpu_active(vm->i915) ||
				    gen8_pt_count(*start, end) < I915_PDES)
					fill_px(pt, vm->scratch[lvl].encode);
			}

			spin_lock(&pd->lock);
			if (likely(!pd->entry[idx]))
				set_pd_entry(pd, idx, pt);
			else
				alloc = pt, pt = pd->entry[idx];
		}

		if (lvl) {
			atomic_inc(&pt->used);
			spin_unlock(&pd->lock);

			ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
						 start, end, lvl);
			if (unlikely(ret)) {
				if (release_pd_entry(pd, idx, pt, scratch))
					free_px(vm, pt);
				goto out;
			}

			spin_lock(&pd->lock);
			atomic_dec(&pt->used);
			GEM_BUG_ON(!atomic_read(&pt->used));
		} else {
			unsigned int count = gen8_pt_count(*start, end);

			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
			    __func__, vm, lvl, *start, end,
			    gen8_pd_index(*start, 0), count,
			    atomic_read(&pt->used));

			atomic_add(count, &pt->used);
			/* All other pdes may be simultaneously removed */
			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
			*start += count;
		}
	} while (idx++, --len);
	spin_unlock(&pd->lock);
out:
	if (alloc)
		free_px(vm, alloc);
	return ret;
}

static int gen8_ppgtt_alloc(struct i915_address_space *vm,
			    u64 start, u64 length)
{
	u64 from;
	int err;

	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);
	from = start;

	err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
				 &start, start + length, vm->top);
	if (unlikely(err && from != start))
		__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
				   from, start, vm->top);

	return err;
}

static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
		      struct i915_page_directory *pdp,
		      struct sgt_dma *iter,
		      u64 idx,
		      enum i915_cache_level cache_level,
		      u32 flags)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	gen8_pte_t *vaddr;

	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	do {
		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

		iter->dma += I915_GTT_PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg) {
				idx = 0;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + iter->sg->length;
		}

		if (gen8_pd_index(++idx, 0) == 0) {
			if (gen8_pd_index(idx, 1) == 0) {
				/* Limited by sg length for 3lvl */
				if (gen8_pd_index(idx, 2) == 0)
					break;

				pd = pdp->entry[gen8_pd_index(idx, 2)];
			}

			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
		}
	} while (1);
	kunmap_atomic(vaddr);

	return idx;
}

static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
				   struct sgt_dma *iter,
				   enum i915_cache_level cache_level,
				   u32 flags)
{
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	u64 start = vma->node.start;
	dma_addr_t rem = iter->sg->length;

	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vma->vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		gen8_pte_t encode = pte_encode;
		unsigned int maybe_64K = -1;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index;

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = kmap_atomic_px(pd);
		} else {
			struct i915_page_table *pt =
				i915_pt_entry(pd, __gen8_pte_index(start, 1));

			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
				maybe_64K = __gen8_pte_index(start, 1);

			vaddr = kmap_atomic_px(pt);
		}

		do {
			GEM_BUG_ON(iter->sg->length < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = iter->sg->length;
				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
					maybe_64K = -1;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < I915_PDES);

		kunmap_atomic(vaddr);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled the whole page-table with 64K entries, or filled part
		 * of it and have reached the end of the sg table and we have
		 * enough padding.
		 */
		if (maybe_64K != -1 &&
		    (index == I915_PDES ||
		     (i915_vm_has_scratch_64K(vma->vm) &&
		      !iter->sg && IS_ALIGNED(vma->node.start +
					      vma->node.size,
					      I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = kmap_atomic_px(pd);
			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
			kunmap_atomic(vaddr);
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
				u16 i;

				encode = vma->vm->scratch[0].encode;
				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);

				kunmap_atomic(vaddr);
			}
		}

		vma->page_sizes.gtt |= page_size;
	} while (iter->sg);
}
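
Reviewer note (illustrative, not part of the diff): the 2M-eligibility test near the top of gen8_ppgtt_insert_huge() is pure arithmetic once the sg page-size flag is known: the DMA address must be 2M aligned, at least 2M must remain in the current segment, and the GTT offset must sit on a page-table boundary. A standalone model of just that check, with the constants assumed:

/* Standalone model of the 2M-eligibility test; constants assumed. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_2M		(2ull << 20)
#define PTE_SHIFT	12
#define PD_BITS		9

static bool can_use_2M(uint64_t gtt_start, uint64_t dma, uint64_t rem)
{
	return !(dma & (SZ_2M - 1)) &&		/* IS_ALIGNED(iter->dma, 2M) */
	       rem >= SZ_2M &&			/* enough contiguous backing */
	       !((gtt_start >> PTE_SHIFT) & ((1u << PD_BITS) - 1)); /* pt boundary */
}

int main(void)
{
	printf("%d\n", can_use_2M(0x40000000, 0x80200000, 4ull << 20)); /* 1 */
	printf("%d\n", can_use_2M(0x40001000, 0x80200000, 4ull << 20)); /* 0: off boundary */
	return 0;
}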

static void gen8_ppgtt_insert(struct i915_address_space *vm,
			      struct i915_vma *vma,
			      enum i915_cache_level cache_level,
			      u32 flags)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma);

	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
	} else {
		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;

		do {
			struct i915_page_directory * const pdp =
				gen8_pdp_for_page_index(vm, idx);

			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
						    cache_level, flags);
		} while (idx);

		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
	}
}

static int gen8_init_scratch(struct i915_address_space *vm)
{
	int ret;
	int i;

	/*
	 * If everybody agrees not to write into the scratch page,
	 * we can reuse it for all vm, keeping contexts and processes separate.
	 */
	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
		struct i915_address_space *clone = vm->gt->vm;

		GEM_BUG_ON(!clone->has_read_only);

		vm->scratch_order = clone->scratch_order;
		memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
		px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
		return 0;
	}

	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
	if (ret)
		return ret;

	vm->scratch[0].encode =
		gen8_pte_encode(px_dma(&vm->scratch[0]),
				I915_CACHE_LLC, vm->has_read_only);

	for (i = 1; i <= vm->top; i++) {
		if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
			goto free_scratch;

		fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
		vm->scratch[i].encode =
			gen8_pde_encode(px_dma(&vm->scratch[i]),
					I915_CACHE_LLC);
	}

	return 0;

free_scratch:
	free_scratch(vm);
	return -ENOMEM;
}

static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct i915_page_directory *pd = ppgtt->pd;
	unsigned int idx;

	GEM_BUG_ON(vm->top != 2);
	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
		struct i915_page_directory *pde;

		pde = alloc_pd(vm);
		if (IS_ERR(pde))
			return PTR_ERR(pde);

		fill_px(pde, vm->scratch[1].encode);
		set_pd_entry(pd, idx, pde);
		atomic_inc(px_used(pde)); /* keep pinned */
	}
	wmb();

	return 0;
}

static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
	const unsigned int count = gen8_pd_top_count(vm);
	struct i915_page_directory *pd;

	GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));

	pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
	atomic_inc(px_used(pd)); /* mark as pinned */
	return pd;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt_init(ppgtt, gt);
	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;

	/*
	 * From bdw, there is hw support for read-only pages in the PPGTT.
	 *
	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
	 * for now.
	 *
	 * Gen12 has inherited the same read-only fault issue from gen11.
	 */
	ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);

	/*
	 * There are only a few exceptions for gen >= 6: chv and bxt.
	 * And we are not sure about the latter so play safe for now.
	 */
	if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915))
		ppgtt->vm.pt_kmap_wc = true;

	err = gen8_init_scratch(&ppgtt->vm);
	if (err)
		goto err_free;

	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
	if (IS_ERR(ppgtt->pd)) {
		err = PTR_ERR(ppgtt->pd);
		goto err_free_scratch;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		err = gen8_preallocate_top_level_pdp(ppgtt);
		if (err)
			goto err_free_pd;
	}

	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
	ppgtt->vm.clear_range = gen8_ppgtt_clear;

	if (intel_vgpu_active(gt->i915))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

	return ppgtt;

err_free_pd:
	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
err_free_scratch:
	free_scratch(&ppgtt->vm);
err_free:
	kfree(ppgtt);
	return ERR_PTR(err);
}
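
Reviewer note (illustrative, not part of the diff): gen8_pd_top_count() is what makes the GEN8_3LVL_PDPES assertion in gen8_preallocate_top_level_pdp() hold: a 32-bit address space with top == 2 yields exactly four top-level entries (the four PDP registers from the comment above), while a 48-bit space with top == 3 fills a full 512-entry page. A standalone check of that arithmetic:

/* Standalone model of gen8_pd_top_count(); constants derived from the defines. */
#include <stdint.h>
#include <stdio.h>

#define PTE_SHIFT	12
#define PD_BITS		9

static unsigned int top_count(uint64_t total, int top)
{
	unsigned int shift = PTE_SHIFT + PD_BITS * top;

	return (total + (1ull << shift) - 1) >> shift;	/* round up */
}

int main(void)
{
	printf("3lvl: %u\n", top_count(1ull << 32, 2));	/* 4 PDP entries */
	printf("4lvl: %u\n", top_count(1ull << 48, 3));	/* 512 PML4 entries */
	return 0;
}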

@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 */

#ifndef __GEN8_PPGTT_H__
#define __GEN8_PPGTT_H__

struct intel_gt;

struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt);

#endif

@@ -43,144 +43,27 @@ intel_context_create(struct intel_engine_cs *engine)
	return ce;
}

int __intel_context_do_pin(struct intel_context *ce)
int intel_context_alloc_state(struct intel_context *ce)
{
	int err;
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (likely(!atomic_read(&ce->pin_count))) {
		intel_wakeref_t wakeref;
	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
			err = ce->ops->alloc(ce);
			if (unlikely(err))
				goto err;

			__set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
		}

		err = 0;
		with_intel_runtime_pm(ce->engine->uncore->rpm, wakeref)
			err = ce->ops->pin(ce);
		if (err)
			goto err;

		CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
			 ce->ring->head, ce->ring->tail);

		smp_mb__before_atomic(); /* flush pin before it is visible */
		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

	atomic_inc(&ce->pin_count);
	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

	mutex_unlock(&ce->pin_mutex);
	return 0;

err:
unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

void intel_context_unpin(struct intel_context *ce)
{
	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
		return;

	/* We may be called from inside intel_context_pin() to evict another */
	intel_context_get(ce);
	mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);

	if (likely(atomic_dec_and_test(&ce->pin_count))) {
		CE_TRACE(ce, "retire\n");

		ce->ops->unpin(ce);

		intel_context_active_release(ce);
	}

	mutex_unlock(&ce->pin_mutex);
	intel_context_put(ce);
}

static int __context_pin_state(struct i915_vma *vma)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH);
	if (err)
		return err;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	__i915_vma_unpin(vma);
}

__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire\n");

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	intel_ring_unpin(ce->ring);

	intel_context_put(ce);
}

static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);
	int err;

	intel_context_get(ce);

	err = intel_ring_pin(ce->ring);
	if (err)
		goto err_put;

	err = intel_timeline_pin(ce->timeline);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	intel_ring_unpin(ce->ring);
err_put:
	intel_context_put(ce);
	return err;
}

int intel_context_active_acquire(struct intel_context *ce)
static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

@@ -201,13 +84,184 @@ int intel_context_active_acquire(struct intel_context *ce)
	return 0;
}

void intel_context_active_release(struct intel_context *ce)
static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

int __intel_context_do_pin(struct intel_context *ce)
{
	int err;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (likely(!atomic_read(&ce->pin_count))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto err;

		err = ce->ops->pin(ce);
		if (unlikely(err))
			goto err_active;

		CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
			 ce->ring->head, ce->ring->tail);

		smp_mb__before_atomic(); /* flush pin before it is visible */
	}

	atomic_inc(&ce->pin_count);
	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

	mutex_unlock(&ce->pin_mutex);
	return 0;

err_active:
	intel_context_active_release(ce);
err:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

void intel_context_unpin(struct intel_context *ce)
{
	if (!atomic_dec_and_test(&ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra now so that it is not freed before we finish
	 * dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	intel_context_put(ce);
}
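
Reviewer note (illustrative, not part of the diff): the get/put bracket around intel_context_active_release() in the new intel_context_unpin() matters because dropping the active reference may free the context while we are still inside it. A minimal userspace model of the hazard and the guard (all names hypothetical):

/* Minimal refcount model; not the kernel API. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx { atomic_int ref; };

static void ctx_get(struct ctx *c) { atomic_fetch_add(&c->ref, 1); }

static void ctx_put(struct ctx *c)
{
	if (atomic_fetch_sub(&c->ref, 1) == 1) {	/* dropped last ref */
		printf("freeing ctx\n");
		free(c);
	}
}

static void active_release(struct ctx *c)
{
	ctx_put(c);	/* may otherwise be the final reference */
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));

	atomic_init(&c->ref, 1);
	ctx_get(c);		/* guard: keep c alive across the release */
	active_release(c);
	/* still safe to touch c here, only because of the guard */
	ctx_put(c);		/* drop the guard; the free happens here */
	return 0;
}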

static int __context_pin_state(struct i915_vma *vma)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}

static int __ring_active(struct intel_ring *ring)
{
	int err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		return err;

	err = intel_ring_pin(ring);
	if (err)
		goto err_active;

	return 0;

err_active:
	i915_active_release(&ring->vma->active);
	return err;
}

static void __ring_retire(struct intel_ring *ring)
{
	intel_ring_unpin(ring);
	i915_active_release(&ring->vma->active);
}

__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire\n");

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);

	intel_context_put(ce);
}

static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);
	int err;

	CE_TRACE(ce, "active\n");

	intel_context_get(ce);

	err = __ring_active(ce->ring);
	if (err)
		goto err_put;

	err = intel_timeline_pin(ce->timeline);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
err_put:
	intel_context_put(ce);
	return err;
}

void
intel_context_init(struct intel_context *ce,
		   struct intel_engine_cs *engine)

@@ -19,7 +19,7 @@

#define CE_TRACE(ce, fmt, ...) do { \
	const struct intel_context *ce__ = (ce); \
	ENGINE_TRACE(ce__->engine, "context:%llx" fmt, \
	ENGINE_TRACE(ce__->engine, "context:%llx " fmt, \
		     ce__->timeline->fence_context, \
		     ##__VA_ARGS__); \
} while (0)

@@ -31,6 +31,8 @@ void intel_context_fini(struct intel_context *ce);
struct intel_context *
intel_context_create(struct intel_engine_cs *engine);

int intel_context_alloc_state(struct intel_context *ce);

void intel_context_free(struct intel_context *ce);

/**

@@ -76,9 +78,14 @@ static inline void intel_context_unlock_pinned(struct intel_context *ce)

int __intel_context_do_pin(struct intel_context *ce);

static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
	return atomic_inc_not_zero(&ce->pin_count);
}

static inline int intel_context_pin(struct intel_context *ce)
{
	if (likely(atomic_inc_not_zero(&ce->pin_count)))
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin(ce);

@@ -116,9 +123,6 @@ static inline void intel_context_exit(struct intel_context *ce)
	ce->ops->exit(ce);
}

int intel_context_active_acquire(struct intel_context *ce);
void intel_context_active_release(struct intel_context *ce);

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);

@@ -17,6 +17,8 @@
#include "intel_engine_types.h"
#include "intel_sseu.h"

#define CONTEXT_REDZONE POISON_INUSE

struct i915_gem_context;
struct i915_vma;
struct intel_context;

@@ -202,7 +202,7 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
void intel_engine_get_instdone(const struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

void intel_engine_init_execlists(struct intel_engine_cs *engine);

@@ -282,7 +282,7 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,

bool intel_engines_are_idle(struct intel_gt *gt);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engine_flush_submission(struct intel_engine_cs *engine);
void intel_engine_flush_submission(struct intel_engine_cs *engine);

void intel_engines_reset_default_submission(struct intel_gt *gt);

@@ -914,8 +914,8 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
}

static u32
read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
		  i915_reg_t reg)
read_subslice_reg(const struct intel_engine_cs *engine,
		  int slice, int subslice, i915_reg_t reg)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_uncore *uncore = engine->uncore;

@@ -959,7 +959,7 @@ read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
}

/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
void intel_engine_get_instdone(const struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *i915 = engine->i915;

@@ -1047,10 +1047,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
	return idle;
}

bool intel_engine_flush_submission(struct intel_engine_cs *engine)
void intel_engine_flush_submission(struct intel_engine_cs *engine)
{
	struct tasklet_struct *t = &engine->execlists.tasklet;
	bool active = tasklet_is_locked(t);

	if (__tasklet_is_scheduled(t)) {
		local_bh_disable();

@@ -1061,13 +1060,10 @@ bool intel_engine_flush_submission(struct intel_engine_cs *engine)
			tasklet_unlock(t);
		}
		local_bh_enable();
		active = true;
	}

	/* Otherwise flush the tasklet if it was running on another cpu */
	tasklet_unlock_wait(t);

	return active;
}

/**
@@ -199,7 +199,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
		goto out_unlock;
	}

	rq->flags |= I915_REQUEST_SENTINEL;
	__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
	idle_pulse(engine, rq);

	__i915_request_commit(rq);

@@ -20,6 +20,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	struct intel_context *ce;
	void *map;

	ENGINE_TRACE(engine, "\n");

@@ -34,6 +35,27 @@ static int __engine_unpark(struct intel_wakeref *wf)
	if (!IS_ERR_OR_NULL(map))
		engine->pinned_default_state = map;

	/* Discard stale context state from across idling */
	ce = engine->kernel_context;
	if (ce) {
		GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));

		/* First poison the image to verify we never fully trust it */
		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
			struct drm_i915_gem_object *obj = ce->state->obj;
			int type = i915_coherent_map_type(engine->i915);

			map = i915_gem_object_pin_map(obj, type);
			if (!IS_ERR(map)) {
				memset(map, CONTEXT_REDZONE, obj->base.size);
				i915_gem_object_flush_map(obj);
				i915_gem_object_unpin_map(obj);
			}
		}

		ce->ops->reset(ce);
	}

	if (engine->unpark)
		engine->unpark(engine);

@@ -123,16 +145,16 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
	unsigned long flags;
	bool result = true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	GEM_BUG_ON(!intel_context_is_barrier(ce));

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot
	 * as we may be called while retiring the kernel context and so
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_user.h"
#include "intel_gt.h"

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)

@@ -200,6 +201,9 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
					uabi_node);
		char old[sizeof(engine->name)];

		if (intel_gt_has_init_error(engine->gt))
			continue; /* ignore incomplete engines */

		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
		engine->uabi_class = uabi_classes[engine->class];

File diff suppressed because it is too large

@@ -38,8 +38,6 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
{
	gt->ggtt = ggtt;

	intel_gt_sanitize(gt, false);
}

static void init_unused_ring(struct intel_gt *gt, u32 base)

@@ -77,10 +75,6 @@ int intel_gt_init_hw(struct intel_gt *gt)
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	ret = intel_gt_terminally_wedged(gt);
	if (ret)
		return ret;

	gt->last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */

@@ -372,7 +366,7 @@ static void intel_gt_fini_scratch(struct intel_gt *gt)
static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
		return &i915_ppgtt_create(gt->i915)->vm;
		return &i915_ppgtt_create(gt)->vm;
	else
		return i915_vm_get(&gt->ggtt->vm);
}

@@ -410,14 +404,13 @@ static int __engines_record_defaults(struct intel_gt *gt)
		struct intel_context *ce;
		struct i915_request *rq;

		/* We must be able to switch to something! */
		GEM_BUG_ON(!engine->kernel_context);

		err = intel_renderstate_init(&so, engine);
		if (err)
			goto out;

		/* We must be able to switch to something! */
		GEM_BUG_ON(!engine->kernel_context);
		engine->serial++; /* force the kernel context switch */

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);

@@ -58,9 +58,14 @@ static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
	return i915_ggtt_offset(gt->scratch) + field;
}

static inline bool intel_gt_is_wedged(struct intel_gt *gt)
static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
{
	return __intel_reset_failed(&gt->reset);
}

static inline bool intel_gt_has_init_error(const struct intel_gt *gt)
{
	return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
}

#endif /* __INTEL_GT_H__ */

@@ -126,17 +126,7 @@ static bool reset_engines(struct intel_gt *gt)
	return __intel_gt_reset(gt, ALL_ENGINES) == 0;
}

/**
 * intel_gt_sanitize: called after the GPU has lost power
 * @gt: the i915 GT container
 * @force: ignore a failed reset and sanitize engine state anyway
 *
 * Anytime we reset the GPU, either with an explicit GPU reset or through a
 * PCI power cycle, the GPU loses state and we must reset our state tracking
 * to match. Note that calling intel_gt_sanitize() if the GPU has not
 * been reset results in much confusion!
 */
void intel_gt_sanitize(struct intel_gt *gt, bool force)
static void gt_sanitize(struct intel_gt *gt, bool force)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

@@ -189,6 +179,10 @@ int intel_gt_resume(struct intel_gt *gt)
	enum intel_engine_id id;
	int err;

	err = intel_gt_has_init_error(gt);
	if (err)
		return err;

	GT_TRACE(gt, "\n");

	/*

@@ -201,30 +195,26 @@ int intel_gt_resume(struct intel_gt *gt)

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	intel_rc6_sanitize(&gt->rc6);
	gt_sanitize(gt, true);
	if (intel_gt_is_wedged(gt)) {
		err = -EIO;
		goto out_fw;
	}

	/* Only when the HW is re-initialised, can we replay the requests */
	err = intel_gt_init_hw(gt);
	if (err) {
		dev_err(gt->i915->drm.dev,
			"Failed to initialize GPU, declaring it wedged!\n");
		intel_gt_set_wedged(gt);
		goto err_fw;
		goto err_wedged;
	}

	intel_rps_enable(&gt->rps);
	intel_llc_enable(&gt->llc);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		intel_engine_pm_get(engine);

		ce = engine->kernel_context;
		if (ce) {
			GEM_BUG_ON(!intel_context_is_pinned(ce));
			ce->ops->reset(ce);
		}

		engine->serial++; /* kernel context lost */
		err = engine->resume(engine);

@@ -233,7 +223,7 @@ int intel_gt_resume(struct intel_gt *gt)
			dev_err(gt->i915->drm.dev,
				"Failed to restart %s (%d)\n",
				engine->name, err);
			break;
			goto err_wedged;
		}
	}

@@ -243,11 +233,14 @@ int intel_gt_resume(struct intel_gt *gt)

	user_forcewake(gt, false);

err_fw:
out_fw:
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_gt_pm_put(gt);

	return err;

err_wedged:
	intel_gt_set_wedged(gt);
	goto out_fw;
}

static void wait_for_suspend(struct intel_gt *gt)

@@ -315,7 +308,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)
		intel_llc_disable(&gt->llc);
	}

	intel_gt_sanitize(gt, false);
	gt_sanitize(gt, false);

	GT_TRACE(gt, "\n");
}

@@ -51,8 +51,6 @@ void intel_gt_pm_init_early(struct intel_gt *gt);
void intel_gt_pm_init(struct intel_gt *gt);
void intel_gt_pm_fini(struct intel_gt *gt);

void intel_gt_sanitize(struct intel_gt *gt, bool force);

void intel_gt_suspend_prepare(struct intel_gt *gt);
void intel_gt_suspend_late(struct intel_gt *gt);
int intel_gt_resume(struct intel_gt *gt);

@@ -14,13 +14,16 @@
#include "intel_gt_requests.h"
#include "intel_timeline.h"

static void retire_requests(struct intel_timeline *tl)
static bool retire_requests(struct intel_timeline *tl)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (!i915_request_retire(rq))
			break;
			return false;

	/* And check nothing new was submitted */
	return !i915_active_fence_isset(&tl->last_request);
}

static bool flush_submission(struct intel_gt *gt)

@@ -29,9 +32,13 @@ static bool flush_submission(struct intel_gt *gt)
	enum intel_engine_id id;
	bool active = false;

	if (!intel_gt_pm_is_awake(gt))
		return false;

	for_each_engine(engine, gt, id) {
		active |= intel_engine_flush_submission(engine);
		intel_engine_flush_submission(engine);
		active |= flush_work(&engine->retire_work);
		active |= flush_work(&engine->wakeref.work);
	}

	return active;

@@ -120,7 +127,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
		timeout = -timeout, interruptible = false;

	flush_submission(gt); /* kick the ksoftirqd tasklets */

	spin_lock(&timelines->lock);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		if (!mutex_trylock(&tl->mutex)) {

@@ -145,7 +151,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
			}
		}

		retire_requests(tl);
		if (!retire_requests(tl) || flush_submission(gt))
			active_count++;

		spin_lock(&timelines->lock);

@@ -153,8 +160,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
		list_safe_reset_next(tl, tn, link);
		if (atomic_dec_and_test(&tl->active_count))
			list_del(&tl->link);
		else
			active_count += i915_active_fence_isset(&tl->last_request);

		mutex_unlock(&tl->mutex);

@@ -169,9 +174,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
	list_for_each_entry_safe(tl, tn, &free, link)
		__intel_timeline_free(&tl->kref);

	if (flush_submission(gt))
		active_count++;

	return active_count ? timeout : 0;
}

@@ -0,0 +1,598 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>

#include "i915_trace.h"
#include "intel_gt.h"
#include "intel_gtt.h"

void stash_init(struct pagestash *stash)
{
	pagevec_init(&stash->pvec);
	spin_lock_init(&stash->lock);
}

static struct page *stash_pop_page(struct pagestash *stash)
{
	struct page *page = NULL;

	spin_lock(&stash->lock);
	if (likely(stash->pvec.nr))
		page = stash->pvec.pages[--stash->pvec.nr];
	spin_unlock(&stash->lock);

	return page;
}

static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
{
	unsigned int nr;

	spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);

	nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
	memcpy(stash->pvec.pages + stash->pvec.nr,
	       pvec->pages + pvec->nr - nr,
	       sizeof(pvec->pages[0]) * nr);
	stash->pvec.nr += nr;

	spin_unlock(&stash->lock);

	pvec->nr -= nr;
}

static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct pagevec stack;
	struct page *page;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	page = stash_pop_page(&vm->free_pages);
	if (page)
		return page;

	if (!vm->pt_kmap_wc)
		return alloc_page(gfp);

	/* Look in our global stash of WC pages... */
	page = stash_pop_page(&vm->i915->mm.wc_stash);
	if (page)
		return page;

	/*
	 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
	 *
	 * We have to be careful as page allocation may trigger the shrinker
	 * (via direct reclaim) which will fill up the WC stash underneath us.
	 * So we add our WB pages into a temporary pvec on the stack and merge
	 * them into the WC stash after all the allocations are complete.
	 */
	pagevec_init(&stack);
	do {
		struct page *page;

		page = alloc_page(gfp);
		if (unlikely(!page))
			break;

		stack.pages[stack.nr++] = page;
	} while (pagevec_space(&stack));

	if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
		page = stack.pages[--stack.nr];

		/* Merge spare WC pages to the global stash */
		if (stack.nr)
			stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);

		/* Push any surplus WC pages onto the local VM stash */
		if (stack.nr)
			stash_push_pagevec(&vm->free_pages, &stack);
	}

	/* Return unwanted leftovers */
	if (unlikely(stack.nr)) {
		WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
		__pagevec_release(&stack);
	}

	return page;
}
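
Reviewer note (illustrative, not part of the diff): vm_alloc_page() batches page allocations so that the expensive set_pages_array_wc() call, which can involve a stop_machine() on x86, is paid once per pagevec rather than once per page. A standalone model of that amortization, with the conversion step simulated:

/* Standalone model of batch-and-convert; the WC conversion is simulated. */
#include <stdio.h>

#define BATCH 15	/* roughly PAGEVEC_SIZE; assumed for the model */

static int conversions;	/* counts the expensive "set WC" calls */

static void convert_batch(int n)
{
	conversions++;	/* one expensive call covers n pages */
	printf("converted %d pages in one call\n", n);
}

int main(void)
{
	int need = 40, stash = 0;

	while (need) {
		if (!stash) {		/* refill: allocate a whole batch */
			convert_batch(BATCH);
			stash = BATCH;
		}
		stash--;		/* hand out one page */
		need--;
	}
	printf("%d conversions for 40 pages\n", conversions);	/* 3, not 40 */
	return 0;
}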

static void vm_free_pages_release(struct i915_address_space *vm,
				  bool immediate)
{
	struct pagevec *pvec = &vm->free_pages.pvec;
	struct pagevec stack;

	lockdep_assert_held(&vm->free_pages.lock);
	GEM_BUG_ON(!pagevec_count(pvec));

	if (vm->pt_kmap_wc) {
		/*
		 * When we use WC, first fill up the global stash and then
		 * only if full immediately free the overflow.
		 */
		stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);

		/*
		 * As we have made some room in the VM's free_pages,
		 * we can wait for it to fill again. Unless we are
		 * inside i915_address_space_fini() and must
		 * immediately release the pages!
		 */
		if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
			return;

		/*
		 * We have to drop the lock to allow ourselves to sleep,
		 * so take a copy of the pvec and clear the stash for
		 * others to use it as we sleep.
		 */
		stack = *pvec;
		pagevec_reinit(pvec);
		spin_unlock(&vm->free_pages.lock);

		pvec = &stack;
		set_pages_array_wb(pvec->pages, pvec->nr);

		spin_lock(&vm->free_pages.lock);
	}

	__pagevec_release(pvec);
}

static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
	/*
	 * On !llc, we need to change the pages back to WB. We only do so
	 * in bulk, so we rarely need to change the page attributes here,
	 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
	 * To make detection of the possible sleep more likely, use an
	 * unconditional might_sleep() for everybody.
	 */
	might_sleep();
	spin_lock(&vm->free_pages.lock);
	while (!pagevec_space(&vm->free_pages.pvec))
		vm_free_pages_release(vm, false);
	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
	pagevec_add(&vm->free_pages.pvec, page);
	spin_unlock(&vm->free_pages.lock);
}

void __i915_vm_close(struct i915_address_space *vm)
{
	struct i915_vma *vma, *vn;

	mutex_lock(&vm->mutex);
	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		/* Keep the obj (and hence the vma) alive as _we_ destroy it */
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		__i915_vma_put(vma);

		i915_gem_object_put(obj);
	}
	GEM_BUG_ON(!list_empty(&vm->bound_list));
	mutex_unlock(&vm->mutex);
}

void i915_address_space_fini(struct i915_address_space *vm)
{
	spin_lock(&vm->free_pages.lock);
	if (pagevec_count(&vm->free_pages.pvec))
		vm_free_pages_release(vm, true);
	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
	spin_unlock(&vm->free_pages.lock);

	drm_mm_takedown(&vm->mm);

	mutex_destroy(&vm->mutex);
}

static void __i915_vm_release(struct work_struct *work)
{
	struct i915_address_space *vm =
		container_of(work, struct i915_address_space, rcu.work);

	vm->cleanup(vm);
	i915_address_space_fini(vm);

	kfree(vm);
}

void i915_vm_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, struct i915_address_space, ref);

	GEM_BUG_ON(i915_is_ggtt(vm));
	trace_i915_ppgtt_release(vm);

	queue_rcu_work(vm->i915->wq, &vm->rcu);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
	kref_init(&vm->ref);
	INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
	atomic_set(&vm->open, 1);

	/*
	 * The vm->mutex must be reclaim safe (for use in the shrinker).
	 * Do a dummy acquire now under fs_reclaim so that any allocation
	 * attempt holding the lock is immediately reported by lockdep.
	 */
	mutex_init(&vm->mutex);
	lockdep_set_subclass(&vm->mutex, subclass);
	i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);

	GEM_BUG_ON(!vm->total);
	drm_mm_init(&vm->mm, 0, vm->total);
	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

	stash_init(&vm->free_pages);

	INIT_LIST_HEAD(&vm->bound_list);
}

void clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);

	if (vma->pages != vma->obj->mm.pages) {
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}

static int __setup_page_dma(struct i915_address_space *vm,
			    struct i915_page_dma *p,
			    gfp_t gfp)
{
	p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
	if (unlikely(!p->page))
		return -ENOMEM;

	p->daddr = dma_map_page_attrs(vm->dma,
				      p->page, 0, PAGE_SIZE,
				      PCI_DMA_BIDIRECTIONAL,
				      DMA_ATTR_SKIP_CPU_SYNC |
				      DMA_ATTR_NO_WARN);
	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
		vm_free_page(vm, p->page);
		return -ENOMEM;
	}

	return 0;
}

int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
{
	return __setup_page_dma(vm, p, __GFP_HIGHMEM);
}

void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
{
	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	vm_free_page(vm, p->page);
}

void
fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
{
	kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
}

int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
	unsigned long size;

	/*
	 * In order to utilize 64K pages for an object with a size < 2M, we will
	 * need to support a 64K scratch page, given that every 16th entry for a
	 * page-table operating in 64K mode must point to a properly aligned 64K
	 * region, including any PTEs which happen to point to scratch.
	 *
	 * This is only relevant for the 48b PPGTT where we support
	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
	 * scratch (read-only) between all vm, we create one 64k scratch page
	 * for all.
	 */
	size = I915_GTT_PAGE_SIZE_4K;
	if (i915_vm_is_4lvl(vm) &&
	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
		size = I915_GTT_PAGE_SIZE_64K;
		gfp |= __GFP_NOWARN;
	}
	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;

	do {
		unsigned int order = get_order(size);
		struct page *page;
		dma_addr_t addr;

		page = alloc_pages(gfp, order);
		if (unlikely(!page))
			goto skip;

		addr = dma_map_page_attrs(vm->dma,
					  page, 0, size,
					  PCI_DMA_BIDIRECTIONAL,
					  DMA_ATTR_SKIP_CPU_SYNC |
					  DMA_ATTR_NO_WARN);
		if (unlikely(dma_mapping_error(vm->dma, addr)))
			goto free_page;

		if (unlikely(!IS_ALIGNED(addr, size)))
			goto unmap_page;

		vm->scratch[0].base.page = page;
		vm->scratch[0].base.daddr = addr;
		vm->scratch_order = order;
		return 0;

unmap_page:
		dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
free_page:
		__free_pages(page, order);
skip:
		if (size == I915_GTT_PAGE_SIZE_4K)
			return -ENOMEM;

		size = I915_GTT_PAGE_SIZE_4K;
		gfp &= ~__GFP_NOWARN;
	} while (1);
}

void cleanup_scratch_page(struct i915_address_space *vm)
{
	struct i915_page_dma *p = px_base(&vm->scratch[0]);
	unsigned int order = vm->scratch_order;

	dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
		       PCI_DMA_BIDIRECTIONAL);
	__free_pages(p->page, order);
}

void free_scratch(struct i915_address_space *vm)
{
	int i;

	if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
		return;

	for (i = 1; i <= vm->top; i++) {
		if (!px_dma(&vm->scratch[i]))
			break;
		cleanup_page_dma(vm, px_base(&vm->scratch[i]));
	}

	cleanup_scratch_page(vm);
}

void gtt_write_workarounds(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;

	/*
	 * This function is for gtt related workarounds. This function is
	 * called on driver load and after a GPU reset, so you can place
	 * workarounds here even if they get overwritten by GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
	if (IS_BROADWELL(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_GEN9_LP(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
	else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);

	/*
	 * To support 64K PTEs we need to first enable the use of the
	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
	 * shouldn't be needed after GEN10.
	 *
	 * 64K pages were first introduced from BDW+, although technically they
	 * only *work* from gen9+. For pre-BDW we instead have the option for
	 * 32K pages, but we don't currently have any support for it in our
	 * driver.
	 */
	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
	    INTEL_GEN(i915) <= 10)
		intel_uncore_rmw(uncore,
				 GEN8_GAMW_ECO_DEV_RW_IA,
				 0,
				 GAMW_ECO_ENABLE_64K_IPS_FIELD);

	if (IS_GEN_RANGE(i915, 8, 11)) {
		bool can_use_gtt_cache = true;

		/*
		 * According to the BSpec if we use 2M/1G pages then we also
		 * need to disable the GTT cache. At least on BDW we can see
		 * visual corruption when using 2M pages, and not disabling the
		 * GTT cache.
		 */
		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
			can_use_gtt_cache = false;

		/* WaGttCachingOffByDefault */
		intel_uncore_write(uncore,
				   HSW_GTT_CACHE_EN,
				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
		WARN_ON_ONCE(can_use_gtt_cache &&
			     intel_uncore_read(uncore,
					       HSW_GTT_CACHE_EN) == 0);
	}
}

u64 gen8_pte_encode(dma_addr_t addr,
		    enum i915_cache_level level,
		    u32 flags)
{
	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~_PAGE_RW;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}
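
As a quick worked example of the encoding above (using the PPAT defines from intel_gtt.h; illustrative only):

/*
 * gen8_pte_encode(0x1000, I915_CACHE_NONE, PTE_READ_ONLY)
 *   = 0x1000 | _PAGE_PRESENT | PPAT_UNCACHED
 * i.e. the page address with the present bit set, _PAGE_RW cleared for
 * the read-only request, and the PPAT index bits selecting uncached.
 */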

static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
	/* TGL doesn't support LLC or AGE settings */
	intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}

static void cnl_setup_private_ppat(struct intel_uncore *uncore)
{
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(0),
			   GEN8_PPAT_WB | GEN8_PPAT_LLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(1),
			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(2),
			   GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(3),
			   GEN8_PPAT_UC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(4),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(5),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(6),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(7),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}

/*
 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases.
 */
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
	u64 pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |	/* for scanout with eLLC */
	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
	u64 pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */

	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

void setup_private_pat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;

	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	if (INTEL_GEN(i915) >= 12)
		tgl_setup_private_ppat(uncore);
	else if (INTEL_GEN(i915) >= 10)
		cnl_setup_private_ppat(uncore);
	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
		chv_setup_private_ppat(uncore);
	else
		bdw_setup_private_ppat(uncore);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif

@@ -0,0 +1,587 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_gem_fence_reg.h"
#include "i915_selftest.h"
#include "i915_vma_types.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE	I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE	I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK	-I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT	I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE	-1
#define I915_MAX_NUM_FENCES	32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS	6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)

/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			REG_BIT(0)

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			REG_BIT(0)
#define NUM_PTE(pde_shift)		(1 << (pde_shift - PAGE_SHIFT))

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
#define BYT_PTE_WRITEABLE		REG_BIT(1)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
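
A worked expansion of the macro may help (illustrative):

/*
 * HSW_CACHEABILITY_CONTROL(0x8):
 *   (0x8 & 0x7) << 1        == 0        (low three bits empty)
 *   (0x8 & 0x8) << (11 - 3) == BIT(11)  (fourth bit lands in PTE bit 11)
 * so HSW_WB_ELLC_LLC_AGE3 sets only bit 11 of the PTE.
 */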

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
#define GEN8_3LVL_PDPES			4
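
A worked decomposition for the 48b layout (illustrative address):

/*
 * For GVA 0x80_4060_3000:
 *   PML4E  = (addr >> 39) & 0x1ff = 1
 *   PDPE   = (addr >> 30) & 0x1ff = 1
 *   PDE    = (addr >> 21) & 0x1ff = 3
 *   PTE    = (addr >> 12) & 0x1ff = 3
 *   offset = addr & 0xfff         = 0
 */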

#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE			0 /* WB LLC */
#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			REG_BIT(6)
#define GEN8_PPAT_AGE(x)		((x)<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M   BIT(7)

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;

		/*
		 * For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin
		 */
		u32 ggtt_offset;
	};
};

struct i915_page_scratch {
	struct i915_page_dma base;
	u64 encode;
};

struct i915_page_table {
	struct i915_page_dma base;
	atomic_t used;
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void *entry[512];
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct i915_page_dma *, __x, \
	__px_choose_expr(px, struct i915_page_scratch *, &__x->base, \
	__px_choose_expr(px, struct i915_page_table *, &__x->base, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \
	(void)0))))
#define px_dma(px) (px_base(px)->daddr)

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)
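
The px_*() helpers dispatch on the pointer type at compile time via __builtin_choose_expr; a short sketch of their intended use (illustrative):

/*
 * struct i915_page_directory *pd;
 * struct i915_page_table *pt;
 *
 *	px_dma(pd);	// expands via &pd->pt.base
 *	px_dma(pt);	// expands via &pt->base
 *
 * Any other pointer type resolves to (void)0 and fails to compile,
 * catching misuse statically.
 */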

enum i915_cache_level;

struct drm_i915_file_private;
struct drm_i915_gem_object;
struct i915_vma;
struct intel_gt;

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_vma *vma);

	int (*set_pages)(struct i915_vma *vma);
	void (*clear_pages)(struct i915_vma *vma);
};

struct pagestash {
	spinlock_t lock;
	struct pagevec pvec;
};

void stash_init(struct pagestash *stash);

struct i915_address_space {
	struct kref ref;
	struct rcu_work rcu;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	/*
	 * Every address space belongs to a struct file - except for the global
	 * GTT that is owned by the driver (and so @file is set to NULL). In
	 * principle, no information should leak from one context to another
	 * (or between files/processes etc) unless explicitly shared by the
	 * owner. Tracking the owner is important in order to free up per-file
	 * objects along with the file, to aid resource tracking, and to
	 * assign blame.
	 */
	struct drm_i915_file_private *file;
	u64 total;	/* size addr space maps (ex. 2GB for ggtt) */
	u64 reserved;	/* size addr space reserved */

	unsigned int bind_async_flags;

	/*
	 * Each active user context has its own address space (in full-ppgtt).
	 * Since the vm may be shared between multiple contexts, we count how
	 * many contexts keep us "open". Once open hits zero, we are closed
	 * and do not allow any new attachments, and proceed to shutdown our
	 * vma and page directories.
	 */
	atomic_t open;

	struct mutex mutex; /* protects vma and our lists */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1

	struct i915_page_scratch scratch[4];
	unsigned int scratch_order;
	unsigned int top;

	/**
	 * List of vma currently bound.
	 */
	struct list_head bound_list;

	struct pagestash free_pages;

	/* Global GTT */
	bool is_ggtt:1;

	/* Some systems require uncached updates of the page directories */
	bool pt_kmap_wc:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY BIT(0)

	int (*allocate_va_range)(struct i915_address_space *vm,
				 u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;		/* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmaped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
	struct intel_wakeref_auto userfault_wakeref;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}
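
A quick sanity check of the predicate:

/*
 * e.g. a 48b ppgtt has vm->total == BIT_ULL(48), so (total - 1) >> 32 is
 * non-zero (4 levels), while a 4GiB ggtt gives (BIT_ULL(32) - 1) >> 32 == 0.
 */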

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

void i915_vm_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

static inline struct i915_address_space *
i915_vm_open(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	atomic_inc(&vm->open);
	return i915_vm_get(vm);
}

static inline bool
i915_vm_tryopen(struct i915_address_space *vm)
{
	if (atomic_add_unless(&vm->open, 1, 0))
		return i915_vm_get(vm);

	return false;
}

void __i915_vm_close(struct i915_address_space *vm);

static inline void
i915_vm_close(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	if (atomic_dec_and_test(&vm->open))
		__i915_vm_close(vm);

	i915_vm_put(vm);
}
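
The vm thus carries two counters: ref for the memory itself and open for user attachments. A minimal usage sketch (illustrative; attach_example() is a hypothetical caller, not part of the patch):

static int attach_example(struct i915_address_space *vm)
{
	if (!i915_vm_tryopen(vm))	/* vm already closed to new users */
		return -ENOENT;

	/* ... bind vma, use the vm ... */

	i915_vm_close(vm);		/* drops the open count and the ref */
	return 0;
}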

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
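
A worked example with the gen6 parameters defined above (pde_shift == GEN6_PDE_SHIFT == 22, so 1024 PTEs per table; illustrative):

/*
 * i915_pte_count(0x3ff000, 0x2000, 22):
 *   addr and end (0x401000) sit in different pde windows, so only the
 *   ptes up to the boundary are counted:
 *   1024 - i915_pte_index(0x3ff000, 22) = 1024 - 1023 = 1.
 */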

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_dma *pt = ppgtt->pd->entry[n];

	return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top]));
}
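
Note the GNU a ?: b shorthand in i915_page_dir_dma_addr(); a gloss for readers (my reading of the fallback, not part of the patch):

/*
 * pt ?: px_base(...) evaluates pt once and falls back to the top-level
 * scratch page when the directory slot is unpopulated (NULL), which is
 * the same page an unused hardware directory entry points at.
 */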

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);

void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915);

u64 gen8_pte_encode(dma_addr_t addr,
		    enum i915_cache_level level,
		    u32 flags);

int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);

#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

void
fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {					\
	u64 v__ = lower_32_bits(v);				\
	fill_px((px), v__ << 32 | v__);				\
} while (0)

int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp);
void cleanup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(size_t sz);

void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd);

#define free_px(vm, px) free_pd(vm, px_base(px))

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_dma * const to,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct i915_page_scratch * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct i915_page_scratch * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

int ggtt_set_pages(struct i915_vma *vma);
int ppgtt_set_pages(struct i915_vma *vma);
void clear_pages(struct i915_vma *vma);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);

static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma *vma) {
	struct scatterlist *sg = vma->pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg->length };
}

#endif

@@ -488,17 +488,23 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
 	return desc;
 }
 
-static u32 *set_offsets(u32 *regs,
+static inline unsigned int dword_in_page(void *addr)
+{
+	return offset_in_page(addr) / sizeof(u32);
+}
+
+static void set_offsets(u32 *regs,
 	const u8 *data,
-	const struct intel_engine_cs *engine)
+	const struct intel_engine_cs *engine,
+	bool clear)
 #define NOP(x) (BIT(7) | (x))
-#define LRI(count, flags) ((flags) << 6 | (count))
+#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6)))
 #define POSTED BIT(0)
 #define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
 #define REG16(x) \
 	(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
 	(((x) >> 2) & 0x7f)
-#define END() 0
+#define END(x) 0, (x)
 {
 	const u32 base = engine->mmio_base;

@@ -506,7 +512,10 @@ static u32 *set_offsets(u32 *regs,
 		u8 count, flags;
 
 		if (*data & BIT(7)) { /* skip */
-			regs += *data++ & ~BIT(7);
+			count = *data++ & ~BIT(7);
+			if (clear)
+				memset32(regs, MI_NOOP, count);
+			regs += count;
 			continue;
 		}

@@ -532,12 +541,25 @@ static u32 *set_offsets(u32 *regs,
 			offset |= v & ~BIT(7);
 		} while (v & BIT(7));
 
-		*regs = base + (offset << 2);
+		regs[0] = base + (offset << 2);
+		if (clear)
+			regs[1] = 0;
 		regs += 2;
 		} while (--count);
 	}
 
-	return regs;
+	if (clear) {
+		u8 count = *++data;
+
+		/* Clear past the tail for HW access */
+		GEM_BUG_ON(dword_in_page(regs) > count);
+		memset32(regs, MI_NOOP, count - dword_in_page(regs));
+
+		/* Close the batch; used mainly by live_lrc_layout() */
+		*regs = MI_BATCH_BUFFER_END;
+		if (INTEL_GEN(engine->i915) >= 10)
+			*regs |= BIT(0);
+	}
 }
 
 static const u8 gen8_xcs_offsets[] = {

@@ -572,7 +594,7 @@ static const u8 gen8_xcs_offsets[] = {
 	REG16(0x200),
 	REG(0x028),
 
-	END(),
+	END(80)
 };
 
 static const u8 gen9_xcs_offsets[] = {

@@ -656,7 +678,7 @@ static const u8 gen9_xcs_offsets[] = {
 	REG16(0x67c),
 	REG(0x068),
 
-	END(),
+	END(176)
 };
 
 static const u8 gen12_xcs_offsets[] = {

@@ -688,7 +710,7 @@ static const u8 gen12_xcs_offsets[] = {
 	REG16(0x274),
 	REG16(0x270),
 
-	END(),
+	END(80)
 };
 
 static const u8 gen8_rcs_offsets[] = {

@@ -725,7 +747,91 @@ static const u8 gen8_rcs_offsets[] = {
 	LRI(1, 0),
 	REG(0x0c8),
 
-	END(),
+	END(80)
 };
 
+static const u8 gen9_rcs_offsets[] = {
+	NOP(1),
+	LRI(14, POSTED),
+	REG16(0x244),
+	REG(0x34),
+	REG(0x30),
+	REG(0x38),
+	REG(0x3c),
+	REG(0x168),
+	REG(0x140),
+	REG(0x110),
+	REG(0x11c),
+	REG(0x114),
+	REG(0x118),
+	REG(0x1c0),
+	REG(0x1c4),
+	REG(0x1c8),
+
+	NOP(3),
+	LRI(9, POSTED),
+	REG16(0x3a8),
+	REG16(0x28c),
+	REG16(0x288),
+	REG16(0x284),
+	REG16(0x280),
+	REG16(0x27c),
+	REG16(0x278),
+	REG16(0x274),
+	REG16(0x270),
+
+	NOP(13),
+	LRI(1, 0),
+	REG(0xc8),
+
+	NOP(13),
+	LRI(44, POSTED),
+	REG(0x28),
+	REG(0x9c),
+	REG(0xc0),
+	REG(0x178),
+	REG(0x17c),
+	REG16(0x358),
+	REG(0x170),
+	REG(0x150),
+	REG(0x154),
+	REG(0x158),
+	REG16(0x41c),
+	REG16(0x600),
+	REG16(0x604),
+	REG16(0x608),
+	REG16(0x60c),
+	REG16(0x610),
+	REG16(0x614),
+	REG16(0x618),
+	REG16(0x61c),
+	REG16(0x620),
+	REG16(0x624),
+	REG16(0x628),
+	REG16(0x62c),
+	REG16(0x630),
+	REG16(0x634),
+	REG16(0x638),
+	REG16(0x63c),
+	REG16(0x640),
+	REG16(0x644),
+	REG16(0x648),
+	REG16(0x64c),
+	REG16(0x650),
+	REG16(0x654),
+	REG16(0x658),
+	REG16(0x65c),
+	REG16(0x660),
+	REG16(0x664),
+	REG16(0x668),
+	REG16(0x66c),
+	REG16(0x670),
+	REG16(0x674),
+	REG16(0x678),
+	REG16(0x67c),
+	REG(0x68),
+
+	END(176)
+};
+
 static const u8 gen11_rcs_offsets[] = {
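
For readers decoding these tables, a short gloss on the byte encoding produced by the macros above (illustrative reading, not part of the patch):

/*
 * gen9_rcs_offsets begins:
 *   NOP(1)          -> 0x81: advance one dword in the context image
 *   LRI(14, POSTED) -> a block of 14 register/value pairs (posted writes)
 *   REG16(0x244)    -> two bytes encoding mmio offset 0x244 from
 *                      engine->mmio_base
 * and the trailing END(176) records the dword extent of the register
 * state, which the new clear path uses to pad the rest of the page with
 * MI_NOOP and a closing MI_BATCH_BUFFER_END.
 */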
@@ -766,7 +872,7 @@ static const u8 gen11_rcs_offsets[] = {
 	LRI(1, 0),
 	REG(0x0c8),
 
-	END(),
+	END(80)
 };
 
 static const u8 gen12_rcs_offsets[] = {

@@ -807,7 +913,7 @@ static const u8 gen12_rcs_offsets[] = {
 	LRI(1, 0),
 	REG(0x0c8),
 
-	END(),
+	END(80)
 };
 
 #undef END

@@ -832,6 +938,8 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
 			return gen12_rcs_offsets;
 		else if (INTEL_GEN(engine->i915) >= 11)
 			return gen11_rcs_offsets;
+		else if (INTEL_GEN(engine->i915) >= 9)
+			return gen9_rcs_offsets;
 		else
 			return gen8_rcs_offsets;
 	} else {

@@ -1108,7 +1216,7 @@ __execlists_schedule_in(struct i915_request *rq)
 		/* We don't need a strict matching tag, just different values */
 		ce->lrc_desc &= ~GENMASK_ULL(47, 37);
 		ce->lrc_desc |=
-			(u64)(engine->context_tag++ % NUM_CONTEXT_TAG) <<
+			(u64)(++engine->context_tag % NUM_CONTEXT_TAG) <<
 			GEN11_SW_CTX_ID_SHIFT;
 		BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
 	}

@@ -1243,10 +1351,6 @@ static u64 execlists_update_context(struct i915_request *rq)
 	 */
 	wmb();
 
-	/* Wa_1607138340:tgl */
-	if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0))
-		desc |= CTX_DESC_FORCE_RESTORE;
-
 	ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
 	return desc;
 }

@@ -1430,8 +1534,8 @@ static bool can_merge_rq(const struct i915_request *prev,
 	if (i915_request_completed(next))
 		return true;
 
-	if (unlikely((prev->flags ^ next->flags) &
-		     (I915_REQUEST_NOPREEMPT | I915_REQUEST_SENTINEL)))
+	if (unlikely((prev->fence.flags ^ next->fence.flags) &
+		     (I915_FENCE_FLAG_NOPREEMPT | I915_FENCE_FLAG_SENTINEL)))
 		return false;
 
 	if (!can_merge_ctx(prev->context, next->context))

@@ -1443,7 +1547,7 @@ static bool can_merge_rq(const struct i915_request *prev,
 static void virtual_update_register_offsets(u32 *regs,
 					    struct intel_engine_cs *engine)
 {
-	set_offsets(regs, reg_offsets(engine), engine);
+	set_offsets(regs, reg_offsets(engine), engine, false);
 }
 
 static bool virtual_matches(const struct virtual_engine *ve,

@@ -1590,7 +1694,7 @@ active_timeslice(const struct intel_engine_cs *engine)
 {
 	const struct i915_request *rq = *engine->execlists.active;
 
-	if (i915_request_completed(rq))
+	if (!rq || i915_request_completed(rq))
 		return 0;
 
 	if (engine->execlists.switch_priority_hint < effective_prio(rq))

@@ -1636,6 +1740,11 @@ static void set_preempt_timeout(struct intel_engine_cs *engine)
 			     active_preempt_timeout(engine));
 }
 
+static inline void clear_ports(struct i915_request **ports, int count)
+{
+	memset_p((void **)ports, NULL, count);
+}
+
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;

@@ -1996,10 +2105,9 @@ done:
 			goto skip_submit;
 		}
+		clear_ports(port + 1, last_port - port);
 
-		memset(port + 1, 0, (last_port - port) * sizeof(*port));
 		execlists_submit_ports(engine);
 
 		set_preempt_timeout(engine);
 	} else {
 skip_submit:

@@ -2014,13 +2122,14 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
 
 	for (port = execlists->pending; *port; port++)
 		execlists_schedule_out(*port);
-	memset(execlists->pending, 0, sizeof(execlists->pending));
+	clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
 
 	/* Mark the end of active before we overwrite *active */
 	for (port = xchg(&execlists->active, execlists->pending); *port; port++)
 		execlists_schedule_out(*port);
-	WRITE_ONCE(execlists->active,
-		   memset(execlists->inflight, 0, sizeof(execlists->inflight)));
+	clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+
+	WRITE_ONCE(execlists->active, execlists->inflight);
 }
 
 static inline void

@@ -2176,7 +2285,6 @@ static void process_csb(struct intel_engine_cs *engine)
 
 			/* Point active to the new ELSP; prevent overwriting */
 			WRITE_ONCE(execlists->active, execlists->pending);
-			set_timeslice(engine);
 
 			if (!inject_preempt_hang(execlists))
 				ring_set_paused(engine, 0);

@@ -2217,6 +2325,7 @@ static void process_csb(struct intel_engine_cs *engine)
 	} while (head != tail);
 
 	execlists->csb_head = head;
+	set_timeslice(engine);
 
 	/*
 	 * Gen11 has proven to fail wrt global observation point between

@@ -2399,7 +2508,7 @@ set_redzone(void *vaddr, const struct intel_engine_cs *engine)
 
 	vaddr += engine->context_size;
 
-	memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
+	memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
 }
 
 static void

@@ -2410,7 +2519,7 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
 
 	vaddr += engine->context_size;
 
-	if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE))
+	if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
 		dev_err_once(engine->i915->drm.dev,
 			     "%s context redzone overwritten!\n",
 			     engine->name);

@@ -2453,33 +2562,21 @@ __execlists_context_pin(struct intel_context *ce,
 			struct intel_engine_cs *engine)
 {
 	void *vaddr;
-	int ret;
 
 	GEM_BUG_ON(!ce->state);
-
-	ret = intel_context_active_acquire(ce);
-	if (ret)
-		goto err;
+	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
 
 	vaddr = i915_gem_object_pin_map(ce->state->obj,
 					i915_coherent_map_type(engine->i915) |
 					I915_MAP_OVERRIDE);
-	if (IS_ERR(vaddr)) {
-		ret = PTR_ERR(vaddr);
-		goto unpin_active;
-	}
+	if (IS_ERR(vaddr))
+		return PTR_ERR(vaddr);
 
-	ce->lrc_desc = lrc_descriptor(ce, engine);
+	ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 	__execlists_update_reg_state(ce, engine);
 
 	return 0;
-
-unpin_active:
-	intel_context_active_release(ce);
-err:
-	return ret;
 }
 
 static int execlists_context_pin(struct intel_context *ce)

@@ -2494,6 +2591,9 @@ static int execlists_context_alloc(struct intel_context *ce)
 
 static void execlists_context_reset(struct intel_context *ce)
 {
+	CE_TRACE(ce, "reset\n");
+	GEM_BUG_ON(!intel_context_is_pinned(ce));
+
 	/*
 	 * Because we emit WA_TAIL_DWORDS there may be a disparity
 	 * between our bookkeeping in ce->ring->head and ce->ring->tail and

@@ -2510,8 +2610,14 @@ static void execlists_context_reset(struct intel_context *ce)
 	 * So to avoid that we reset the context images upon resume. For
 	 * simplicity, we just zero everything out.
 	 */
-	intel_ring_reset(ce->ring, 0);
+	intel_ring_reset(ce->ring, ce->ring->emit);
+
+	/* Scrub away the garbage */
+	execlists_init_reg_state(ce->lrc_reg_state,
+				 ce, ce->engine, ce->ring, true);
+	__execlists_update_reg_state(ce, ce->engine);
+
+	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
 }
 
 static const struct intel_context_ops execlists_context_ops = {

@@ -2925,6 +3031,8 @@ static void enable_execlists(struct intel_engine_cs *engine)
 			RING_HWS_PGA,
 			i915_ggtt_offset(engine->status_page.vma));
 	ENGINE_POSTING_READ(engine, RING_HWS_PGA);
+
+	engine->context_tag = 0;
 }
 
 static bool unexpected_starting_state(struct intel_engine_cs *engine)

@@ -3030,10 +3138,8 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
 		   &execlists->csb_status[reset_value]);
 }
 
-static void __execlists_reset_reg_state(const struct intel_context *ce,
-					const struct intel_engine_cs *engine)
+static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
 {
-	u32 *regs = ce->lrc_reg_state;
 	int x;
 
 	x = lrc_ring_mi_mode(engine);

@@ -3043,6 +3149,14 @@ static void __execlists_reset_reg_state(const struct intel_context *ce,
 	}
 }
 
+static void __execlists_reset_reg_state(const struct intel_context *ce,
+					const struct intel_engine_cs *engine)
+{
+	u32 *regs = ce->lrc_reg_state;
+
+	__reset_stop_ring(regs, engine);
+}
+
 static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;

@@ -3795,7 +3909,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
 	/* Default vfuncs which can be overridden by each engine. */
 
-	engine->release = execlists_release;
 	engine->resume = execlists_resume;
 
 	engine->cops = &execlists_context_ops;

@@ -3910,6 +4023,9 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 
 	reset_csb_pointers(engine);
 
+	/* Finally, take ownership and responsibility for cleanup! */
+	engine->release = execlists_release;
+
 	return 0;
 }

@@ -3949,18 +4065,21 @@ static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine)
 
 static void init_common_reg_state(u32 * const regs,
 				  const struct intel_engine_cs *engine,
-				  const struct intel_ring *ring)
+				  const struct intel_ring *ring,
+				  bool inhibit)
 {
-	regs[CTX_CONTEXT_CONTROL] =
-		_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
-		_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+	u32 ctl;
+
+	ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+	ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+	if (inhibit)
+		ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
 	if (INTEL_GEN(engine->i915) < 11)
-		regs[CTX_CONTEXT_CONTROL] |=
-			_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
-					    CTX_CTRL_RS_CTX_ENABLE);
+		ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
+					   CTX_CTRL_RS_CTX_ENABLE);
+	regs[CTX_CONTEXT_CONTROL] = ctl;
 
 	regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
 	regs[CTX_BB_STATE] = RING_BB_PPGTT;
 }
 
 static void init_wa_bb_reg_state(u32 * const regs,

@@ -4016,7 +4135,7 @@ static void execlists_init_reg_state(u32 *regs,
 				     const struct intel_context *ce,
 				     const struct intel_engine_cs *engine,
 				     const struct intel_ring *ring,
-				     bool close)
+				     bool inhibit)
 {
 	/*
 	 * A context is actually a big batch buffer with several

@@ -4028,21 +4147,17 @@ static void execlists_init_reg_state(u32 *regs,
 	 *
 	 * Must keep consistent with virtual_update_register_offsets().
 	 */
-	u32 *bbe = set_offsets(regs, reg_offsets(engine), engine);
-
-	if (close) { /* Close the batch; used mainly by live_lrc_layout() */
-		*bbe = MI_BATCH_BUFFER_END;
-		if (INTEL_GEN(engine->i915) >= 10)
-			*bbe |= BIT(0);
-	}
+	set_offsets(regs, reg_offsets(engine), engine, inhibit);
 
-	init_common_reg_state(regs, engine, ring);
+	init_common_reg_state(regs, engine, ring, inhibit);
 	init_ppgtt_reg_state(regs, vm_alias(ce->vm));
 
 	init_wa_bb_reg_state(regs, engine,
 			     INTEL_GEN(engine->i915) >= 12 ?
 			     GEN12_CTX_BB_PER_CTX_PTR :
 			     CTX_BB_PER_CTX_PTR);
+
+	__reset_stop_ring(regs, engine);
 }
 
 static int

@@ -4053,7 +4168,6 @@ populate_lr_context(struct intel_context *ce,
 {
 	bool inhibit = true;
 	void *vaddr;
-	u32 *regs;
 	int ret;
 
 	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);

@@ -4083,11 +4197,8 @@ populate_lr_context(struct intel_context *ce,
 
 	/* The second page of the context object contains some fields which must
 	 * be set up prior to the first execution. */
-	regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
-	execlists_init_reg_state(regs, ce, engine, ring, inhibit);
-	if (inhibit)
-		regs[CTX_CONTEXT_CONTROL] |=
-			_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+	execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
+				 ce, engine, ring, inhibit);
 
 	ret = 0;
 err_unpin_ctx:

@@ -4481,9 +4592,11 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 	ve->base.gt = siblings[0]->gt;
 	ve->base.uncore = siblings[0]->uncore;
+	ve->base.id = -1;
+
 	ve->base.class = OTHER_CLASS;
 	ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
 	ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
 	ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
 
 	/*
 	 * The decision on whether to submit a request using semaphores

@@ -127,7 +127,7 @@ struct drm_i915_mocs_table {
 		   LE_0_PAGETABLE | LE_TC_2_LLC_ELLC | LE_LRUM(3), \
 		   L3_3_WB)
 
-static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
+static const struct drm_i915_mocs_entry skl_mocs_table[] = {
 	GEN9_MOCS_ENTRIES,
 	MOCS_ENTRY(I915_MOCS_CACHED,
 		   LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),

@@ -233,7 +233,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
 		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
 		   L3_1_UC)
 
-static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
+static const struct drm_i915_mocs_entry tgl_mocs_table[] = {
 	/* Base - Error (Reserved for Non-Use) */
 	MOCS_ENTRY(0, 0x0, 0x0),
 	/* Base - Reserved */

@@ -267,7 +267,7 @@ static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
 		   L3_3_WB),
 };
 
-static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
+static const struct drm_i915_mocs_entry icl_mocs_table[] = {
 	/* Base - Uncached (Deprecated) */
 	MOCS_ENTRY(I915_MOCS_UNCACHED,
 		   LE_1_UC | LE_TC_1_LLC,

@@ -284,17 +284,17 @@ static bool get_mocs_settings(const struct drm_i915_private *i915,
 			      struct drm_i915_mocs_table *table)
 {
 	if (INTEL_GEN(i915) >= 12) {
-		table->size = ARRAY_SIZE(tigerlake_mocs_table);
-		table->table = tigerlake_mocs_table;
+		table->size = ARRAY_SIZE(tgl_mocs_table);
+		table->table = tgl_mocs_table;
 		table->n_entries = GEN11_NUM_MOCS_ENTRIES;
 	} else if (IS_GEN(i915, 11)) {
-		table->size = ARRAY_SIZE(icelake_mocs_table);
-		table->table = icelake_mocs_table;
+		table->size = ARRAY_SIZE(icl_mocs_table);
+		table->table = icl_mocs_table;
 		table->n_entries = GEN11_NUM_MOCS_ENTRIES;
 	} else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
-		table->size = ARRAY_SIZE(skylake_mocs_table);
+		table->size = ARRAY_SIZE(skl_mocs_table);
 		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
-		table->table = skylake_mocs_table;
+		table->table = skl_mocs_table;
 	} else if (IS_GEN9_LP(i915)) {
 		table->size = ARRAY_SIZE(broxton_mocs_table);
 		table->n_entries = GEN9_NUM_MOCS_ENTRIES;

@@ -0,0 +1,218 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h>

#include "i915_trace.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"

struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_page_dma(vm, &pt->base))) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&pt->used, 0);
	return pt;
}

struct i915_page_directory *__alloc_pd(size_t sz)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
	if (unlikely(!pd))
		return NULL;

	spin_lock_init(&pd->lock);
	return pd;
}

struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = __alloc_pd(sizeof(*pd));
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	return pd;
}

void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
{
	cleanup_page_dma(vm, pd);
	kfree(pd);
}

static inline void
write_dma_entry(struct i915_page_dma * const pdma,
		const unsigned short idx,
		const u64 encoded_entry)
{
	u64 * const vaddr = kmap_atomic(pdma->page);

	vaddr[idx] = encoded_entry;
	kunmap_atomic(vaddr);
}

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_dma * const to,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
	/* Each thread pre-pins the pd, and we may have a thread per pde. */
	GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry));

	atomic_inc(px_used(pd));
	pd->entry[idx] = to;
	write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
}

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct i915_page_scratch * const scratch)
{
	GEM_BUG_ON(atomic_read(px_used(pd)) == 0);

	write_dma_entry(px_base(pd), idx, scratch->encode);
	pd->entry[idx] = NULL;
	atomic_dec(px_used(pd));
}

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct i915_page_scratch * const scratch)
{
	bool free = false;

	if (atomic_add_unless(&pt->used, -1, 1))
		return false;

	spin_lock(&pd->lock);
	if (atomic_dec_and_test(&pt->used)) {
		clear_pd_entry(pd, idx, scratch);
		free = true;
	}
	spin_unlock(&pd->lock);

	return free;
}
|
||||
|
||||
int i915_ppgtt_init_hw(struct intel_gt *gt)
|
||||
{
|
||||
struct drm_i915_private *i915 = gt->i915;
|
||||
|
||||
gtt_write_workarounds(gt);
|
||||
|
||||
if (IS_GEN(i915, 6))
|
||||
gen6_ppgtt_enable(gt);
|
||||
else if (IS_GEN(i915, 7))
|
||||
gen7_ppgtt_enable(gt);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct i915_ppgtt *
|
||||
__ppgtt_create(struct intel_gt *gt)
|
||||
{
|
||||
if (INTEL_GEN(gt->i915) < 8)
|
||||
return gen6_ppgtt_create(gt);
|
||||
else
|
||||
return gen8_ppgtt_create(gt);
|
||||
}
|
||||
|
||||
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
|
||||
{
|
||||
struct i915_ppgtt *ppgtt;
|
||||
|
||||
ppgtt = __ppgtt_create(gt);
|
||||
if (IS_ERR(ppgtt))
|
||||
return ppgtt;
|
||||
|
||||
trace_i915_ppgtt_create(&ppgtt->vm);
|
||||
|
||||
return ppgtt;
|
||||
}
|
||||
|
||||
static int ppgtt_bind_vma(struct i915_vma *vma,
|
||||
enum i915_cache_level cache_level,
|
||||
u32 flags)
|
||||
{
|
||||
u32 pte_flags;
|
||||
int err;
|
||||
|
||||
if (flags & I915_VMA_ALLOC) {
|
||||
err = vma->vm->allocate_va_range(vma->vm,
|
||||
vma->node.start, vma->size);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
|
||||
}
|
||||
|
||||
/* Applicable to VLV, and gen8+ */
|
||||
pte_flags = 0;
|
||||
if (i915_gem_object_is_readonly(vma->obj))
|
||||
pte_flags |= PTE_READ_ONLY;
|
||||
|
||||
GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
|
||||
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
|
||||
wmb();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ppgtt_unbind_vma(struct i915_vma *vma)
|
||||
{
|
||||
if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
|
||||
vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
|
||||
}
|
||||
|
||||
int ppgtt_set_pages(struct i915_vma *vma)
|
||||
{
|
||||
GEM_BUG_ON(vma->pages);
|
||||
|
||||
vma->pages = vma->obj->mm.pages;
|
||||
|
||||
vma->page_sizes = vma->obj->mm.page_sizes;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
|
||||
{
|
||||
struct drm_i915_private *i915 = gt->i915;
|
||||
|
||||
ppgtt->vm.gt = gt;
|
||||
ppgtt->vm.i915 = i915;
|
||||
ppgtt->vm.dma = &i915->drm.pdev->dev;
|
||||
ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
|
||||
|
||||
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
|
||||
|
||||
ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
|
||||
ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
|
||||
ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
|
||||
ppgtt->vm.vma_ops.clear_pages = clear_pages;
|
||||
}
|
|
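release_pd_entry() in the new file above is a two-phase refcount drop: decrement lock-free unless this is possibly the last reference, and only take the directory lock for the final decrement and teardown. A standalone sketch of that idiom, using C11 atomics and a pthread mutex in place of the kernel's atomic_t and spinlock (all names here are hypothetical):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Decrement *v unless it currently equals `unless`; true if we did. */
static bool atomic_dec_unless(atomic_int *v, int unless)
{
        int old = atomic_load(v);

        while (old != unless) {
                if (atomic_compare_exchange_weak(v, &old, old - 1))
                        return true;
        }
        return false;
}

struct page_table {
        atomic_int used;
};

static pthread_mutex_t pd_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns true when the caller should free the entry. */
static bool release_entry(struct page_table *pt)
{
        bool free = false;

        /* Fast path: demonstrably not the last user, no lock needed. */
        if (atomic_dec_unless(&pt->used, 1))
                return false;

        /* Slow path: we may be last; decide under the lock so a concurrent
         * lookup cannot revive the entry while we clear it. */
        pthread_mutex_lock(&pd_lock);
        if (atomic_fetch_sub(&pt->used, 1) == 1)
                free = true;
        pthread_mutex_unlock(&pd_lock);

        return free;
}

int main(void)
{
        struct page_table pt = { .used = 1 };

        printf("free? %d\n", release_entry(&pt));
        return 0;
}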
@@ -147,11 +147,7 @@ static void mark_innocent(struct i915_request *rq)
 
 void __i915_request_reset(struct i915_request *rq, bool guilty)
 {
-	GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
-		  rq->engine->name,
-		  rq->fence.context,
-		  rq->fence.seqno,
-		  yesno(guilty));
+	RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
 
 	GEM_BUG_ON(i915_request_completed(rq));
 

@@ -251,9 +247,8 @@ out:
 	return ret;
 }
 
-static int ironlake_do_reset(struct intel_gt *gt,
-			     intel_engine_mask_t engine_mask,
-			     unsigned int retry)
+static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
+			unsigned int retry)
 {
 	struct intel_uncore *uncore = gt->uncore;
 	int ret;

@@ -597,7 +592,7 @@ static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
 	else if (INTEL_GEN(i915) >= 6)
 		return gen6_reset_engines;
 	else if (INTEL_GEN(i915) >= 5)
-		return ironlake_do_reset;
+		return ilk_do_reset;
 	else if (IS_G4X(i915))
 		return g4x_do_reset;
 	else if (IS_G33(i915) || IS_PINEVIEW(i915))

@@ -625,7 +620,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
 	 */
 	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
 	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
-		GEM_TRACE("engine_mask=%x\n", engine_mask);
+		GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
 		preempt_disable();
 		ret = reset(gt, engine_mask, retry);
 		preempt_enable();

@@ -785,8 +780,7 @@ static void nop_submit_request(struct i915_request *request)
 	struct intel_engine_cs *engine = request->engine;
 	unsigned long flags;
 
-	GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
-		  engine->name, request->fence.context, request->fence.seqno);
+	RQ_TRACE(request, "-EIO\n");
 	dma_fence_set_error(&request->fence, -EIO);
 
 	spin_lock_irqsave(&engine->active.lock, flags);

@@ -813,7 +807,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
 			intel_engine_dump(engine, &p, "%s\n", engine->name);
 	}
 
-	GEM_TRACE("start\n");
+	GT_TRACE(gt, "start\n");
 
 	/*
 	 * First, stop submission to hw, but do not yet complete requests by

@@ -844,7 +838,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
 
 	reset_finish(gt, awake);
 
-	GEM_TRACE("end\n");
+	GT_TRACE(gt, "end\n");
 }
 
 void intel_gt_set_wedged(struct intel_gt *gt)

@@ -870,7 +864,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 	if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
 		return false;
 
-	GEM_TRACE("start\n");
+	GT_TRACE(gt, "start\n");
 
 	/*
 	 * Before unwedging, make sure that all pending operations

@@ -932,7 +926,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 	 */
 	intel_engines_reset_default_submission(gt);
 
-	GEM_TRACE("end\n");
+	GT_TRACE(gt, "end\n");
 
 	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
 	clear_bit(I915_WEDGED, &gt->reset.flags);

@@ -1007,7 +1001,7 @@ void intel_gt_reset(struct intel_gt *gt,
 	intel_engine_mask_t awake;
 	int ret;
 
-	GEM_TRACE("flags=%lx\n", gt->reset.flags);
+	GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);
 
 	might_sleep();
 	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));

@@ -1236,7 +1230,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
 	engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
 
 	if (flags & I915_ERROR_CAPTURE) {
-		i915_capture_error_state(gt->i915, engine_mask, msg);
+		i915_capture_error_state(gt->i915);
 		intel_gt_clear_error_registers(gt, engine_mask);
 	}
 

@@ -1329,10 +1323,10 @@ int intel_gt_terminally_wedged(struct intel_gt *gt)
 	if (!intel_gt_is_wedged(gt))
 		return 0;
 
-	/* Reset still in progress? Maybe we will recover? */
-	if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
+	if (intel_gt_has_init_error(gt))
 		return -EIO;
 
+	/* Reset still in progress? Maybe we will recover? */
 	if (wait_event_interruptible(gt->reset.queue,
 				     !test_bit(I915_RESET_BACKOFF,
 					       &gt->reset.flags)))

@@ -1354,6 +1348,9 @@ void intel_gt_init_reset(struct intel_gt *gt)
 	init_waitqueue_head(&gt->reset.queue);
 	mutex_init(&gt->reset.mutex);
 	init_srcu_struct(&gt->reset.backoff_srcu);
+
+	/* no GPU until we are ready! */
+	__set_bit(I915_WEDGED, &gt->reset.flags);
 }
 
 void intel_gt_fini_reset(struct intel_gt *gt)
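The GEM_TRACE to GT_TRACE/RQ_TRACE conversion above moves the repeated identity boilerplate (engine name, fence ids) into the macro so call sites shrink to just the message. A rough userspace sketch of such context-prefixing trace macros — printf stands in for the kernel's tracing backend, and the struct fields are invented for illustration:

#include <stdio.h>

struct gt { int id; };
struct request {
        unsigned long long ctx;
        long long seqno;
        const char *engine;
};

/* Bake the object identity into the macro once, instead of at every
 * call site, as in the hunks above. */
#define GT_TRACE(gt, fmt, ...) \
        printf("GT%d: " fmt, (gt)->id, ##__VA_ARGS__)
#define RQ_TRACE(rq, fmt, ...) \
        printf("%s fence %llx:%lld: " fmt, \
               (rq)->engine, (rq)->ctx, (rq)->seqno, ##__VA_ARGS__)

int main(void)
{
        struct gt gt0 = { .id = 0 };
        struct request rq = { .ctx = 1, .seqno = 2, .engine = "rcs0" };

        GT_TRACE(&gt0, "engine_mask=%x\n", 0xf);
        RQ_TRACE(&rq, "guilty? %s\n", "yes");
        return 0;
}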
@@ -33,6 +33,7 @@
 
 #include "gem/i915_gem_context.h"
 
+#include "gen6_ppgtt.h"
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_context.h"

@@ -1328,26 +1329,12 @@ static int ring_context_alloc(struct intel_context *ce)
 
 static int ring_context_pin(struct intel_context *ce)
 {
-	int err;
-
-	err = intel_context_active_acquire(ce);
-	if (err)
-		return err;
-
-	err = __context_pin_ppgtt(ce);
-	if (err)
-		goto err_active;
-
-	return 0;
-
-err_active:
-	intel_context_active_release(ce);
-	return err;
+	return __context_pin_ppgtt(ce);
 }
 
 static void ring_context_reset(struct intel_context *ce)
 {
-	intel_ring_reset(ce->ring, 0);
+	intel_ring_reset(ce->ring, ce->ring->emit);
 }
 
 static const struct intel_context_ops ring_context_ops = {

@@ -1394,7 +1381,7 @@ static int load_pd_dir(struct i915_request *rq,
 
 	intel_ring_advance(rq, cs);
 
-	return 0;
+	return rq->engine->emit_flush(rq, EMIT_FLUSH);
 }
 
 static inline int mi_set_context(struct i915_request *rq, u32 flags)

@@ -1408,14 +1395,6 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 	int len;
 	u32 *cs;
 
-	flags |= MI_MM_SPACE_GTT;
-	if (IS_HASWELL(i915))
-		/* These flags are for resource streamer on HSW+ */
-		flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
-	else
-		/* We need to save the extended state for powersaving modes */
-		flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
-
 	len = 4;
 	if (IS_GEN(i915, 7))
 		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);

@@ -1592,7 +1571,7 @@ static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
 	if (ret)
 		return ret;
 
-	return rq->engine->emit_flush(rq, EMIT_FLUSH);
+	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
 }
 
 static int switch_context(struct i915_request *rq)

@@ -1607,15 +1586,21 @@ static int switch_context(struct i915_request *rq)
 		return ret;
 
 	if (ce->state) {
-		u32 hw_flags;
+		u32 flags;
 
 		GEM_BUG_ON(rq->engine->id != RCS0);
 
-		hw_flags = 0;
-		if (!test_bit(CONTEXT_VALID_BIT, &ce->flags))
-			hw_flags = MI_RESTORE_INHIBIT;
+		/* For resource streamer on HSW+ and power context elsewhere */
+		BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
+		BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);
 
-		ret = mi_set_context(rq, hw_flags);
+		flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
+		if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
+			flags |= MI_RESTORE_EXT_STATE_EN;
+		else
+			flags |= MI_RESTORE_INHIBIT;
+
+		ret = mi_set_context(rq, flags);
 		if (ret)
 			return ret;
 	}

@@ -1842,8 +1827,6 @@ static void setup_common(struct intel_engine_cs *engine)
 
 	setup_irq(engine);
 
-	engine->release = ring_release;
-
 	engine->resume = xcs_resume;
 	engine->reset.prepare = reset_prepare;
 	engine->reset.rewind = reset_rewind;

@@ -2009,6 +1992,9 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
 
 	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
 
+	/* Finally, take ownership and responsibility for cleanup! */
+	engine->release = ring_release;
+
 	return 0;
 
 err_ring:
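The switch_context() hunk leans on compile-time assertions so that one flags word can carry both the HSW resource-streamer and the extended-power-state meanings. A plain C11 sketch of that technique using _Static_assert — the bit values below are placeholders, not the real MI_* encodings:

#include <stdio.h>

/* Illustrative bit values only -- not the real MI_* encodings. */
#define MI_SAVE_EXT_STATE_EN            (1 << 3)
#define MI_RESTORE_EXT_STATE_EN         (1 << 2)
#define HSW_MI_RS_SAVE_STATE_EN         (1 << 3)
#define HSW_MI_RS_RESTORE_STATE_EN      (1 << 2)

/* Fail the build, not the run, if the aliasing assumption ever breaks;
 * BUILD_BUG_ON() in the hunk above serves the same purpose. */
_Static_assert(HSW_MI_RS_SAVE_STATE_EN == MI_SAVE_EXT_STATE_EN,
               "save flags must alias");
_Static_assert(HSW_MI_RS_RESTORE_STATE_EN == MI_RESTORE_EXT_STATE_EN,
               "restore flags must alias");

int main(void)
{
        /* One flags word now covers both meanings at once. */
        unsigned int flags = MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;

        printf("flags=%#x\n", flags);
        return 0;
}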
@@ -777,7 +777,7 @@ void intel_rps_boost(struct i915_request *rq)
 	spin_lock_irqsave(&rq->lock, flags);
 	if (!i915_request_has_waitboost(rq) &&
 	    !dma_fence_is_signaled_locked(&rq->fence)) {
-		rq->flags |= I915_REQUEST_WAITBOOST;
+		set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
 
 		if (!atomic_fetch_inc(&rps->num_waiters) &&
 		    READ_ONCE(rps->cur_freq) < rps->boost_freq)
@@ -348,7 +348,6 @@ void intel_timeline_enter(struct intel_timeline *tl)
 	 * use atomic to manipulate tl->active_count.
 	 */
 	lockdep_assert_held(&tl->mutex);
-	GEM_BUG_ON(!atomic_read(&tl->pin_count));
 
 	if (atomic_add_unless(&tl->active_count, 1, 0))
 		return;
|
|||
|
||||
/* WaDisableDopClockGating:bdw
|
||||
*
|
||||
* Also see the related UCGTCL1 write in broadwell_init_clock_gating()
|
||||
* Also see the related UCGTCL1 write in bdw_init_clock_gating()
|
||||
* to disable EUTC clock gating.
|
||||
*/
|
||||
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
|
||||
|
|
|
@@ -149,7 +149,11 @@ static int mock_context_alloc(struct intel_context *ce)
 
 static int mock_context_pin(struct intel_context *ce)
 {
-	return intel_context_active_acquire(ce);
+	return 0;
+}
+
+static void mock_context_reset(struct intel_context *ce)
+{
 }
 
 static const struct intel_context_ops mock_context_ops = {

@@ -161,6 +165,7 @@ static const struct intel_context_ops mock_context_ops = {
 	.enter = intel_context_enter_engine,
 	.exit = intel_context_exit_engine,
 
+	.reset = mock_context_reset,
 	.destroy = mock_context_destroy,
 };
 
@@ -1312,7 +1312,7 @@ static int igt_reset_evict_ppgtt(void *arg)
 	if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL)
 		return 0;
 
-	ppgtt = i915_ppgtt_create(gt->i915);
+	ppgtt = i915_ppgtt_create(gt);
 	if (IS_ERR(ppgtt))
 		return PTR_ERR(ppgtt);
 

@@ -1498,7 +1498,7 @@ static int igt_handle_error(void *arg)
 	struct intel_engine_cs *engine = gt->engine[RCS0];
 	struct hang h;
 	struct i915_request *rq;
-	struct i915_gpu_state *error;
+	struct i915_gpu_coredump *error;
 	int err;
 
 	/* Check that we can issue a global GPU and engine reset */
@@ -527,13 +527,19 @@ static struct i915_request *nop_request(struct intel_engine_cs *engine)
 	return rq;
 }
 
-static void wait_for_submit(struct intel_engine_cs *engine,
-			    struct i915_request *rq)
+static int wait_for_submit(struct intel_engine_cs *engine,
+			   struct i915_request *rq,
+			   unsigned long timeout)
 {
+	timeout += jiffies;
 	do {
 		cond_resched();
 		intel_engine_flush_submission(engine);
-	} while (!i915_request_is_active(rq));
+		if (i915_request_is_active(rq))
+			return 0;
+	} while (time_before(jiffies, timeout));
+
+	return -ETIME;
 }
 
 static long timeslice_threshold(const struct intel_engine_cs *engine)

@@ -601,7 +607,12 @@ static int live_timeslice_queue(void *arg)
 			goto err_heartbeat;
 	}
 	engine->schedule(rq, &attr);
-	wait_for_submit(engine, rq);
+	err = wait_for_submit(engine, rq, HZ / 2);
+	if (err) {
+		pr_err("%s: Timed out trying to submit semaphores\n",
+		       engine->name);
+		goto err_rq;
+	}
 
 	/* ELSP[1]: nop request */
 	nop = nop_request(engine);

@@ -609,8 +620,13 @@ static int live_timeslice_queue(void *arg)
 		err = PTR_ERR(nop);
 		goto err_rq;
 	}
-	wait_for_submit(engine, nop);
+	err = wait_for_submit(engine, nop, HZ / 2);
 	i915_request_put(nop);
+	if (err) {
+		pr_err("%s: Timed out trying to submit nop\n",
+		       engine->name);
+		goto err_rq;
+	}
 
 	GEM_BUG_ON(i915_request_completed(rq));
 	GEM_BUG_ON(execlists_active(&engine->execlists) != rq);

@@ -1137,7 +1153,7 @@ static int live_nopreempt(void *arg)
 	}
 
 	/* Low priority client, but unpreemptable! */
-	rq_a->flags |= I915_REQUEST_NOPREEMPT;
+	__set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
 
 	i915_request_add(rq_a);
 	if (!igt_wait_for_spinner(&a.spin, rq_a)) {

@@ -3362,7 +3378,7 @@ static int live_lrc_layout(void *arg)
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	u32 *mem;
+	u32 *lrc;
 	int err;
 
 	/*

@@ -3370,13 +3386,13 @@ static int live_lrc_layout(void *arg)
 	 * match the layout saved by HW.
 	 */
 
-	mem = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!mem)
+	lrc = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!lrc)
 		return -ENOMEM;
 
 	err = 0;
 	for_each_engine(engine, gt, id) {
-		u32 *hw, *lrc;
+		u32 *hw;
 		int dw;
 
 		if (!engine->default_state)

@@ -3390,8 +3406,7 @@ static int live_lrc_layout(void *arg)
 		}
 		hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
 
-		lrc = memset(mem, 0, PAGE_SIZE);
-		execlists_init_reg_state(lrc,
+		execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE),
 					 engine->kernel_context,
 					 engine,
 					 engine->kernel_context->ring,

@@ -3406,6 +3421,13 @@ static int live_lrc_layout(void *arg)
 			continue;
 		}
 
+		if (lrc[dw] == 0) {
+			pr_debug("%s: skipped instruction %x at dword %d\n",
+				 engine->name, lri, dw);
+			dw++;
+			continue;
+		}
+
 		if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
 			pr_err("%s: Expected LRI command at dword %d, found %08x\n",
 			       engine->name, dw, lri);

@@ -3454,7 +3476,7 @@ static int live_lrc_layout(void *arg)
 			break;
 		}
 
-	kfree(mem);
+	kfree(lrc);
 	return err;
 }
 
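The reworked wait_for_submit() above turns an unbounded busy-wait into a bounded poll that reports -ETIME, so a stuck submission fails the selftest instead of hanging it. The same shape in userspace C, with clock_gettime() standing in for jiffies and a stub condition (the helper names are hypothetical):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for i915_request_is_active(): turns true after some calls. */
static bool request_is_active(void)
{
        static int calls;

        return ++calls > 1000;
}

/* Poll with a deadline instead of spinning forever; mirrors the shape
 * of the reworked wait_for_submit() above. */
static int wait_for_submit(long timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
                if (request_is_active())
                        return 0;
                clock_gettime(CLOCK_MONOTONIC, &now);
        } while ((now.tv_sec - start.tv_sec) * 1000 +
                 (now.tv_nsec - start.tv_nsec) / 1000000 < timeout_ms);

        return -ETIME;
}

int main(void)
{
        printf("wait_for_submit: %d\n", wait_for_submit(500));
        return 0;
}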
@@ -1,5 +0,0 @@
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/../..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
@@ -12,6 +12,9 @@
 
 #include "i915_drv.h"
 
+static const struct intel_uc_ops uc_ops_off;
+static const struct intel_uc_ops uc_ops_on;
+
 /* Reset GuC providing us with fresh state for both GuC and HuC.
  */
 static int __intel_uc_reset_hw(struct intel_uc *uc)

@@ -89,6 +92,11 @@ void intel_uc_init_early(struct intel_uc *uc)
 	intel_huc_init_early(&uc->huc);
 
 	__confirm_options(uc);
+
+	if (intel_uc_uses_guc(uc))
+		uc->ops = &uc_ops_on;
+	else
+		uc->ops = &uc_ops_off;
 }
 
 void intel_uc_driver_late_release(struct intel_uc *uc)

@@ -245,12 +253,11 @@ static void guc_disable_communication(struct intel_guc *guc)
 	DRM_INFO("GuC communication disabled\n");
 }
 
-void intel_uc_fetch_firmwares(struct intel_uc *uc)
+static void __uc_fetch_firmwares(struct intel_uc *uc)
 {
 	int err;
 
-	if (!intel_uc_uses_guc(uc))
-		return;
+	GEM_BUG_ON(!intel_uc_uses_guc(uc));
 
 	err = intel_uc_fw_fetch(&uc->guc.fw);
 	if (err)

@@ -260,20 +267,19 @@ void intel_uc_fetch_firmwares(struct intel_uc *uc)
 		intel_uc_fw_fetch(&uc->huc.fw);
 }
 
-void intel_uc_cleanup_firmwares(struct intel_uc *uc)
+static void __uc_cleanup_firmwares(struct intel_uc *uc)
 {
 	intel_uc_fw_cleanup_fetch(&uc->huc.fw);
 	intel_uc_fw_cleanup_fetch(&uc->guc.fw);
 }
 
-void intel_uc_init(struct intel_uc *uc)
+static void __uc_init(struct intel_uc *uc)
 {
 	struct intel_guc *guc = &uc->guc;
 	struct intel_huc *huc = &uc->huc;
 	int ret;
 
-	if (!intel_uc_uses_guc(uc))
-		return;
+	GEM_BUG_ON(!intel_uc_uses_guc(uc));
 
 	/* XXX: GuC submission is unavailable for now */
 	GEM_BUG_ON(intel_uc_supports_guc_submission(uc));

@@ -288,7 +294,7 @@ void intel_uc_init(struct intel_uc *uc)
 		intel_huc_init(huc);
 }
 
-void intel_uc_fini(struct intel_uc *uc)
+static void __uc_fini(struct intel_uc *uc)
 {
 	intel_huc_fini(&uc->huc);
 	intel_guc_fini(&uc->guc);

@@ -309,14 +315,6 @@ static int __uc_sanitize(struct intel_uc *uc)
 	return __intel_uc_reset_hw(uc);
 }
 
-void intel_uc_sanitize(struct intel_uc *uc)
-{
-	if (!intel_uc_supports_guc(uc))
-		return;
-
-	__uc_sanitize(uc);
-}
-
 /* Initialize and verify the uC regs related to uC positioning in WOPCM */
 static int uc_init_wopcm(struct intel_uc *uc)
 {

@@ -380,13 +378,8 @@ static bool uc_is_wopcm_locked(struct intel_uc *uc)
 	       (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
 }
 
-int intel_uc_init_hw(struct intel_uc *uc)
+static int __uc_check_hw(struct intel_uc *uc)
 {
-	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
-	struct intel_guc *guc = &uc->guc;
-	struct intel_huc *huc = &uc->huc;
-	int ret, attempts;
-
 	if (!intel_uc_supports_guc(uc))
 		return 0;
 

@@ -395,11 +388,24 @@ int intel_uc_init_hw(struct intel_uc *uc)
 	 * before on this system after reboot, otherwise we risk GPU hangs.
 	 * To check if GuC was loaded before we look at WOPCM registers.
 	 */
-	if (!intel_uc_uses_guc(uc) && !uc_is_wopcm_locked(uc))
-		return 0;
+	if (uc_is_wopcm_locked(uc))
+		return -EIO;
+
+	return 0;
+}
+
+static int __uc_init_hw(struct intel_uc *uc)
+{
+	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+	struct intel_guc *guc = &uc->guc;
+	struct intel_huc *huc = &uc->huc;
+	int ret, attempts;
+
+	GEM_BUG_ON(!intel_uc_supports_guc(uc));
+	GEM_BUG_ON(!intel_uc_uses_guc(uc));
 
 	if (!intel_uc_fw_is_available(&guc->fw)) {
-		ret = uc_is_wopcm_locked(uc) ||
+		ret = __uc_check_hw(uc) ||
 		      intel_uc_fw_is_overridden(&guc->fw) ||
 		      intel_uc_supports_guc_submission(uc) ?
 		      intel_uc_fw_status_to_error(guc->fw.status) : 0;

@@ -495,7 +501,7 @@ err_out:
 	return -EIO;
 }
 
-void intel_uc_fini_hw(struct intel_uc *uc)
+static void __uc_fini_hw(struct intel_uc *uc)
 {
 	struct intel_guc *guc = &uc->guc;
 

@@ -595,3 +601,20 @@ int intel_uc_runtime_resume(struct intel_uc *uc)
 	 */
 	return __uc_resume(uc, true);
 }
+
+static const struct intel_uc_ops uc_ops_off = {
+	.init_hw = __uc_check_hw,
+};
+
+static const struct intel_uc_ops uc_ops_on = {
+	.sanitize = __uc_sanitize,
+
+	.init_fw = __uc_fetch_firmwares,
+	.fini_fw = __uc_cleanup_firmwares,
+
+	.init = __uc_init,
+	.fini = __uc_fini,
+
+	.init_hw = __uc_init_hw,
+	.fini_hw = __uc_fini_hw,
+};
@@ -10,7 +10,20 @@
 #include "intel_huc.h"
 #include "i915_params.h"
 
+struct intel_uc;
+
+struct intel_uc_ops {
+	int (*sanitize)(struct intel_uc *uc);
+	void (*init_fw)(struct intel_uc *uc);
+	void (*fini_fw)(struct intel_uc *uc);
+	void (*init)(struct intel_uc *uc);
+	void (*fini)(struct intel_uc *uc);
+	int (*init_hw)(struct intel_uc *uc);
+	void (*fini_hw)(struct intel_uc *uc);
+};
+
 struct intel_uc {
+	struct intel_uc_ops const *ops;
 	struct intel_guc guc;
 	struct intel_huc huc;
 

@@ -21,13 +34,6 @@ struct intel_uc {
 void intel_uc_init_early(struct intel_uc *uc);
 void intel_uc_driver_late_release(struct intel_uc *uc);
 void intel_uc_init_mmio(struct intel_uc *uc);
-void intel_uc_fetch_firmwares(struct intel_uc *uc);
-void intel_uc_cleanup_firmwares(struct intel_uc *uc);
-void intel_uc_sanitize(struct intel_uc *uc);
-void intel_uc_init(struct intel_uc *uc);
-int intel_uc_init_hw(struct intel_uc *uc);
-void intel_uc_fini_hw(struct intel_uc *uc);
-void intel_uc_fini(struct intel_uc *uc);
 void intel_uc_reset_prepare(struct intel_uc *uc);
 void intel_uc_suspend(struct intel_uc *uc);
 void intel_uc_runtime_suspend(struct intel_uc *uc);

@@ -64,4 +70,20 @@ static inline bool intel_uc_uses_huc(struct intel_uc *uc)
 	return intel_huc_is_enabled(&uc->huc);
 }
 
+#define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
+static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
+{ \
+	if (uc->ops->_OPS) \
+		return uc->ops->_OPS(uc); \
+	return _RET; \
+}
+intel_uc_ops_function(sanitize, sanitize, int, 0);
+intel_uc_ops_function(fetch_firmwares, init_fw, void, );
+intel_uc_ops_function(cleanup_firmwares, fini_fw, void, );
+intel_uc_ops_function(init, init, void, );
+intel_uc_ops_function(fini, fini, void, );
+intel_uc_ops_function(init_hw, init_hw, int, 0);
+intel_uc_ops_function(fini_hw, fini_hw, void, );
+#undef intel_uc_ops_function
+
 #endif
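The intel_uc hunks above replace exported entry points with a const ops table selected once at init, plus macro-generated guard wrappers that tolerate absent callbacks. A compilable miniature of the same pattern — the uc/uc_ops types here are toys, not the driver's:

#include <stdio.h>

struct uc;

struct uc_ops {
        int (*init_hw)(struct uc *uc);
        void (*fini_hw)(struct uc *uc);
};

struct uc {
        const struct uc_ops *ops;
};

static int real_init_hw(struct uc *uc) { (void)uc; return 0; }

static const struct uc_ops ops_on  = { .init_hw = real_init_hw };
static const struct uc_ops ops_off = { 0 };

/* Generate one guard wrapper per op: call it if present, else return
 * _RET -- the same shape as intel_uc_ops_function() above. */
#define uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
static inline _TYPE uc_##_NAME(struct uc *uc) \
{ \
        if (uc->ops->_OPS) \
                return uc->ops->_OPS(uc); \
        return _RET; \
}
uc_ops_function(init_hw, init_hw, int, 0)
uc_ops_function(fini_hw, fini_hw, void, )
#undef uc_ops_function

int main(void)
{
        struct uc on = { &ops_on }, off = { &ops_off };

        printf("on: %d, off: %d\n", uc_init_hw(&on), uc_init_hw(&off));
        uc_fini_hw(&off);       /* no-op: callback absent */
        return 0;
}

The payoff is that callers never branch on the GuC-enabled state themselves; the decision is made exactly once when the ops pointer is installed.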
@@ -2675,7 +2675,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	return 0;
 }
 
-static int init_broadwell_mmio_info(struct intel_gvt *gvt)
+static int init_bdw_mmio_info(struct intel_gvt *gvt)
 {
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	int ret;

@@ -3364,20 +3364,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 		goto err;
 
 	if (IS_BROADWELL(dev_priv)) {
-		ret = init_broadwell_mmio_info(gvt);
+		ret = init_bdw_mmio_info(gvt);
 		if (ret)
 			goto err;
 	} else if (IS_SKYLAKE(dev_priv)
 		|| IS_KABYLAKE(dev_priv)
 		|| IS_COFFEELAKE(dev_priv)) {
-		ret = init_broadwell_mmio_info(gvt);
+		ret = init_bdw_mmio_info(gvt);
 		if (ret)
 			goto err;
 		ret = init_skl_mmio_info(gvt);
 		if (ret)
 			goto err;
 	} else if (IS_BROXTON(dev_priv)) {
-		ret = init_broadwell_mmio_info(gvt);
+		ret = init_bdw_mmio_info(gvt);
 		if (ret)
 			goto err;
 		ret = init_skl_mmio_info(gvt);
@@ -1224,7 +1224,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	enum intel_engine_id i;
 	int ret;
 
-	ppgtt = i915_ppgtt_create(i915);
+	ppgtt = i915_ppgtt_create(&i915->gt);
 	if (IS_ERR(ppgtt))
 		return PTR_ERR(ppgtt);
 
@@ -605,12 +605,15 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 					    struct intel_engine_cs *engine)
 {
 	intel_engine_mask_t tmp, mask = engine->mask;
+	struct llist_node *pos = NULL, *next;
 	struct intel_gt *gt = engine->gt;
-	struct llist_node *pos, *next;
 	int err;
 
 	GEM_BUG_ON(i915_active_is_idle(ref));
-	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+
+	/* Wait until the previous preallocation is completed */
+	while (!llist_empty(&ref->preallocated_barriers))
+		cond_resched();
 
 	/*
 	 * Preallocate a node for each physical engine supporting the target

@@ -653,16 +656,24 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
 
 		GEM_BUG_ON(barrier_to_engine(node) != engine);
-		llist_add(barrier_to_ll(node), &ref->preallocated_barriers);
+		next = barrier_to_ll(node);
+		next->next = pos;
+		if (!pos)
+			pos = next;
 		intel_engine_pm_get(engine);
 	}
 
+	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+	llist_add_batch(next, pos, &ref->preallocated_barriers);
+
 	return 0;
 
 unwind:
-	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
+	while (pos) {
 		struct active_node *node = barrier_from_ll(pos);
 
+		pos = pos->next;
+
 		atomic_dec(&ref->count);
 		intel_engine_pm_put(barrier_to_engine(node));
 
|
|||
{
|
||||
struct i915_buddy_block *block, *on;
|
||||
|
||||
list_for_each_entry_safe(block, on, objects, link)
|
||||
list_for_each_entry_safe(block, on, objects, link) {
|
||||
i915_buddy_free(mm, block);
|
||||
cond_resched();
|
||||
}
|
||||
INIT_LIST_HEAD(objects);
|
||||
}
|
||||
|
||||
|
|
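The i915_buddy hunk adds cond_resched() inside a potentially very long free loop so other work can run during teardown. A loose userspace analogue, yielding every few items with sched_yield() — the batch size is an arbitrary illustration, and the kernel version yields on every iteration:

#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        enum { N = 1 << 16, BATCH = 1024 };
        static void *objs[N];

        for (int i = 0; i < N; i++)
                objs[i] = malloc(16);

        /* Free in batches, yielding between them so a long teardown
         * does not hog the CPU -- the cond_resched() idea above. */
        for (int i = 0; i < N; i++) {
                free(objs[i]);
                if (i % BATCH == 0)
                        sched_yield();
        }
        return 0;
}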
(Some files were not shown because too many files have changed in this diff.)