Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "Exynos, i915 and msm fixes and one core fix.

  exynos: hdmi power off and mixer issues

  msm: iommu, build fixes,

  i915: regression races and warning fixes"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (22 commits)
  drm/i915: vlv_prepare_pll is only needed in case of non DSI interfaces
  drm: fix NULL pointer access by wrong ioctl
  drm/exynos: enable vsync interrupt while waiting for vblank
  drm/exynos: soft reset mixer before reconfigure after power-on
  drm/exynos: allow multiple layer updates per vsync for mixer
  drm/i915: Hold the table lock whilst walking the file's idr and counting the objects in debugfs
  drm/i915: BDW: Adding Reserved PCI IDs.
  drm/i915: Only mark the ctx as initialised after a SET_CONTEXT operation
  drm/exynos: stop mixer before gating clocks during poweroff
  drm/exynos: set power state variable after enabling clocks and power
  drm/exynos: disable unused windows on apply
  drm/exynos: Fix de-registration ordering
  drm/exynos: change zero to NULL for sparse
  drm/exynos: dpi: Fix NULL pointer dereference with legacy bindings
  drm/exynos: hdmi: fix power order issue
  drm/i915: default to having backlight if VBT not available
  drm/i915: cache hw power well enabled state
  drm/msm: fix IOMMU cleanup for -EPROBE_DEFER
  drm/msm: use PAGE_ALIGNED instead of IS_ALIGNED(PAGE_SIZE)
  drm/msm/hdmi: set hdp clock rate before prepare_enable
  ...
commit c163b524d2
@@ -419,8 +419,9 @@ long drm_ioctl(struct file *filp,
                         retcode = -EFAULT;
                         goto err_i1;
                 }
-        } else
+        } else if (cmd & IOC_OUT) {
                 memset(kdata, 0, usize);
+        }
 
         if (ioctl->flags & DRM_UNLOCKED)
                 retcode = func(dev, kdata, file_priv);

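Note: the core fix above only zeroes the kernel-side argument buffer when the command actually returns data (IOC_OUT); before, the bare "} else" branch could memset a buffer that was never allocated for ioctls with no payload. A minimal userspace sketch of that guard, using made-up my_ioctl/MY_IOC_* names rather than the real DRM code:

#include <stdio.h>
#include <string.h>

#define MY_IOC_IN  (1u << 0)   /* command copies data into the handler */
#define MY_IOC_OUT (1u << 1)   /* command copies data back to the caller */

/* Hypothetical handler: the buffer exists only when a payload exists. */
static int my_ioctl(unsigned int cmd, void *user_arg, size_t usize)
{
        char stack_kdata[128];
        char *kdata = NULL;

        if (cmd & (MY_IOC_IN | MY_IOC_OUT))
                kdata = stack_kdata;            /* payload present: buffer needed */

        if (cmd & MY_IOC_IN) {
                memcpy(kdata, user_arg, usize); /* stand-in for copy_from_user() */
        } else if (cmd & MY_IOC_OUT) {
                /*
                 * Output-only command: zero the buffer so the caller never
                 * sees stale contents.  Without the IOC_OUT check this memset
                 * would also run for no-payload commands, where kdata is
                 * still NULL -- the crash the patch fixes.
                 */
                memset(kdata, 0, usize);
        }

        /* ... dispatch to the real handler here ... */
        return 0;
}

int main(void)
{
        char buf[16] = "hello";

        my_ioctl(MY_IOC_IN, buf, sizeof(buf));  /* input-only command */
        my_ioctl(0, NULL, 0);                   /* no payload: kdata stays NULL, no memset */
        puts("ok");
        return 0;
}
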
@@ -40,7 +40,7 @@ exynos_dpi_detect(struct drm_connector *connector, bool force)
 {
         struct exynos_dpi *ctx = connector_to_dpi(connector);
 
-        if (!ctx->panel->connector)
+        if (ctx->panel && !ctx->panel->connector)
                 drm_panel_attach(ctx->panel, &ctx->connector);
 
         return connector_status_connected;

@@ -765,24 +765,24 @@ static int exynos_drm_init(void)
 
         return 0;
 
-err_unregister_pd:
-        platform_device_unregister(exynos_drm_pdev);
-
 err_remove_vidi:
 #ifdef CONFIG_DRM_EXYNOS_VIDI
         exynos_drm_remove_vidi();
+
+err_unregister_pd:
 #endif
+        platform_device_unregister(exynos_drm_pdev);
 
         return ret;
 }
 
 static void exynos_drm_exit(void)
 {
+        platform_driver_unregister(&exynos_drm_platform_driver);
 #ifdef CONFIG_DRM_EXYNOS_VIDI
         exynos_drm_remove_vidi();
 #endif
         platform_device_unregister(exynos_drm_pdev);
-        platform_driver_unregister(&exynos_drm_platform_driver);
 }
 
 module_init(exynos_drm_init);

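Note: the de-registration fix follows the usual rule that error paths and teardown undo setup steps in reverse order, and each error label undoes only what succeeded before the failure. A standalone sketch of that goto-unwind shape, with invented setup_a/b/c steps standing in for the Exynos registration calls:

#include <stdio.h>

/* Hypothetical setup steps standing in for device/driver registration. */
static int setup_a(void) { puts("setup a"); return 0; }
static int setup_b(void) { puts("setup b"); return 0; }
static int setup_c(void) { puts("setup c"); return -1; /* force a failure */ }
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }
static void undo_c(void) { puts("undo c"); }

static int init(void)
{
        int ret;

        ret = setup_a();
        if (ret)
                return ret;

        ret = setup_b();
        if (ret)
                goto err_undo_a;        /* only A succeeded so far */

        ret = setup_c();
        if (ret)
                goto err_undo_b;        /* undo B, then fall through to undo A */

        return 0;

err_undo_b:
        undo_b();
err_undo_a:
        undo_a();
        return ret;
}

static void fini(void)
{
        /* Teardown mirrors init in reverse: C, then B, then A. */
        undo_c();
        undo_b();
        undo_a();
}

int main(void)
{
        if (init()) {
                fprintf(stderr, "init failed, partial setup was unwound\n");
                return 1;
        }
        fini();
        return 0;
}
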
@@ -343,7 +343,7 @@ struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
 int exynos_dpi_remove(struct device *dev);
 #else
 static inline struct exynos_drm_display *
-exynos_dpi_probe(struct device *dev) { return 0; }
+exynos_dpi_probe(struct device *dev) { return NULL; }
 static inline int exynos_dpi_remove(struct device *dev) { return 0; }
 #endif
 

@@ -741,6 +741,8 @@ static void fimd_apply(struct exynos_drm_manager *mgr)
                 win_data = &ctx->win_data[i];
                 if (win_data->enabled)
                         fimd_win_commit(mgr, i);
+                else
+                        fimd_win_disable(mgr, i);
         }
 
         fimd_commit(mgr);

@@ -2090,6 +2090,11 @@ out:
 
 static void hdmi_dpms(struct exynos_drm_display *display, int mode)
 {
+        struct hdmi_context *hdata = display->ctx;
+        struct drm_encoder *encoder = hdata->encoder;
+        struct drm_crtc *crtc = encoder->crtc;
+        struct drm_crtc_helper_funcs *funcs = NULL;
+
         DRM_DEBUG_KMS("mode %d\n", mode);
 
         switch (mode) {
@@ -2099,6 +2104,20 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode)
         case DRM_MODE_DPMS_STANDBY:
         case DRM_MODE_DPMS_SUSPEND:
         case DRM_MODE_DPMS_OFF:
+                /*
+                 * The SFRs of VP and Mixer are updated by Vertical Sync of
+                 * Timing generator which is a part of HDMI so the sequence
+                 * to disable TV Subsystem should be as following,
+                 *      VP -> Mixer -> HDMI
+                 *
+                 * Below codes will try to disable Mixer and VP(if used)
+                 * prior to disabling HDMI.
+                 */
+                if (crtc)
+                        funcs = crtc->helper_private;
+                if (funcs && funcs->dpms)
+                        (*funcs->dpms)(crtc, mode);
+
                 hdmi_poweroff(display);
                 break;
         default:

@@ -377,6 +377,20 @@ static void mixer_run(struct mixer_context *ctx)
         mixer_regs_dump(ctx);
 }
 
+static void mixer_stop(struct mixer_context *ctx)
+{
+        struct mixer_resources *res = &ctx->mixer_res;
+        int timeout = 20;
+
+        mixer_reg_writemask(res, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
+
+        while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
+                        --timeout)
+                usleep_range(10000, 12000);
+
+        mixer_regs_dump(ctx);
+}
+
 static void vp_video_buffer(struct mixer_context *ctx, int win)
 {
         struct mixer_resources *res = &ctx->mixer_res;
@@ -497,13 +511,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
 static void mixer_layer_update(struct mixer_context *ctx)
 {
         struct mixer_resources *res = &ctx->mixer_res;
-        u32 val;
-
-        val = mixer_reg_read(res, MXR_CFG);
 
-        /* allow one update per vsync only */
-        if (!(val & MXR_CFG_LAYER_UPDATE_COUNT_MASK))
-                mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+        mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
 }
 
 static void mixer_graph_buffer(struct mixer_context *ctx, int win)
@@ -1010,6 +1019,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
         }
         mutex_unlock(&mixer_ctx->mixer_mutex);
 
+        drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+
         atomic_set(&mixer_ctx->wait_vsync_event, 1);
 
         /*
@@ -1020,6 +1031,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
                         !atomic_read(&mixer_ctx->wait_vsync_event),
                         HZ/20))
                 DRM_DEBUG_KMS("vblank wait timed out.\n");
+
+        drm_vblank_put(mgr->crtc->dev, mixer_ctx->pipe);
 }
 
 static void mixer_window_suspend(struct exynos_drm_manager *mgr)
@@ -1061,7 +1074,7 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
                 mutex_unlock(&ctx->mixer_mutex);
                 return;
         }
-        ctx->powered = true;
+
         mutex_unlock(&ctx->mixer_mutex);
 
         pm_runtime_get_sync(ctx->dev);
@@ -1072,6 +1085,12 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
                 clk_prepare_enable(res->sclk_mixer);
         }
 
+        mutex_lock(&ctx->mixer_mutex);
+        ctx->powered = true;
+        mutex_unlock(&ctx->mixer_mutex);
+
+        mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
+
         mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
         mixer_win_reset(ctx);
@@ -1084,14 +1103,21 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
         struct mixer_resources *res = &ctx->mixer_res;
 
         mutex_lock(&ctx->mixer_mutex);
-        if (!ctx->powered)
-                goto out;
+        if (!ctx->powered) {
+                mutex_unlock(&ctx->mixer_mutex);
+                return;
+        }
         mutex_unlock(&ctx->mixer_mutex);
 
+        mixer_stop(ctx);
         mixer_window_suspend(mgr);
 
         ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
 
+        mutex_lock(&ctx->mixer_mutex);
+        ctx->powered = false;
+        mutex_unlock(&ctx->mixer_mutex);
+
         clk_disable_unprepare(res->mixer);
         if (ctx->vp_enabled) {
                 clk_disable_unprepare(res->vp);
@@ -1099,12 +1125,6 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
         }
 
         pm_runtime_put_sync(ctx->dev);
-
-        mutex_lock(&ctx->mixer_mutex);
-        ctx->powered = false;
-
-out:
-        mutex_unlock(&ctx->mixer_mutex);
 }
 
 static void mixer_dpms(struct exynos_drm_manager *mgr, int mode)

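Note: mixer_stop() above clears the RUN bit and then polls the status register for the IDLE bit a bounded number of times, sleeping between polls, before the clocks are gated. A self-contained sketch of that bounded-poll idiom, with a fake read_status() in place of the real register read (the bit value is an assumption, not the real register map):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define STATUS_REG_IDLE (1u << 1)

/* Fake hardware: pretend the engine goes idle after a few polls. */
static unsigned int read_status(void)
{
        static int polls;
        return (++polls >= 3) ? STATUS_REG_IDLE : 0;
}

static void sleep_ms(long ms)
{
        struct timespec ts = { .tv_sec = ms / 1000, .tv_nsec = (ms % 1000) * 1000000L };
        nanosleep(&ts, NULL);
}

/* Returns true if the block reached idle before the retry budget ran out. */
static bool stop_and_wait_idle(void)
{
        int timeout = 20;

        /* ...clear the RUN bit here... */

        while (!(read_status() & STATUS_REG_IDLE) && --timeout)
                sleep_ms(10);   /* usleep_range(10000, 12000) in the original */

        return timeout != 0;
}

int main(void)
{
        printf("idle reached: %s\n", stop_and_wait_idle() ? "yes" : "no");
        return 0;
}
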
@@ -78,6 +78,7 @@
 #define MXR_STATUS_BIG_ENDIAN           (1 << 3)
 #define MXR_STATUS_ENDIAN_MASK          (1 << 3)
 #define MXR_STATUS_SYNC_ENABLE          (1 << 2)
+#define MXR_STATUS_REG_IDLE             (1 << 1)
 #define MXR_STATUS_REG_RUN              (1 << 0)
 
 /* bits for MXR_CFG */

@@ -446,7 +446,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 
                 memset(&stats, 0, sizeof(stats));
                 stats.file_priv = file->driver_priv;
+                spin_lock(&file->table_lock);
                 idr_for_each(&file->object_idr, per_file_stats, &stats);
+                spin_unlock(&file->table_lock);
                 /*
                  * Although we have a valid reference on file->pid, that does
                  * not guarantee that the task_struct who called get_pid() is

@@ -977,6 +977,8 @@ struct i915_power_well {
         bool always_on;
         /* power well enable/disable usage count */
         int count;
+        /* cached hw enabled state */
+        bool hw_enabled;
         unsigned long domains;
         unsigned long data;
         const struct i915_power_well_ops *ops;

@@ -598,6 +598,7 @@ static int do_switch(struct intel_engine_cs *ring,
         struct intel_context *from = ring->last_context;
         struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
         u32 hw_flags = 0;
+        bool uninitialized = false;
         int ret, i;
 
         if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -696,19 +697,20 @@ static int do_switch(struct intel_engine_cs *ring,
                 i915_gem_context_unreference(from);
         }
 
+        uninitialized = !to->is_initialized && from == NULL;
+        to->is_initialized = true;
+
 done:
         i915_gem_context_reference(to);
         ring->last_context = to;
         to->last_ring = ring;
 
-        if (ring->id == RCS && !to->is_initialized && from == NULL) {
+        if (uninitialized) {
                 ret = i915_gem_render_state_init(ring);
                 if (ret)
                         DRM_ERROR("init render state: %d\n", ret);
         }
 
-        to->is_initialized = true;
-
         return 0;
 
 unpin_out:

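Note: the do_switch() change records whether the context still needed its one-time setup in a local flag before marking it initialised, instead of testing the flag after it had already been set. A small sketch of that order of operations, with an invented context type rather than the i915 structures:

#include <stdbool.h>
#include <stdio.h>

struct context {
        bool is_initialized;
};

/* Stand-in for the one-time work (render state init in the driver). */
static int one_time_setup(void)
{
        puts("running one-time setup");
        return 0;
}

static int do_switch(struct context *to, struct context *from)
{
        bool uninitialized;

        /* ...submit the actual switch; bail out on error before this point... */

        /*
         * Decide *before* flipping the flag whether first-time work is due,
         * then mark the context initialised only once the switch succeeded.
         */
        uninitialized = !to->is_initialized && from == NULL;
        to->is_initialized = true;

        if (uninitialized) {
                int ret = one_time_setup();
                if (ret)
                        fprintf(stderr, "one-time setup failed: %d\n", ret);
        }

        return 0;
}

int main(void)
{
        struct context ctx = { .is_initialized = false };

        do_switch(&ctx, NULL);   /* first switch: one-time setup runs */
        do_switch(&ctx, &ctx);   /* later switches: setup is skipped */
        return 0;
}
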
@@ -315,9 +315,6 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
         const struct bdb_lfp_backlight_data *backlight_data;
         const struct bdb_lfp_backlight_data_entry *entry;
 
-        /* Err to enabling backlight if no backlight block. */
-        dev_priv->vbt.backlight.present = true;
-
         backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
         if (!backlight_data)
                 return;
@@ -1088,6 +1085,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 
         dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
 
+        /* Default to having backlight */
+        dev_priv->vbt.backlight.present = true;
+
         /* LFP panel data */
         dev_priv->vbt.lvds_dither = 1;
         dev_priv->vbt.lvds_vbt = 0;

@@ -4564,7 +4564,10 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
         if (intel_crtc->active)
                 return;
 
-        vlv_prepare_pll(intel_crtc);
+        is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
+
+        if (!is_dsi && !IS_CHERRYVIEW(dev))
+                vlv_prepare_pll(intel_crtc);
 
         /* Set up the display plane register */
         dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -4598,8 +4601,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
                 if (encoder->pre_pll_enable)
                         encoder->pre_pll_enable(encoder);
 
-        is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
-
         if (!is_dsi) {
                 if (IS_CHERRYVIEW(dev))
                         chv_enable_pll(intel_crtc);
@@ -12411,8 +12412,8 @@ intel_display_capture_error_state(struct drm_device *dev)
 
         for_each_pipe(i) {
                 error->pipe[i].power_domain_on =
-                        intel_display_power_enabled_sw(dev_priv,
-                                                       POWER_DOMAIN_PIPE(i));
+                        intel_display_power_enabled_unlocked(dev_priv,
+                                                             POWER_DOMAIN_PIPE(i));
                 if (!error->pipe[i].power_domain_on)
                         continue;
 
@@ -12447,7 +12448,7 @@ intel_display_capture_error_state(struct drm_device *dev)
                 enum transcoder cpu_transcoder = transcoders[i];
 
                 error->transcoder[i].power_domain_on =
-                        intel_display_power_enabled_sw(dev_priv,
+                        intel_display_power_enabled_unlocked(dev_priv,
                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
                 if (!error->transcoder[i].power_domain_on)
                         continue;

@@ -950,8 +950,8 @@ int intel_power_domains_init(struct drm_i915_private *);
 void intel_power_domains_remove(struct drm_i915_private *);
 bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
                                  enum intel_display_power_domain domain);
-bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
-                                    enum intel_display_power_domain domain);
+bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
+                                          enum intel_display_power_domain domain);
 void intel_display_power_get(struct drm_i915_private *dev_priv,
                              enum intel_display_power_domain domain);
 void intel_display_power_put(struct drm_i915_private *dev_priv,

@@ -5603,8 +5603,8 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
              (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
 }
 
-bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
-                                    enum intel_display_power_domain domain)
+bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
+                                          enum intel_display_power_domain domain)
 {
         struct i915_power_domains *power_domains;
         struct i915_power_well *power_well;
@@ -5615,16 +5615,19 @@ bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
                 return false;
 
         power_domains = &dev_priv->power_domains;
+
         is_enabled = true;
+
         for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
                 if (power_well->always_on)
                         continue;
 
-                if (!power_well->count) {
+                if (!power_well->hw_enabled) {
                         is_enabled = false;
                         break;
                 }
         }
+
         return is_enabled;
 }
 
@@ -5632,30 +5635,15 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain)
 {
         struct i915_power_domains *power_domains;
-        struct i915_power_well *power_well;
-        bool is_enabled;
-        int i;
-
-        if (dev_priv->pm.suspended)
-                return false;
+        bool ret;
 
         power_domains = &dev_priv->power_domains;
 
-        is_enabled = true;
-
         mutex_lock(&power_domains->lock);
-        for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
-                if (power_well->always_on)
-                        continue;
-
-                if (!power_well->ops->is_enabled(dev_priv, power_well)) {
-                        is_enabled = false;
-                        break;
-                }
-        }
+        ret = intel_display_power_enabled_unlocked(dev_priv, domain);
         mutex_unlock(&power_domains->lock);
 
-        return is_enabled;
+        return ret;
 }
 
 /*
@@ -5976,6 +5964,7 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
                 if (!power_well->count++) {
                         DRM_DEBUG_KMS("enabling %s\n", power_well->name);
                         power_well->ops->enable(dev_priv, power_well);
+                        power_well->hw_enabled = true;
                 }
 
                 check_power_well_state(dev_priv, power_well);
@@ -6005,6 +5994,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 
                 if (!--power_well->count && i915.disable_power_well) {
                         DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+                        power_well->hw_enabled = false;
                         power_well->ops->disable(dev_priv, power_well);
                 }
 
@@ -6267,8 +6257,11 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
         int i;
 
         mutex_lock(&power_domains->lock);
-        for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
+        for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
                 power_well->ops->sync_hw(dev_priv, power_well);
+                power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
+                                                                     power_well);
+        }
         mutex_unlock(&power_domains->lock);
 }
 

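Note: the i915 change keeps a hw_enabled flag next to each power well's refcount, updates it under the same lock whenever the well is actually enabled or disabled (and resyncs it on resume), so the _unlocked query can read the cached flag instead of touching hardware. A compact sketch of that caching pattern with an invented power_well type and stubbed enable/disable hooks:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct power_well {
        int count;          /* enable/disable usage count */
        bool hw_enabled;    /* cached hardware state, updated under the lock */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void hw_enable(struct power_well *w)  { (void)w; puts("hw: enable"); }
static void hw_disable(struct power_well *w) { (void)w; puts("hw: disable"); }

static void power_get(struct power_well *w)
{
        pthread_mutex_lock(&lock);
        if (!w->count++) {
                hw_enable(w);
                w->hw_enabled = true;   /* cache the new state while locked */
        }
        pthread_mutex_unlock(&lock);
}

static void power_put(struct power_well *w)
{
        pthread_mutex_lock(&lock);
        if (!--w->count) {
                w->hw_enabled = false;  /* clear the cache before turning it off */
                hw_disable(w);
        }
        pthread_mutex_unlock(&lock);
}

/* Query usable on hot paths without the lock: trusts the cached flag. */
static bool power_enabled_unlocked(const struct power_well *w)
{
        return w->hw_enabled;
}

static bool power_enabled(struct power_well *w)
{
        bool ret;

        pthread_mutex_lock(&lock);
        ret = power_enabled_unlocked(w);
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        struct power_well well = { 0 };

        power_get(&well);
        printf("enabled: %d\n", power_enabled(&well));
        power_put(&well);
        printf("enabled: %d\n", power_enabled_unlocked(&well));
        return 0;
}
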
@@ -277,6 +277,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
                 static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
                 static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
                 static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+                static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
                 static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
 
                 config.phy_init = hdmi_phy_8x74_init;
@@ -286,6 +287,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
                 config.pwr_reg_names = pwr_reg_names;
                 config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
                 config.hpd_clk_names = hpd_clk_names;
+                config.hpd_freq = hpd_clk_freq;
                 config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
                 config.pwr_clk_names = pwr_clk_names;
                 config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);

@@ -87,6 +87,7 @@ struct hdmi_platform_config {
 
         /* clks that need to be on for hpd: */
         const char **hpd_clk_names;
+        const long unsigned *hpd_freq;
         int hpd_clk_cnt;
 
         /* clks that need to be on for screen pwr (ie pixel clk): */

@@ -127,6 +127,14 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
         }
 
         for (i = 0; i < config->hpd_clk_cnt; i++) {
+                if (config->hpd_freq && config->hpd_freq[i]) {
+                        ret = clk_set_rate(hdmi->hpd_clks[i],
+                                           config->hpd_freq[i]);
+                        if (ret)
+                                dev_warn(dev->dev, "failed to set clk %s (%d)\n",
+                                        config->hpd_clk_names[i], ret);
+                }
+
                 ret = clk_prepare_enable(hdmi->hpd_clks[i]);
                 if (ret) {
                         dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n",

@@ -20,6 +20,10 @@
 #include "msm_mmu.h"
 #include "mdp5_kms.h"
 
+static const char *iommu_ports[] = {
+                "mdp_0",
+};
+
 static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
 
 static int mdp5_hw_init(struct msm_kms *kms)
@@ -104,6 +108,12 @@ static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
 static void mdp5_destroy(struct msm_kms *kms)
 {
         struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+        struct msm_mmu *mmu = mdp5_kms->mmu;
+
+        if (mmu) {
+                mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
+                mmu->funcs->destroy(mmu);
+        }
         kfree(mdp5_kms);
 }
 
@@ -216,10 +226,6 @@ fail:
         return ret;
 }
 
-static const char *iommu_ports[] = {
-                "mdp_0",
-};
-
 static int get_clk(struct platform_device *pdev, struct clk **clkp,
                 const char *name)
 {
@@ -317,17 +323,23 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
                 mmu = msm_iommu_new(dev, config->iommu);
                 if (IS_ERR(mmu)) {
                         ret = PTR_ERR(mmu);
+                        dev_err(dev->dev, "failed to init iommu: %d\n", ret);
                         goto fail;
                 }
+
                 ret = mmu->funcs->attach(mmu, iommu_ports,
                                 ARRAY_SIZE(iommu_ports));
-                if (ret)
+                if (ret) {
+                        dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
+                        mmu->funcs->destroy(mmu);
                         goto fail;
+                }
         } else {
                 dev_info(dev->dev, "no iommu, fallback to phys "
                                 "contig buffers for scanout\n");
                 mmu = NULL;
         }
+        mdp5_kms->mmu = mmu;
 
         mdp5_kms->id = msm_register_mmu(dev, mmu);
         if (mdp5_kms->id < 0) {

@@ -33,6 +33,7 @@ struct mdp5_kms {
 
         /* mapper-id used to request GEM buffer mapped for scanout: */
         int id;
+        struct msm_mmu *mmu;
 
         /* for tracking smp allocation amongst pipes: */
         mdp5_smp_state_t smp_state;

@@ -159,7 +159,7 @@ static int msm_unload(struct drm_device *dev)
 static int get_mdp_ver(struct platform_device *pdev)
 {
 #ifdef CONFIG_OF
-        const static struct of_device_id match_types[] = { {
+        static const struct of_device_id match_types[] = { {
                 .compatible = "qcom,mdss_mdp",
                 .data = (void *)5,
         }, {

@@ -59,7 +59,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
         struct drm_framebuffer *fb = NULL;
         struct fb_info *fbi = NULL;
         struct drm_mode_fb_cmd2 mode_cmd = {0};
-        dma_addr_t paddr;
+        uint32_t paddr;
         int ret, size;
 
         sizes->surface_bpp = 32;

@@ -278,6 +278,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
                 uint32_t *iova)
 {
         struct msm_gem_object *msm_obj = to_msm_bo(obj);
+        struct drm_device *dev = obj->dev;
         int ret = 0;
 
         if (!msm_obj->domain[id].iova) {
@@ -285,6 +286,11 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
                 struct msm_mmu *mmu = priv->mmus[id];
                 struct page **pages = get_pages(obj);
 
+                if (!mmu) {
+                        dev_err(dev->dev, "null MMU pointer\n");
+                        return -EINVAL;
+                }
+
                 if (IS_ERR(pages))
                         return PTR_ERR(pages);
 

@@ -28,7 +28,7 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
                 unsigned long iova, int flags, void *arg)
 {
         DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
-        return 0;
+        return -ENOSYS;
 }
 
 static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
@@ -40,8 +40,10 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
         for (i = 0; i < cnt; i++) {
                 struct device *msm_iommu_get_ctx(const char *ctx_name);
                 struct device *ctx = msm_iommu_get_ctx(names[i]);
-                if (IS_ERR_OR_NULL(ctx))
+                if (IS_ERR_OR_NULL(ctx)) {
+                        dev_warn(dev->dev, "couldn't get %s context", names[i]);
                         continue;
+                }
                 ret = iommu_attach_device(iommu->domain, ctx);
                 if (ret) {
                         dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
@@ -52,6 +54,20 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
         return 0;
 }
 
+static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+        struct msm_iommu *iommu = to_msm_iommu(mmu);
+        int i;
+
+        for (i = 0; i < cnt; i++) {
+                struct device *msm_iommu_get_ctx(const char *ctx_name);
+                struct device *ctx = msm_iommu_get_ctx(names[i]);
+                if (IS_ERR_OR_NULL(ctx))
+                        continue;
+                iommu_detach_device(iommu->domain, ctx);
+        }
+}
+
 static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
                 struct sg_table *sgt, unsigned len, int prot)
 {
@@ -110,7 +126,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
 
                 VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
 
-                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+                BUG_ON(!PAGE_ALIGNED(bytes));
 
                 da += bytes;
         }
@@ -127,6 +143,7 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
 
 static const struct msm_mmu_funcs funcs = {
                 .attach = msm_iommu_attach,
+                .detach = msm_iommu_detach,
                 .map = msm_iommu_map,
                 .unmap = msm_iommu_unmap,
                 .destroy = msm_iommu_destroy,

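Note: the msm change adds a detach hook next to attach in the MMU ops table so the IOMMU can be cleanly released on teardown or when probing is deferred. A sketch of that paired-callback shape using made-up backend names instead of the msm_mmu types:

#include <stdio.h>

/* Hypothetical ops table: every attach has a matching detach. */
struct backend_funcs {
        int  (*attach)(const char **names, int cnt);
        void (*detach)(const char **names, int cnt);
        void (*destroy)(void);
};

static int fake_attach(const char **names, int cnt)
{
        for (int i = 0; i < cnt; i++)
                printf("attach %s\n", names[i]);
        return 0;
}

static void fake_detach(const char **names, int cnt)
{
        /* Walk the same list so teardown mirrors setup exactly. */
        for (int i = 0; i < cnt; i++)
                printf("detach %s\n", names[i]);
}

static void fake_destroy(void)
{
        puts("destroy backend");
}

static const struct backend_funcs funcs = {
        .attach  = fake_attach,
        .detach  = fake_detach,
        .destroy = fake_destroy,
};

int main(void)
{
        static const char *ports[] = { "port_0" };
        const int cnt = (int)(sizeof(ports) / sizeof(ports[0]));

        if (funcs.attach(ports, cnt) == 0) {
                /* ...use the backend; on teardown or deferral: ... */
                funcs.detach(ports, cnt);
                funcs.destroy();
        }
        return 0;
}
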
@@ -22,6 +22,7 @@
 
 struct msm_mmu_funcs {
         int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
+        void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
         int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
                         unsigned len, int prot);
         int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,

@@ -237,13 +237,21 @@
 #define INTEL_BDW_GT3D_IDS(info) \
         _INTEL_BDW_D_IDS(3, info)
 
+#define INTEL_BDW_RSVDM_IDS(info) \
+        _INTEL_BDW_M_IDS(4, info)
+
+#define INTEL_BDW_RSVDD_IDS(info) \
+        _INTEL_BDW_D_IDS(4, info)
+
 #define INTEL_BDW_M_IDS(info) \
         INTEL_BDW_GT12M_IDS(info), \
-        INTEL_BDW_GT3M_IDS(info)
+        INTEL_BDW_GT3M_IDS(info), \
+        INTEL_BDW_RSVDM_IDS(info)
 
 #define INTEL_BDW_D_IDS(info) \
         INTEL_BDW_GT12D_IDS(info), \
-        INTEL_BDW_GT3D_IDS(info)
+        INTEL_BDW_GT3D_IDS(info), \
+        INTEL_BDW_RSVDD_IDS(info)
 
 #define INTEL_CHV_IDS(info) \
         INTEL_VGA_DEVICE(0x22b0, info), \