Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "This is just a regular fixes pull, mostly nouveau and i915. The i915
  ones fix RC6 on Sandybridge after suspend/resume, which I think
  people have been wanting for quite a while!

  Now you shouldn't wish for more patches, as the new mutex/reservation
  code found a number of problems with the qxl driver, and it currently
  makes lockdep angry. I'm working on a set of fixes for it, but it's a
  bit large; I'll submit them separately later today or tomorrow once
  I've banged on them a bit more, just warning you in advance :-)"

Yeah, I'm definitely over the whole "wish for more patches" thing.

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/crtc-helper: explicit DPMS on after modeset
  drm/i915: fix up gt init sequence fallout
  drm/i915: Serialize almost all register access
  drm/i915: quirk no PCH_PWM_ENABLE for Dell XPS13 backlight
  drm/i915: correctly restore fences with objects attached
  drm/i915: Fix dereferencing invalid connectors in is_crtc_connector_off()
  drm/i915: Sanitize shared dpll state
  drm/i915: fix long-standing SNB regression in power consumption after resume v2
  drm/i915: Preserve the DDI_A_4_LANES bit from the bios
  drm/i915: fix pfit regression for non-autoscaled resolutions
  drm/i915: fix up readout of the lvds dither bit on gen2/3
  drm/nouveau: do not allow negative sizes for now
  drm/nouveau: add falcon interrupt handler
  drm/nouveau: use dedicated channel for async moves on GT/GF chipsets.
  drm/nouveau: bump fence timeout to 15 seconds
  drm/nouveau: do not unpin in nouveau_gem_object_del
  drm/nv50/kms: fix pin refcnt leaks
  drm/nouveau: fix some error-path leaks in fbcon handling code
  drm/nouveau: fix locking issues in page flipping paths
commit 549f3a1218
@@ -677,6 +677,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
/* don't break so fail path works correct */
fail = 1;
break;

if (connector->dpms != DRM_MODE_DPMS_ON) {
DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
mode_changed = true;
}
}
}

@@ -754,6 +759,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
ret = -EINVAL;
goto fail;
}
DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
for (i = 0; i < set->num_connectors; i++) {
DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
drm_get_connector_name(set->connectors[i]));
set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
}
}
drm_helper_disable_unused_functions(dev);
} else if (fb_changed) {

@@ -771,22 +782,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
}
}

/*
* crtc set_config helpers implicit set the crtc and all connected
* encoders to DPMS on for a full mode set. But for just an fb update it
* doesn't do that. To not confuse userspace, do an explicit DPMS_ON
* unconditionally. This will also ensure driver internal dpms state is
* consistent again.
*/
if (set->crtc->enabled) {
DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
for (i = 0; i < set->num_connectors; i++) {
DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
drm_get_connector_name(set->connectors[i]));
set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
}
}

kfree(save_connectors);
kfree(save_encoders);
kfree(save_crtcs);
@@ -1495,6 +1495,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->dev = dev;
dev_priv->info = info;

spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->backlight.lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);

i915_dump_device_info(dev_priv);

if (i915_get_bridge_dev(dev)) {

@@ -1585,6 +1593,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_detect_pch(dev);

intel_irq_init(dev);
intel_gt_sanitize(dev);
intel_gt_init(dev);

/* Try to make sure MCHBAR is enabled before poking at it */

@@ -1610,15 +1619,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);

spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->backlight.lock);
mutex_init(&dev_priv->dpio_lock);

mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);

dev_priv->num_plane = 1;
if (IS_VALLEYVIEW(dev))
dev_priv->num_plane = 2;
@@ -706,7 +706,7 @@ static int i915_drm_thaw(struct drm_device *dev)
{
int error = 0;

intel_gt_reset(dev);
intel_gt_sanitize(dev);

if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);

@@ -732,7 +732,7 @@ int i915_resume(struct drm_device *dev)

pci_set_master(dev->pdev);

intel_gt_reset(dev);
intel_gt_sanitize(dev);

/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -1253,21 +1253,21 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)

#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
unsigned long irqflags; \
u##x val = 0; \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
unsigned long irqflags; \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_put(dev_priv); \
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val)); \
return val; \
}

@@ -1280,8 +1280,10 @@ __i915_read(64, q)

#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
unsigned long irqflags; \
u32 __fifo_ret = 0; \
trace_i915_reg_rw(true, reg, val, sizeof(val)); \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \

@@ -1293,6 +1295,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_check(dev_priv, reg); \
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
}
__i915_write(8, b)
__i915_write(16, w)
@@ -555,6 +555,7 @@ enum intel_sbi_destination {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)

struct intel_fbdev;
struct intel_fbc_work;

@@ -1583,7 +1584,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_reset(struct drm_device *dev);
extern void intel_gt_sanitize(struct drm_device *dev);

void i915_error_state_free(struct kref *error_ref);
@@ -2258,7 +2258,17 @@ void i915_gem_restore_fences(struct drm_device *dev)

for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
i915_gem_write_fence(dev, i, reg->obj);

/*
* Commit delayed tiling changes if we have an object still
* attached to the fence, otherwise just clear the fence.
*/
if (reg->obj) {
i915_gem_object_update_fence(reg->obj, reg,
reg->obj->tiling_mode);
} else {
i915_gem_write_fence(dev, i, NULL);
}
}
}

@@ -2795,6 +2805,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
mb();

WARN(obj && (!obj->stride || !obj->tiling_mode),
"bogus fence setup with stride: 0x%x, tiling mode: %i\n",
obj->stride, obj->tiling_mode);

switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:

@@ -2836,6 +2850,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
fence->obj = NULL;
list_del_init(&fence->lru_list);
}
obj->fence_dirty = false;
}

static int

@@ -2965,7 +2980,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
return 0;

i915_gem_object_update_fence(obj, reg, enable);
obj->fence_dirty = false;

return 0;
}
@@ -301,7 +301,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);

intel_dp->DP = intel_dig_port->port_reversal |
intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);

@@ -1109,7 +1109,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
* enabling the port.
*/
I915_WRITE(DDI_BUF_CTL(port),
intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE);
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

@@ -1347,8 +1348,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->get_config = intel_ddi_get_config;

intel_dig_port->port = port;
intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
DDI_BUF_PORT_REVERSAL;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);

intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
@@ -4913,22 +4913,19 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
uint32_t tmp;

tmp = I915_READ(PFIT_CONTROL);
if (!(tmp & PFIT_ENABLE))
return;

/* Check whether the pfit is attached to our pipe. */
if (INTEL_INFO(dev)->gen < 4) {
if (crtc->pipe != PIPE_B)
return;

/* gen2/3 store dither state in pfit control, needs to match */
pipe_config->gmch_pfit.control = tmp & PANEL_8TO6_DITHER_ENABLE;
} else {
if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
return;
}

if (!(tmp & PFIT_ENABLE))
return;

pipe_config->gmch_pfit.control = I915_READ(PFIT_CONTROL);
pipe_config->gmch_pfit.control = tmp;
pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
if (INTEL_INFO(dev)->gen < 5)
pipe_config->gmch_pfit.lvds_border_bits =

@@ -8317,6 +8314,8 @@ check_shared_dpll_state(struct drm_device *dev)
pll->active, pll->refcount);
WARN(pll->active && !pll->on,
"pll in active use but not on in sw tracking\n");
WARN(pll->on && !pll->active,
"pll in on but not on in use in sw tracking\n");
WARN(pll->on != active,
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);

@@ -8541,15 +8540,20 @@ static void intel_set_config_restore_state(struct drm_device *dev,
}

static bool
is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
int num_connectors)
is_crtc_connector_off(struct drm_mode_set *set)
{
int i;

for (i = 0; i < num_connectors; i++)
if (connectors[i].encoder &&
connectors[i].encoder->crtc == crtc &&
connectors[i].dpms != DRM_MODE_DPMS_ON)
if (set->num_connectors == 0)
return false;

if (WARN_ON(set->connectors == NULL))
return false;

for (i = 0; i < set->num_connectors; i++)
if (set->connectors[i]->encoder &&
set->connectors[i]->encoder->crtc == set->crtc &&
set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
return true;

return false;

@@ -8562,10 +8566,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,

/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
if (set->connectors != NULL &&
is_crtc_connector_off(set->crtc, *set->connectors,
set->num_connectors)) {
config->mode_changed = true;
if (is_crtc_connector_off(set)) {
config->mode_changed = true;
} else if (set->crtc->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {

@@ -9398,6 +9400,17 @@ static void quirk_invert_brightness(struct drm_device *dev)
DRM_INFO("applying inverted panel brightness quirk\n");
}

/*
* Some machines (Dell XPS13) suffer broken backlight controls if
* BLM_PCH_PWM_ENABLE is set.
*/
static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
}

struct intel_quirk {
int device;
int subsystem_vendor;

@@ -9467,6 +9480,11 @@ static struct intel_quirk intel_quirks[] = {

/* Acer Aspire 4736Z */
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

/* Dell XPS13 HD Sandy Bridge */
{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
/* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
{ 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
};

static void intel_init_quirks(struct drm_device *dev)

@@ -9817,8 +9835,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
pll->refcount = pll->active;

DRM_DEBUG_KMS("%s hw state readout: refcount %i\n",
pll->name, pll->refcount);
DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
pll->name, pll->refcount, pll->on);
}

list_for_each_entry(encoder, &dev->mode_config.encoder_list,

@@ -9869,6 +9887,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_plane *plane;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
int i;

intel_modeset_readout_hw_state(dev);

@@ -9884,6 +9903,18 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
}

for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

if (!pll->on || pll->active)
continue;

DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

pll->disable(dev_priv, pll);
pll->on = false;
}

if (force_restore) {
/*
* We need to use raw interfaces for restoring state to avoid
@@ -504,7 +504,7 @@ struct intel_dp {
struct intel_digital_port {
struct intel_encoder base;
enum port port;
u32 port_reversal;
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
};
@@ -109,6 +109,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_PVSYNC;

pipe_config->adjusted_mode.flags |= flags;

/* gen2/3 store dither state in pfit control, needs to match */
if (INTEL_INFO(dev)->gen < 4) {
tmp = I915_READ(PFIT_CONTROL);

pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
}

/* The LVDS pin pair needs to be on before the DPLLs are enabled.

@@ -290,14 +297,11 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,

intel_pch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
return true;
} else {
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
}

drm_mode_set_crtcinfo(adjusted_mode, 0);
pipe_config->timings_set = true;
}

/*
* XXX: It would be nice to support lower refresh rates on the
@@ -194,6 +194,9 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
adjusted_mode->vdisplay == mode->vdisplay)
goto out;

drm_mode_set_crtcinfo(adjusted_mode, 0);
pipe_config->timings_set = true;

switch (fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*

@@ -580,7 +583,8 @@ void intel_panel_enable_backlight(struct drm_device *dev,
POSTING_READ(reg);
I915_WRITE(reg, tmp | BLM_PWM_ENABLE);

if (HAS_PCH_SPLIT(dev)) {
if (HAS_PCH_SPLIT(dev) &&
!(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp |= BLM_PCH_PWM_ENABLE;
tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
@@ -5476,7 +5476,7 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
gen6_gt_check_fifodbg(dev_priv);
}

void intel_gt_reset(struct drm_device *dev)
void intel_gt_sanitize(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

@@ -5487,6 +5487,10 @@ void intel_gt_reset(struct drm_device *dev)
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}

/* BIOS often leaves RC6 enabled, but disable it for hw init */
if (INTEL_INFO(dev)->gen >= 6)
intel_disable_gt_powersave(dev);
}

void intel_gt_init(struct drm_device *dev)

@@ -5495,8 +5499,6 @@ void intel_gt_init(struct drm_device *dev)

spin_lock_init(&dev_priv->gt_lock);

intel_gt_reset(dev);

if (IS_VALLEYVIEW(dev)) {
dev_priv->gt.force_wake_get = vlv_force_wake_get;
dev_priv->gt.force_wake_put = vlv_force_wake_put;
@@ -90,6 +90,7 @@ nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;

nv_subdev(priv)->unit = 0x00008000;
nv_subdev(priv)->intr = nouveau_falcon_intr;
nv_engine(priv)->cclass = &nvc0_bsp_cclass;
nv_engine(priv)->sclass = nvc0_bsp_sclass;
return 0;
@@ -90,6 +90,7 @@ nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;

nv_subdev(priv)->unit = 0x00008000;
nv_subdev(priv)->intr = nouveau_falcon_intr;
nv_engine(priv)->cclass = &nve0_bsp_cclass;
nv_engine(priv)->sclass = nve0_bsp_sclass;
return 0;
@@ -23,6 +23,25 @@
#include <engine/falcon.h>
#include <subdev/timer.h>

void
nouveau_falcon_intr(struct nouveau_subdev *subdev)
{
struct nouveau_falcon *falcon = (void *)subdev;
u32 dispatch = nv_ro32(falcon, 0x01c);
u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);

if (intr & 0x00000010) {
nv_debug(falcon, "ucode halted\n");
nv_wo32(falcon, 0x004, 0x00000010);
intr &= ~0x00000010;
}

if (intr) {
nv_error(falcon, "unhandled intr 0x%08x\n", intr);
nv_wo32(falcon, 0x004, intr);
}
}

u32
_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
{
@@ -90,6 +90,7 @@ nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;

nv_subdev(priv)->unit = 0x00000002;
nv_subdev(priv)->intr = nouveau_falcon_intr;
nv_engine(priv)->cclass = &nvc0_ppp_cclass;
nv_engine(priv)->sclass = nvc0_ppp_sclass;
return 0;
@@ -90,6 +90,7 @@ nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;

nv_subdev(priv)->unit = 0x00020000;
nv_subdev(priv)->intr = nouveau_falcon_intr;
nv_engine(priv)->cclass = &nvc0_vp_cclass;
nv_engine(priv)->sclass = nvc0_vp_sclass;
return 0;
@@ -90,6 +90,7 @@ nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;

nv_subdev(priv)->unit = 0x00020000;
nv_subdev(priv)->intr = nouveau_falcon_intr;
nv_engine(priv)->cclass = &nve0_vp_cclass;
nv_engine(priv)->sclass = nve0_vp_sclass;
return 0;
@@ -72,6 +72,8 @@ int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, u32, bool, const char *,
const char *, int, void **);

void nouveau_falcon_intr(struct nouveau_subdev *subdev);

#define _nouveau_falcon_dtor _nouveau_engine_dtor
int _nouveau_falcon_init(struct nouveau_object *);
int _nouveau_falcon_fini(struct nouveau_object *, bool);
@@ -148,6 +148,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)

if (unlikely(nvbo->gem))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
WARN_ON(nvbo->pin_refcnt > 0);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
kfree(nvbo);
}

@@ -197,6 +198,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
size_t acc_size;
int ret;
int type = ttm_bo_type_device;
int max_size = INT_MAX & ~((1 << drm->client.base.vm->vmm->lpg_shift) - 1);

if (size <= 0 || size > max_size) {
nv_warn(drm, "skipped size %x\n", (u32)size);
return -EINVAL;
}

if (sg)
type = ttm_bo_type_sg;

@@ -340,13 +347,15 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
int ret, ref;

ret = ttm_bo_reserve(bo, false, false, false, 0);
if (ret)
return ret;

if (--nvbo->pin_refcnt)
ref = --nvbo->pin_refcnt;
WARN_ON_ONCE(ref < 0);
if (ref)
goto out;

nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

@@ -578,7 +587,7 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
int ret = RING_SPACE(chan, 2);
if (ret == 0) {
BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
OUT_RING (chan, handle);
OUT_RING (chan, handle & 0x0000ffff);
FIRE_RING (chan);
}
return ret;

@@ -973,7 +982,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;

mutex_lock(&chan->cli->mutex);
mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);

/* create temporary vmas for the transfer and attach them to the
* old nouveau_mem node, these will get cleaned up after ttm has

@@ -1014,7 +1023,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_mem_reg *, struct ttm_mem_reg *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
{ "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },

@@ -1034,7 +1043,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct nouveau_channel *chan;
u32 handle = (mthd->engine << 16) | mthd->oclass;

if (mthd->init == nve0_bo_move_init)
if (mthd->engine)
chan = drm->cechan;
else
chan = drm->channel;
@@ -138,7 +138,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
{
struct nouveau_framebuffer *nouveau_fb;
struct drm_gem_object *gem;
int ret;
int ret = -ENOMEM;

gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
if (!gem)

@@ -146,15 +146,19 @@ nouveau_user_framebuffer_create(struct drm_device *dev,

nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
if (!nouveau_fb)
return ERR_PTR(-ENOMEM);
goto err_unref;

ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
if (ret) {
drm_gem_object_unreference(gem);
return ERR_PTR(ret);
}
if (ret)
goto err;

return &nouveau_fb->base;

err:
kfree(nouveau_fb);
err_unref:
drm_gem_object_unreference(gem);
return ERR_PTR(ret);
}

static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {

@@ -524,9 +528,12 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_page_flip_state *s;
struct nouveau_channel *chan = NULL;
struct nouveau_fence *fence;
struct list_head res;
struct ttm_validate_buffer res_val[2];
struct ttm_validate_buffer resv[2] = {
{ .bo = &old_bo->bo },
{ .bo = &new_bo->bo },
};
struct ww_acquire_ctx ticket;
LIST_HEAD(res);
int ret;

if (!drm->channel)

@@ -545,27 +552,19 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
chan = drm->channel;
spin_unlock(&old_bo->bo.bdev->fence_lock);

mutex_lock(&chan->cli->mutex);

if (new_bo != old_bo) {
ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
if (likely(!ret)) {
res_val[0].bo = &old_bo->bo;
res_val[1].bo = &new_bo->bo;
INIT_LIST_HEAD(&res);
list_add_tail(&res_val[0].head, &res);
list_add_tail(&res_val[1].head, &res);
ret = ttm_eu_reserve_buffers(&ticket, &res);
if (ret)
nouveau_bo_unpin(new_bo);
}
} else
ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
if (ret)
goto fail_free;

if (ret) {
mutex_unlock(&chan->cli->mutex);
goto fail_free;
list_add(&resv[1].head, &res);
}
list_add(&resv[0].head, &res);

mutex_lock(&chan->cli->mutex);
ret = ttm_eu_reserve_buffers(&ticket, &res);
if (ret)
goto fail_unpin;

/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)

@@ -576,10 +575,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Emit a page flip */
if (nv_device(drm->device)->card_type >= NV_50) {
ret = nv50_display_flip_next(crtc, fb, chan, 0);
if (ret) {
mutex_unlock(&chan->cli->mutex);
if (ret)
goto fail_unreserve;
}
}

ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);

@@ -590,22 +587,18 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Update the crtc struct and cleanup */
crtc->fb = fb;

if (old_bo != new_bo) {
ttm_eu_fence_buffer_objects(&ticket, &res, fence);
ttm_eu_fence_buffer_objects(&ticket, &res, fence);
if (old_bo != new_bo)
nouveau_bo_unpin(old_bo);
} else {
nouveau_bo_fence(new_bo, fence);
ttm_bo_unreserve(&new_bo->bo);
}
nouveau_fence_unref(&fence);
return 0;

fail_unreserve:
if (old_bo != new_bo) {
ttm_eu_backoff_reservation(&ticket, &res);
ttm_eu_backoff_reservation(&ticket, &res);
fail_unpin:
mutex_unlock(&chan->cli->mutex);
if (old_bo != new_bo)
nouveau_bo_unpin(new_bo);
} else
ttm_bo_unreserve(&new_bo->bo);
fail_free:
kfree(s);
return ret;
@@ -192,6 +192,18 @@ nouveau_accel_init(struct nouveau_drm *drm)

arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
arg1 = 1;
} else
if (device->chipset >= 0xa3 &&
device->chipset != 0xaa &&
device->chipset != 0xac) {
ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
NVDRM_CHAN + 1, NvDmaFB, NvDmaTT,
&drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

arg0 = NvDmaFB;
arg1 = NvDmaTT;
} else {
arg0 = NvDmaFB;
arg1 = NvDmaTT;

@@ -284,8 +296,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
return 0;
}

static struct lock_class_key drm_client_lock_class_key;

static int
nouveau_drm_load(struct drm_device *dev, unsigned long flags)
{

@@ -297,7 +307,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
if (ret)
return ret;
lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);

dev->dev_private = drm;
drm->dev = dev;
@@ -385,6 +385,7 @@ out_unlock:
mutex_unlock(&dev->struct_mutex);
if (chan)
nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma);
nouveau_bo_unmap(nvbo);
out_unpin:
nouveau_bo_unpin(nvbo);
out_unref:
@@ -143,7 +143,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
int ret;

fence->channel = chan;
fence->timeout = jiffies + (3 * DRM_HZ);
fence->timeout = jiffies + (15 * DRM_HZ);
fence->sequence = ++fctx->sequence;

ret = fctx->emit(fence);
@@ -50,12 +50,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
return;
nvbo->gem = NULL;

/* Lockdep hates you for doing reserve with gem object lock held */
if (WARN_ON_ONCE(nvbo->pin_refcnt)) {
nvbo->pin_refcnt = 1;
nouveau_bo_unpin(nvbo);
}

if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
@@ -355,6 +355,7 @@ struct nv50_oimm {

struct nv50_head {
struct nouveau_crtc base;
struct nouveau_bo *image;
struct nv50_curs curs;
struct nv50_sync sync;
struct nv50_ovly ovly;

@@ -517,9 +518,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
{
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_head *head = nv50_head(crtc);
struct nv50_sync *sync = nv50_sync(crtc);
int head = nv_crtc->index, ret;
u32 *push;
int ret;

swap_interval <<= 4;
if (swap_interval == 0)

@@ -537,7 +539,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return ret;

BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvEvoSema0 + head);
OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
OUT_RING (chan, sync->addr ^ 0x10);
BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
OUT_RING (chan, sync->data + 1);

@@ -546,7 +548,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
OUT_RING (chan, sync->data);
} else
if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
ret = RING_SPACE(chan, 12);
if (ret)
return ret;

@@ -565,7 +567,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
} else
if (chan) {
u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
ret = RING_SPACE(chan, 10);
if (ret)
return ret;

@@ -630,6 +632,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
evo_kick(push, sync);

nouveau_bo_ref(nv_fb->nvbo, &head->image);
return 0;
}

@@ -1038,18 +1042,17 @@ static int
nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
{
struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
struct nv50_head *head = nv50_head(crtc);
int ret;

ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
if (ret)
return ret;

if (old_fb) {
nvfb = nouveau_framebuffer(old_fb);
nouveau_bo_unpin(nvfb->nvbo);
if (ret == 0) {
if (head->image)
nouveau_bo_unpin(head->image);
nouveau_bo_ref(nvfb->nvbo, &head->image);
}

return 0;
return ret;
}

static int

@@ -1198,6 +1201,15 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
}
}

static void
nv50_crtc_disable(struct drm_crtc *crtc)
{
struct nv50_head *head = nv50_head(crtc);
if (head->image)
nouveau_bo_unpin(head->image);
nouveau_bo_ref(NULL, &head->image);
}

static int
nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height)

@@ -1271,18 +1283,29 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nv50_head *head = nv50_head(crtc);

nv50_dmac_destroy(disp->core, &head->ovly.base);
nv50_pioc_destroy(disp->core, &head->oimm.base);
nv50_dmac_destroy(disp->core, &head->sync.base);
nv50_pioc_destroy(disp->core, &head->curs.base);

/*XXX: this shouldn't be necessary, but the core doesn't call
* disconnect() during the cleanup paths
*/
if (head->image)
nouveau_bo_unpin(head->image);
nouveau_bo_ref(NULL, &head->image);

nouveau_bo_unmap(nv_crtc->cursor.nvbo);
if (nv_crtc->cursor.nvbo)
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);

nouveau_bo_unmap(nv_crtc->lut.nvbo);
if (nv_crtc->lut.nvbo)
nouveau_bo_unpin(nv_crtc->lut.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);

drm_crtc_cleanup(crtc);
kfree(crtc);
}

@@ -1296,6 +1319,7 @@ static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
.mode_set_base = nv50_crtc_mode_set_base,
.mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
.load_lut = nv50_crtc_lut_load,
.disable = nv50_crtc_disable,
};

static const struct drm_crtc_funcs nv50_crtc_func = {