drm/i915: Don't wait interruptible for possible plane buffer flush

When we set up a buffer for the display plane, we check for any pending
required GPU flush and may wait interruptibly for that flush to
complete. That wait is quite likely to fail when the X process receives
a signal, which then fails the modeset and leaves the display engine in
an inconsistent state. The result can be a blank screen or a CPU hang,
and the DDX driver turns the outputs' DPMS on afterwards whether the
modeset succeeded or not.

So add a new helper for setting up the display plane buffer which, when
a flush is needed, waits for it uninterruptibly.

This should fix bugs like https://bugs.freedesktop.org/show_bug.cgi?id=24009,
and also fixes the mode switch stress test on Ironlake.

Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Author:    Zhenyu Wang, 2009-11-25 13:09:39 +08:00
Committer: Eric Anholt
commit b9241ea31f
parent 7c3f0a2726
3 changed files with 53 additions and 1 deletion
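
The only behavioural change is the final "interruptible" argument passed to
i915_do_wait_request(), whose prototype appears in the first hunk below. The
fragment that follows is an illustrative sketch, not part of the patch: the
helper name example_wait_for_rendering and its for_modeset parameter are
hypothetical, and the signal behaviour described in the comment is the usual
semantics of an interruptible wait rather than something quoted from this diff.

/* Illustrative sketch, assuming the i915 GEM types and the
 * i915_do_wait_request() prototype shown in the hunks below.
 */
static int example_wait_for_rendering(struct drm_device *dev,
                                      struct drm_i915_gem_object *obj_priv,
                                      bool for_modeset)
{
        if (!obj_priv->active)
                return 0;

        /* interruptible == 1: a pending signal (e.g. one delivered to the X
         * process) aborts the wait, so the caller's modeset fails half-way.
         * interruptible == 0: the wait runs to completion, which is what the
         * new display-plane helper relies on.
         */
        return i915_do_wait_request(dev, obj_priv->last_rendering_seqno,
                                    for_modeset ? 0 : 1);
}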


@@ -864,6 +864,7 @@ int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
                                       int write);
+int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
 int i915_gem_attach_phys_object(struct drm_device *dev,
                                 struct drm_gem_object *obj, int id);
 void i915_gem_detach_phys_object(struct drm_device *dev,


@@ -2837,6 +2837,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
         return 0;
 }
 
+/*
+ * Prepare buffer for display plane. Use uninterruptible for possible flush
+ * wait, as in modesetting process we're not supposed to be interrupted.
+ */
+int
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+{
+        struct drm_device *dev = obj->dev;
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+        uint32_t old_write_domain, old_read_domains;
+        int ret;
+
+        /* Not valid to be called on unbound objects. */
+        if (obj_priv->gtt_space == NULL)
+                return -EINVAL;
+
+        i915_gem_object_flush_gpu_write_domain(obj);
+
+        /* Wait on any GPU rendering and flushing to occur. */
+        if (obj_priv->active) {
+#if WATCH_BUF
+                DRM_INFO("%s: object %p wait for seqno %08x\n",
+                         __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+                ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+                if (ret != 0)
+                        return ret;
+        }
+
+        old_write_domain = obj->write_domain;
+        old_read_domains = obj->read_domains;
+
+        obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+        i915_gem_object_flush_cpu_write_domain(obj);
+
+        /* It should now be out of any other write domains, and we can update
+         * the domain values for our changes.
+         */
+        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+        obj->read_domains |= I915_GEM_DOMAIN_GTT;
+        obj->write_domain = I915_GEM_DOMAIN_GTT;
+        obj_priv->dirty = 1;
+
+        trace_i915_gem_object_change_domain(obj,
+                                            old_read_domains,
+                                            old_write_domain);
+
+        return 0;
+}
+
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  *


@@ -1207,7 +1207,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                 return ret;
         }
 
-        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+        ret = i915_gem_object_set_to_display_plane(obj);
         if (ret != 0) {
                 i915_gem_object_unpin(obj);
                 mutex_unlock(&dev->struct_mutex);