drm/i915: protect RPS/RC6 related accesses (including PCU) with a new mutex
This allows the power related code to run independently of the rest of the
pipeline, extending the resume and init time improvements into userspace,
which would otherwise have been blocked on the struct mutex if we were
doing PCU communication.

v2: Also convert the locking for the rps sysfs interface.

Suggested-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org> (v1)
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
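The pattern the patch applies throughout is to take the new dev_priv->rps.hw_lock, rather than struct_mutex, around every RPS/RC6 register update and PCU mailbox exchange. A minimal sketch of that pattern, using functions that appear in the diff below (new_delay, mbox and val are placeholder values, and the fragment is illustrative rather than a hunk from the patch):

/* Illustrative fragment: RPS/RC6 and PCU traffic now serializes on
 * dev_priv->rps.hw_lock, so it no longer has to wait for (or hold up)
 * the GEM struct_mutex.
 */
mutex_lock(&dev_priv->rps.hw_lock);
gen6_set_rps(dev, new_delay);                  /* RPS register writes */
sandybridge_pcode_write(dev_priv, mbox, val);  /* PCU mailbox access */
mutex_unlock(&dev_priv->rps.hw_lock);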
commit 4fc688ce79
parent 1a01ab3b2d
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1280,7 +1280,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
                 return 0;
         }
 
-        ret = mutex_lock_interruptible(&dev->struct_mutex);
+        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
         if (ret)
                 return ret;
 
@@ -1296,7 +1296,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
                 seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
         }
 
-        mutex_unlock(&dev->struct_mutex);
+        mutex_unlock(&dev_priv->rps.hw_lock);
 
         return 0;
 }
@@ -1713,13 +1713,13 @@ i915_max_freq_read(struct file *filp,
         if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                 return -ENODEV;
 
-        ret = mutex_lock_interruptible(&dev->struct_mutex);
+        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
         if (ret)
                 return ret;
 
         len = snprintf(buf, sizeof(buf),
                        "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
-        mutex_unlock(&dev->struct_mutex);
+        mutex_unlock(&dev_priv->rps.hw_lock);
 
         if (len > sizeof(buf))
                 len = sizeof(buf);
@@ -1754,7 +1754,7 @@ i915_max_freq_write(struct file *filp,
 
         DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
 
-        ret = mutex_lock_interruptible(&dev->struct_mutex);
+        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
         if (ret)
                 return ret;
 
@@ -1764,7 +1764,7 @@ i915_max_freq_write(struct file *filp,
         dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
 
         gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
-        mutex_unlock(&dev->struct_mutex);
+        mutex_unlock(&dev_priv->rps.hw_lock);
 
         return cnt;
 }
@@ -1789,13 +1789,13 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
         if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                 return -ENODEV;
 
-        ret = mutex_lock_interruptible(&dev->struct_mutex);
+        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
         if (ret)
                 return ret;
 
         len = snprintf(buf, sizeof(buf),
                        "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
-        mutex_unlock(&dev->struct_mutex);
+        mutex_unlock(&dev_priv->rps.hw_lock);
 
         if (len > sizeof(buf))
                 len = sizeof(buf);
@@ -1828,7 +1828,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
         DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
 
-        ret = mutex_lock_interruptible(&dev->struct_mutex);
+        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
         if (ret)
                 return ret;
 
@@ -1838,7 +1838,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
         dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
 
         gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
-        mutex_unlock(&dev->struct_mutex);
+        mutex_unlock(&dev_priv->rps.hw_lock);
 
         return cnt;
 }
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1625,6 +1625,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
         spin_lock_init(&dev_priv->rps.lock);
         spin_lock_init(&dev_priv->dpio_lock);
 
+        mutex_init(&dev_priv->rps.hw_lock);
+
         if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
                 dev_priv->num_pipe = 3;
         else if (IS_MOBILE(dev) || !IS_GEN2(dev))
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -564,6 +564,12 @@ struct intel_gen6_power_mgmt {
         u8 max_delay;
 
         struct delayed_work delayed_resume_work;
+
+        /*
+         * Protects RPS/RC6 register access and PCU communication.
+         * Must be taken after struct_mutex if nested.
+         */
+        struct mutex hw_lock;
 };
 
 struct intel_ilk_power_mgmt {
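The comment on the new field fixes the lock ordering: if a code path ever needs both locks, struct_mutex is taken first and rps.hw_lock nests inside it. A hedged illustration of that ordering (hypothetical caller, not code from this patch):

/* Hypothetical nesting example: rps.hw_lock is always the inner lock. */
mutex_lock(&dev->struct_mutex);
mutex_lock(&dev_priv->rps.hw_lock);
/* ... touch RPS/RC6 registers or talk to the PCU ... */
mutex_unlock(&dev_priv->rps.hw_lock);
mutex_unlock(&dev->struct_mutex);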
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -378,7 +378,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
         if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
                 return;
 
-        mutex_lock(&dev_priv->dev->struct_mutex);
+        mutex_lock(&dev_priv->rps.hw_lock);
 
         if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
                 new_delay = dev_priv->rps.cur_delay + 1;
@@ -393,7 +393,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
                 gen6_set_rps(dev_priv->dev, new_delay);
         }
 
-        mutex_unlock(&dev_priv->dev->struct_mutex);
+        mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
 
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -211,12 +211,9 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
         struct drm_i915_private *dev_priv = dev->dev_private;
         int ret;
 
-        ret = i915_mutex_lock_interruptible(dev);
-        if (ret)
-                return ret;
-
+        mutex_lock(&dev_priv->rps.hw_lock);
         ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
-        mutex_unlock(&dev->struct_mutex);
+        mutex_unlock(&dev_priv->rps.hw_lock);
 
         return snprintf(buf, PAGE_SIZE, "%d", ret);
 }
@@ -228,12 +225,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
         struct drm_i915_private *dev_priv = dev->dev_private;
         int ret;
 
-        ret = i915_mutex_lock_interruptible(dev);
-        if (ret)
-                return ret;
-
+        mutex_lock(&dev_priv->rps.hw_lock);
         ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
-        mutex_unlock(&dev->struct_mutex);
+        mutex_unlock(&dev_priv->rps.hw_lock);
 
         return snprintf(buf, PAGE_SIZE, "%d", ret);
 }
@@ -254,16 +248,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
         val /= GT_FREQUENCY_MULTIPLIER;
 
-        ret = mutex_lock_interruptible(&dev->struct_mutex);
-        if (ret)
-                return ret;
+        mutex_lock(&dev_priv->rps.hw_lock);
 
         rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
         hw_max = (rp_state_cap & 0xff);
         hw_min = ((rp_state_cap & 0xff0000) >> 16);
 
         if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
-                mutex_unlock(&dev->struct_mutex);
+                mutex_unlock(&dev_priv->rps.hw_lock);
                 return -EINVAL;
         }
 
@@ -272,7 +264,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
         dev_priv->rps.max_delay = val;
 
-        mutex_unlock(&dev->struct_mutex);
+        mutex_unlock(&dev_priv->rps.hw_lock);
 
         return count;
 }
@@ -284,12 +276,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
         struct drm_i915_private *dev_priv = dev->dev_private;
         int ret;
 
-        ret = i915_mutex_lock_interruptible(dev);
-        if (ret)
-                return ret;
-
+        mutex_lock(&dev_priv->rps.hw_lock);
         ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
-        mutex_unlock(&dev->struct_mutex);
+        mutex_unlock(&dev_priv->rps.hw_lock);
 
         return snprintf(buf, PAGE_SIZE, "%d", ret);
 }
@@ -310,16 +299,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
         val /= GT_FREQUENCY_MULTIPLIER;
 
-        ret = mutex_lock_interruptible(&dev->struct_mutex);
-        if (ret)
-                return ret;
+        mutex_lock(&dev_priv->rps.hw_lock);
 
         rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
         hw_max = (rp_state_cap & 0xff);
         hw_min = ((rp_state_cap & 0xff0000) >> 16);
 
         if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
-                mutex_unlock(&dev->struct_mutex);
+                mutex_unlock(&dev_priv->rps.hw_lock);
                 return -EINVAL;
         }
 
@@ -328,7 +315,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
         dev_priv->rps.min_delay = val;
 
-        mutex_unlock(&dev->struct_mutex);
+        mutex_unlock(&dev_priv->rps.hw_lock);
 
         return count;
 
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2323,7 +2323,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
         struct drm_i915_private *dev_priv = dev->dev_private;
         u32 limits = gen6_rps_limits(dev_priv, &val);
 
-        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
         WARN_ON(val > dev_priv->rps.max_delay);
         WARN_ON(val < dev_priv->rps.min_delay);
 
@@ -2409,7 +2409,7 @@ static void gen6_enable_rps(struct drm_device *dev)
         int rc6_mode;
         int i, ret;
 
-        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
         /* Here begins a magic sequence of register writes to enable
          * auto-downclocking.
@@ -2550,7 +2550,7 @@ static void gen6_update_ring_freq(struct drm_device *dev)
         int gpu_freq, ia_freq, max_ia_freq;
         int scaling_factor = 180;
 
-        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
         max_ia_freq = cpufreq_quick_get_max(0);
         /*
@@ -3311,7 +3311,9 @@ void intel_disable_gt_powersave(struct drm_device *dev)
                 ironlake_disable_rc6(dev);
         } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
                 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+                mutex_lock(&dev_priv->rps.hw_lock);
                 gen6_disable_rps(dev);
+                mutex_unlock(&dev_priv->rps.hw_lock);
         }
 }
 
@@ -3322,10 +3324,10 @@ static void intel_gen6_powersave_work(struct work_struct *work)
                                              rps.delayed_resume_work.work);
         struct drm_device *dev = dev_priv->dev;
 
-        mutex_lock(&dev->struct_mutex);
+        mutex_lock(&dev_priv->rps.hw_lock);
         gen6_enable_rps(dev);
         gen6_update_ring_freq(dev);
-        mutex_unlock(&dev->struct_mutex);
+        mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
 void intel_enable_gt_powersave(struct drm_device *dev)
@@ -4245,7 +4247,7 @@ void intel_gt_init(struct drm_device *dev)
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
 {
-        WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
         if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
                 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
@@ -4269,7 +4271,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
 
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
 {
-        WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
         if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
                 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");