[PATCH] x86_64: Move ondemand timer into own work queue
Taking the cpu hotplug semaphore in a normal events workqueue is unsafe because other tasks can wait on any workqueue while it is held. This results in a deadlock. Move the DBS timer into its own work queue, which is not affected by other work queue flushes, to avoid this. Has been acked by Venkatesh. Cc: venkatesh.pallipadi@intel.com Cc: cpufreq@lists.linux.org.uk Signed-off-by: Andi Kleen <ak@suse.de> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
ac71d12c99
commit
6810b548b2
|
@ -74,6 +74,8 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
|
||||||
static DEFINE_MUTEX (dbs_mutex);
|
static DEFINE_MUTEX (dbs_mutex);
|
||||||
static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
|
static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
|
||||||
|
|
||||||
|
static struct workqueue_struct *dbs_workq;
|
||||||
|
|
||||||
struct dbs_tuners {
|
struct dbs_tuners {
|
||||||
unsigned int sampling_rate;
|
unsigned int sampling_rate;
|
||||||
unsigned int sampling_down_factor;
|
unsigned int sampling_down_factor;
|
||||||
|
@ -364,23 +366,29 @@ static void do_dbs_timer(void *data)
|
||||||
mutex_lock(&dbs_mutex);
|
mutex_lock(&dbs_mutex);
|
||||||
for_each_online_cpu(i)
|
for_each_online_cpu(i)
|
||||||
dbs_check_cpu(i);
|
dbs_check_cpu(i);
|
||||||
schedule_delayed_work(&dbs_work,
|
queue_delayed_work(dbs_workq, &dbs_work,
|
||||||
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
|
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
|
||||||
mutex_unlock(&dbs_mutex);
|
mutex_unlock(&dbs_mutex);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void dbs_timer_init(void)
|
static inline void dbs_timer_init(void)
|
||||||
{
|
{
|
||||||
INIT_WORK(&dbs_work, do_dbs_timer, NULL);
|
INIT_WORK(&dbs_work, do_dbs_timer, NULL);
|
||||||
schedule_delayed_work(&dbs_work,
|
if (!dbs_workq)
|
||||||
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
|
dbs_workq = create_singlethread_workqueue("ondemand");
|
||||||
|
if (!dbs_workq) {
|
||||||
|
printk(KERN_ERR "ondemand: Cannot initialize kernel thread\n");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
queue_delayed_work(dbs_workq, &dbs_work,
|
||||||
|
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void dbs_timer_exit(void)
|
static inline void dbs_timer_exit(void)
|
||||||
{
|
{
|
||||||
cancel_delayed_work(&dbs_work);
|
if (dbs_workq)
|
||||||
return;
|
cancel_rearming_delayed_workqueue(dbs_workq, &dbs_work);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
|
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
|
||||||
|
@ -489,8 +497,12 @@ static int __init cpufreq_gov_dbs_init(void)
|
||||||
|
|
||||||
static void __exit cpufreq_gov_dbs_exit(void)
|
static void __exit cpufreq_gov_dbs_exit(void)
|
||||||
{
|
{
|
||||||
/* Make sure that the scheduled work is indeed not running */
|
/* Make sure that the scheduled work is indeed not running.
|
||||||
flush_scheduled_work();
|
Assumes the timer has been cancelled first. */
|
||||||
|
if (dbs_workq) {
|
||||||
|
flush_workqueue(dbs_workq);
|
||||||
|
destroy_workqueue(dbs_workq);
|
||||||
|
}
|
||||||
|
|
||||||
cpufreq_unregister_governor(&cpufreq_gov_dbs);
|
cpufreq_unregister_governor(&cpufreq_gov_dbs);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue