x86, acpi, idle: Restructure the mwait idle routines

People seem to delight in writing wrong and broken mwait idle routines;
collapse the lot.

This leaves mwait_play_dead() as the sole remaining user of __mwait();
new __mwait() users are probably doing it wrong.

Also remove __sti_mwait() as it's unused.
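
As the hunks below show, every caller collapses onto one shared helper.
Schematically (eax/ecx carry the same C-state hint they always did):

	/* before: open-coded in each driver, each copy slightly different */
	__monitor((void *)&current_thread_info()->flags, 0, 0);
	smp_mb();
	if (!need_resched())
		__mwait(eax, ecx);

	/* after: one shared helper that also manages TIF_POLLING_NRFLAG */
	mwait_idle_with_hints(eax, ecx);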

Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Jacob Jun Pan <jacob.jun.pan@linux.intel.com>
Cc: Mike Galbraith <bitbucket@online.de>
Cc: Len Brown <lenb@kernel.org>
Cc: Rui Zhang <rui.zhang@intel.com>
Acked-by: Rafael Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20131212141654.616820819@infradead.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
commit 1682425539 (parent 40e2d7f9b5)
Authored by Peter Zijlstra on 2013-12-12 15:08:36 +01:00; committed by H. Peter Anvin.
7 changed files with 43 additions and 78 deletions

arch/x86/include/asm/mwait.h

@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_MWAIT_H
 #define _ASM_X86_MWAIT_H
 
+#include <linux/sched.h>
+
 #define MWAIT_SUBSTATE_MASK		0xf
 #define MWAIT_CSTATE_MASK		0xf
 #define MWAIT_SUBSTATE_SIZE		4
@@ -13,4 +15,42 @@
 
 #define MWAIT_ECX_INTERRUPT_BREAK	0x1
 
+static inline void __monitor(const void *eax, unsigned long ecx,
+			     unsigned long edx)
+{
+	/* "monitor %eax, %ecx, %edx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc8;"
+		     :: "a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+	/* "mwait %eax, %ecx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc9;"
+		     :: "a" (eax), "c" (ecx));
+}
+
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
+ */
+static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+{
+	if (!current_set_polling_and_test()) {
+		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
+		__monitor((void *)&current_thread_info()->flags, 0, 0);
+		if (!need_resched())
+			__mwait(eax, ecx);
+	}
+	__current_clr_polling();
+}
+
 #endif /* _ASM_X86_MWAIT_H */
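
A note on why the new helper can drop the explicit smp_mb() that most of the
old copies carried: current_set_polling_and_test() sets TIF_POLLING_NRFLAG and
then tests need_resched() with the required ordering folded in, and a waker
that sets TIF_NEED_RESCHED writes the very word MONITOR is armed on, so MWAIT
wakes without an IPI. The sketch below is a minimal userspace analogue of that
handshake (an illustration only, not kernel code: C11 atomics stand in for the
thread_info flags word and a spin loop stands in for MONITOR/MWAIT):

	/* Illustration only: thread_info->flags becomes a C11 atomic,
	 * and a spin loop stands in for MONITOR/MWAIT. */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define TIF_NEED_RESCHED	(1u << 0)
	#define TIF_POLLING_NRFLAG	(1u << 1)

	static _Atomic unsigned int ti_flags; /* stand-in for current_thread_info()->flags */

	/* stand-in for current_set_polling_and_test(): advertise "polling",
	 * then test need_resched; the seq_cst RMW provides the ordering the
	 * old open-coded callers got from smp_mb(). */
	static int set_polling_and_test(void)
	{
		return atomic_fetch_or(&ti_flags, TIF_POLLING_NRFLAG) & TIF_NEED_RESCHED;
	}

	static void *idle_side(void *arg)
	{
		(void)arg;
		if (!set_polling_and_test()) {
			/* MONITOR is armed on ti_flags; MWAIT sleeps until it is written */
			while (!(atomic_load(&ti_flags) & TIF_NEED_RESCHED))
				; /* mwait would idle here instead of spinning */
		}
		atomic_fetch_and(&ti_flags, ~TIF_POLLING_NRFLAG); /* __current_clr_polling() */
		puts("idle: saw need_resched, no IPI required");
		return NULL;
	}

	static void *waker_side(void *arg)
	{
		(void)arg;
		/* Setting need_resched writes the monitored word ... */
		if (atomic_fetch_or(&ti_flags, TIF_NEED_RESCHED) & TIF_POLLING_NRFLAG)
			puts("waker: target was polling, skipping the IPI");
		else
			puts("waker: target not polling, an IPI would be sent");
		return NULL;
	}

	int main(void)
	{
		pthread_t idle, waker;

		pthread_create(&idle, NULL, idle_side, NULL);
		pthread_create(&waker, NULL, waker_side, NULL);
		pthread_join(waker, NULL);
		pthread_join(idle, NULL);
		return 0;
	}

The waker's side is the point of the protocol: seeing TIF_POLLING_NRFLAG set
tells it the target will notice the flag write on its own, so the resched IPI
can be skipped.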

arch/x86/include/asm/processor.h

@@ -700,29 +700,6 @@ static inline void sync_core(void)
 #endif
 }
 
-static inline void __monitor(const void *eax, unsigned long ecx,
-			     unsigned long edx)
-{
-	/* "monitor %eax, %ecx, %edx;" */
-	asm volatile(".byte 0x0f, 0x01, 0xc8;"
-		     :: "a" (eax), "c" (ecx), "d"(edx));
-}
-
-static inline void __mwait(unsigned long eax, unsigned long ecx)
-{
-	/* "mwait %eax, %ecx;" */
-	asm volatile(".byte 0x0f, 0x01, 0xc9;"
-		     :: "a" (eax), "c" (ecx));
-}
-
-static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
-{
-	trace_hardirqs_on();
-	/* "mwait %eax, %ecx;" */
-	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
-		     :: "a" (eax), "c" (ecx));
-}
-
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 extern void init_amd_e400_c1e_mask(void);
arch/x86/kernel/acpi/cstate.c

@@ -150,29 +150,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
 
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
-	if (!need_resched()) {
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(ax, cx);
-	}
-}
-
 void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
 {
 	unsigned int cpu = smp_processor_id();
drivers/acpi/acpi_pad.c

@@ -193,10 +193,7 @@ static int power_saving_thread(void *data)
 					CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 		stop_critical_timings();
 
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(power_saving_mwait_eax, 1);
+		mwait_idle_with_hints(power_saving_mwait_eax, 1);
 
 		start_critical_timings();
 		if (lapic_marked_unstable)
drivers/acpi/processor_idle.c

@@ -727,11 +727,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 	if (unlikely(!pr))
 		return -EINVAL;
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	lapic_timer_state_broadcast(pr, cx, 1);
 	acpi_idle_do_entry(cx);
@@ -785,11 +780,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	if (unlikely(!pr))
 		return -EINVAL;
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	/*
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
@@ -841,11 +831,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		}
 	}
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	acpi_unlazy_tlb(smp_processor_id());
 
 	/* Tell the scheduler that we are going deep-idle: */
drivers/idle/intel_idle.c

@@ -375,16 +375,7 @@ static int intel_idle(struct cpuidle_device *dev,
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 
-	if (!current_set_polling_and_test()) {
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(eax, ecx);
-	}
+	mwait_idle_with_hints(eax, ecx);
 
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
drivers/thermal/intel_powerclamp.c

@@ -438,9 +438,7 @@ static int clamp_thread(void *arg)
 			 */
 			local_touch_nmi();
 			stop_critical_timings();
-			__monitor((void *)&current_thread_info()->flags, 0, 0);
-			cpu_relax(); /* allow HT sibling to run */
-			__mwait(eax, ecx);
+			mwait_idle_with_hints(eax, ecx);
 			start_critical_timings();
 			atomic_inc(&idle_wakeup_counter);
 		}
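
This copy was arguably the most broken of the lot: it issued MWAIT without
ever rechecking need_resched() after MONITOR and never set the polling flag;
routing it through mwait_idle_with_hints() fixes both.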