#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
#include <asm/ptrace.h>

#ifdef CONFIG_CONTEXT_TRACKING
extern void context_tracking_cpu_set(int cpu);

/* Called with interrupts disabled. */
extern void __context_tracking_enter(enum ctx_state state);
extern void __context_tracking_exit(enum ctx_state state);

extern void context_tracking_enter(enum ctx_state state);
extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);

static inline void user_enter(void)
{
	if (context_tracking_is_enabled())
		context_tracking_enter(CONTEXT_USER);
}

static inline void user_exit(void)
{
	if (context_tracking_is_enabled())
		context_tracking_exit(CONTEXT_USER);
}
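
/*
 * Illustrative sketch only, not part of this API: an architecture's entry
 * code is expected to bracket userspace execution with these helpers. The
 * function below is hypothetical; real entry paths are arch-specific:
 *
 *	void arch_syscall_slow_path(struct pt_regs *regs)
 *	{
 *		user_exit();		// userspace -> kernel
 *		...			// do the kernel-side work
 *		user_enter();		// kernel -> userspace
 *	}
 */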
/* Called with interrupts disabled. */
static inline void user_enter_irqoff(void)
{
	if (context_tracking_is_enabled())
		__context_tracking_enter(CONTEXT_USER);
}

static inline void user_exit_irqoff(void)
{
	if (context_tracking_is_enabled())
		__context_tracking_exit(CONTEXT_USER);
}
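
/*
 * Why the _irqoff variants exist: after the x86 entry rework,
 * enter_from_user_mode() and prepare_exit_to_usermode() are called only
 * with interrupts disabled, so they can skip the interrupt flag save and
 * restore done by context_tracking_enter()/context_tracking_exit(). On
 * one AMD test machine with context tracking force-enabled, this made
 * system calls about 90 cycles (~6%) faster.
 */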

static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (!context_tracking_is_enabled())
		return 0;

	prev_ctx = this_cpu_read(context_tracking.state);
	if (prev_ctx != CONTEXT_KERNEL)
		context_tracking_exit(prev_ctx);

	return prev_ctx;
}

static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (context_tracking_is_enabled()) {
		if (prev_ctx != CONTEXT_KERNEL)
			context_tracking_enter(prev_ctx);
	}
}
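
/*
 * Illustrative sketch only: exception handlers that can fire in either
 * user or kernel context save and restore the interrupted state with
 * this pair. The handler name below is hypothetical:
 *
 *	void do_example_fault(struct pt_regs *regs)
 *	{
 *		enum ctx_state prev_state = exception_enter();
 *
 *		...			// handle the exception
 *
 *		exception_exit(prev_state);
 *	}
 */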

/**
 * ct_state() - return the current context tracking state if known
 *
 * Returns the current cpu's context tracking state if context tracking
 * is enabled. If context tracking is disabled, returns
 * CONTEXT_DISABLED. This should be used primarily for debugging.
 */
static inline enum ctx_state ct_state(void)
{
	return context_tracking_is_enabled() ?
		this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
}
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
#endif /* !CONFIG_CONTEXT_TRACKING */

#define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond))
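
/*
 * Typical (illustrative) use together with ct_state(): warn when the
 * tracked state is not what the caller expects, but only if context
 * tracking is actually enabled, e.g. right after entering from userspace:
 *
 *	CT_WARN_ON(ct_state() != CONTEXT_USER);
 */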

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_FORCE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
/* must be called with irqs disabled */
static inline void guest_enter_irqoff(void)
{
	if (vtime_accounting_cpu_enabled())
		vtime_guest_enter(current);
	else
		current->flags |= PF_VCPU;

	if (context_tracking_is_enabled())
		__context_tracking_enter(CONTEXT_GUEST);

	/*
	 * KVM does not hold any references to RCU-protected data when it
	 * switches a CPU into guest mode. In fact, switching to guest mode
	 * is very similar to exiting to userspace from RCU's point of view.
	 * In addition, a CPU may stay in guest mode for quite a long time
	 * (up to one time slice). Let's treat guest mode as a quiescent
	 * state, just like we do with user-mode execution.
	 */
	if (!context_tracking_cpu_is_enabled())
		rcu_virt_note_context_switch(smp_processor_id());
}

static inline void guest_exit_irqoff(void)
{
	if (context_tracking_is_enabled())
		__context_tracking_exit(CONTEXT_GUEST);

	if (vtime_accounting_cpu_enabled())
		vtime_guest_exit(current);
	else
		current->flags &= ~PF_VCPU;
}
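
/*
 * Note the mirrored ordering above: guest_enter_irqoff() flushes vtime
 * and then enters CONTEXT_GUEST, while guest_exit_irqoff() leaves
 * CONTEXT_GUEST before accounting the elapsed guest time.
 */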

#else
static inline void guest_enter_irqoff(void)
{
	/*
	 * This is running in ioctl context so it's safe to assume
	 * that the pending cputime to flush is system time (stime).
	 */
	vtime_account_system(current);
	current->flags |= PF_VCPU;
	rcu_virt_note_context_switch(smp_processor_id());
}

static inline void guest_exit_irqoff(void)
{
	/* Flush the cputime we spent on the guest */
	vtime_account_system(current);
	current->flags &= ~PF_VCPU;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */

static inline void guest_enter(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_enter_irqoff();
	local_irq_restore(flags);
}

static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit_irqoff();
	local_irq_restore(flags);
}
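
/*
 * Illustrative sketch only: a hypervisor's vcpu run loop is the intended
 * caller of these wrappers, roughly as below. The function name is
 * hypothetical, not KVM's exact code:
 *
 *	static int vcpu_run_once(struct kvm_vcpu *vcpu)
 *	{
 *		guest_enter();		// start guest accounting/tracking
 *		...			// enter and run the guest
 *		guest_exit();		// back to host accounting
 *		return 0;
 *	}
 */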

#endif /* _LINUX_CONTEXT_TRACKING_H */