context_tracking: Consolidate guest enter/exit wrappers

Consolidate the guest enter/exit wrappers, providing and tweaking stubs
as needed.  This will allow moving the wrappers under KVM without having
to bleed #ifdefs into the soon-to-be KVM code.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20210505002735.1684165-7-seanjc@google.com
Author:    Sean Christopherson, 2021-05-04 17:27:33 -07:00
Committer: Thomas Gleixner
Parent:    6f922b89e5
Commit:    14296e0c44
1 changed file with 24 additions and 41 deletions


@@ -71,6 +71,19 @@ static inline void exception_exit(enum ctx_state prev_ctx)
 	}
 }
 
+static __always_inline bool context_tracking_guest_enter(void)
+{
+	if (context_tracking_enabled())
+		__context_tracking_enter(CONTEXT_GUEST);
+	return context_tracking_enabled_this_cpu();
+}
+
+static __always_inline void context_tracking_guest_exit(void)
+{
+	if (context_tracking_enabled())
+		__context_tracking_exit(CONTEXT_GUEST);
+}
+
 /**
  * ct_state() - return the current context tracking state if known
@@ -92,6 +105,9 @@ static inline void user_exit_irqoff(void) { }
 static inline enum ctx_state exception_enter(void) { return 0; }
 static inline void exception_exit(enum ctx_state prev_ctx) { }
 static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
+static inline bool context_tracking_guest_enter(void) { return false; }
+static inline void context_tracking_guest_exit(void) { }
+
 #endif /* !CONFIG_CONTEXT_TRACKING */
 
 #define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
@@ -102,74 +118,41 @@ extern void context_tracking_init(void);
 static inline void context_tracking_init(void) { }
 #endif /* CONFIG_CONTEXT_TRACKING_FORCE */
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 /* must be called with irqs disabled */
 static __always_inline void guest_enter_irqoff(void)
 {
-	/*
-	 * This is running in ioctl context so its safe to assume that it's the
-	 * stime pending cputime to flush.
-	 */
 	instrumentation_begin();
-	if (vtime_accounting_enabled_this_cpu())
-		vtime_guest_enter(current);
-	else
-		current->flags |= PF_VCPU;
+	vtime_account_guest_enter();
 	instrumentation_end();
 
-	if (context_tracking_enabled())
-		__context_tracking_enter(CONTEXT_GUEST);
-
-	/* KVM does not hold any references to rcu protected data when it
+	/*
+	 * KVM does not hold any references to rcu protected data when it
 	 * switches CPU into a guest mode. In fact switching to a guest mode
 	 * is very similar to exiting to userspace from rcu point of view. In
 	 * addition CPU may stay in a guest mode for quite a long time (up to
 	 * one time slice). Lets treat guest mode as quiescent state, just like
 	 * we do with user-mode execution.
 	 */
-	if (!context_tracking_enabled_this_cpu()) {
+	if (!context_tracking_guest_enter()) {
 		instrumentation_begin();
 		rcu_virt_note_context_switch(smp_processor_id());
 		instrumentation_end();
 	}
 }
 
-static __always_inline void context_tracking_guest_exit(void)
-{
-	if (context_tracking_enabled())
-		__context_tracking_exit(CONTEXT_GUEST);
-}
-
 static __always_inline void guest_exit_irqoff(void)
 {
 	context_tracking_guest_exit();
 
-	instrumentation_begin();
-	vtime_account_guest_exit();
-	instrumentation_end();
-}
-
-#else
-static __always_inline void guest_enter_irqoff(void)
-{
-	/*
-	 * This is running in ioctl context so its safe
-	 * to assume that it's the stime pending cputime
-	 * to flush.
-	 */
-	instrumentation_begin();
-	vtime_account_guest_enter();
-	rcu_virt_note_context_switch(smp_processor_id());
-	instrumentation_end();
-}
-
-static __always_inline void context_tracking_guest_exit(void) { }
-
-static __always_inline void guest_exit_irqoff(void)
-{
 	instrumentation_begin();
 	/* Flush the guest cputime we spent on the guest */
 	vtime_account_guest_exit();
 	instrumentation_end();
 }
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
 
 static inline void guest_exit(void)
 {
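
For context, a minimal sketch of how a vCPU run loop is expected to call the
consolidated wrappers once they move under KVM. This is illustrative only and
not part of the commit: kvm_sketch_vcpu_run() and kvm_arch_vmenter() are
hypothetical names standing in for the real arch-specific entry path.

/*
 * Minimal sketch, not from this commit: a simplified vCPU run loop
 * showing the intended call sites for the consolidated wrappers.
 * kvm_sketch_vcpu_run() and kvm_arch_vmenter() are hypothetical.
 */
static int kvm_sketch_vcpu_run(struct kvm_vcpu *vcpu)
{
	int exit_reason;

	local_irq_disable();

	/*
	 * One call works in every configuration: vtime accounting,
	 * context tracking and the RCU quiescent-state hint are all
	 * handled behind guest_enter_irqoff(), so no #ifdefs leak
	 * into the caller.
	 */
	guest_enter_irqoff();

	exit_reason = kvm_arch_vmenter(vcpu);	/* run the guest */

	/* Mark the CPU as back in host context before handling the exit. */
	guest_exit_irqoff();

	local_irq_enable();

	return exit_reason;
}

Because the !CONFIG_CONTEXT_TRACKING stubs above return false / do nothing,
the same caller compiles unchanged whether or not context tracking is built in.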