/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ENTRYKVM_H
#define __LINUX_ENTRYKVM_H

#include <linux/static_call_types.h>
#include <linux/resume_user_mode.h>
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
#include <linux/tick.h>

/* Transfer to guest mode work */
#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK

#ifndef ARCH_XFER_TO_GUEST_MODE_WORK
# define ARCH_XFER_TO_GUEST_MODE_WORK (0)
#endif

#define XFER_TO_GUEST_MODE_WORK						\
	(_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL |	\
	 _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
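
/*
 * Illustrative sketch, not part of this header: an architecture can add
 * its own TIF bits to the transfer-to-guest check by defining
 * ARCH_XFER_TO_GUEST_MODE_WORK before this point. The flag name below
 * is hypothetical.
 *
 *	#define ARCH_XFER_TO_GUEST_MODE_WORK	(_TIF_MY_ARCH_WORK)
 */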

struct kvm_vcpu;

/**
 * arch_xfer_to_guest_mode_handle_work - Architecture specific xfer to guest
 *					 mode work handling function.
 * @vcpu:	Pointer to current's VCPU data
 * @ti_work:	Cached TIF flags gathered in xfer_to_guest_mode_handle_work()
 *
 * Invoked from xfer_to_guest_mode_handle_work(). Defaults to NOOP. Can be
 * replaced by architecture specific code.
 */
static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
						      unsigned long ti_work);

#ifndef arch_xfer_to_guest_mode_work
static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
						      unsigned long ti_work)
{
	return 0;
}
#endif

/**
 * xfer_to_guest_mode_handle_work - Check and handle pending work which needs
 *				    to be handled before going to guest mode
 * @vcpu:	Pointer to current's VCPU data
 *
 * Returns: 0 or an error code
 */
int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
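
/*
 * Illustrative call-site sketch, not part of this header: a KVM
 * architecture run loop drains pending work before each guest entry.
 * The helpers my_arch_vcpu_run() and my_arch_enter_guest() are
 * hypothetical stand-ins for the arch specific code.
 *
 *	static int my_arch_vcpu_run(struct kvm_vcpu *vcpu)
 *	{
 *		int r = 0;
 *
 *		while (!r) {
 *			if (__xfer_to_guest_mode_work_pending())
 *				r = xfer_to_guest_mode_handle_work(vcpu);
 *			if (!r)
 *				r = my_arch_enter_guest(vcpu);
 *		}
 *		return r;
 *	}
 */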

/**
 * xfer_to_guest_mode_prepare - Perform last minute preparation work that
 *				needs to be handled while IRQs are disabled
 *				upon entering the guest.
 *
 * Has to be invoked with interrupts disabled before the last call
 * to xfer_to_guest_mode_work_pending().
 */
static inline void xfer_to_guest_mode_prepare(void)
{
	lockdep_assert_irqs_disabled();
	tick_nohz_user_enter_prepare();
}
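
/*
 * Illustrative ordering sketch, not part of this header: the prepare
 * step runs with IRQs disabled, immediately before the final pending
 * work check; my_arch_enter_guest() is a hypothetical entry helper.
 *
 *	local_irq_disable();
 *	xfer_to_guest_mode_prepare();
 *	if (xfer_to_guest_mode_work_pending()) {
 *		local_irq_enable();
 *		r = xfer_to_guest_mode_handle_work(vcpu);
 *	} else {
 *		r = my_arch_enter_guest(vcpu);
 *	}
 */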

/**
 * __xfer_to_guest_mode_work_pending - Check if work is pending
 *
 * Returns: True if work pending, False otherwise.
 *
 * Bare variant of xfer_to_guest_mode_work_pending(). Can be called from
 * interrupt enabled code for racy quick checks with care.
 */
static inline bool __xfer_to_guest_mode_work_pending(void)
{
	unsigned long ti_work = read_thread_flags();

	return !!(ti_work & XFER_TO_GUEST_MODE_WORK);
}
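
/*
 * Illustrative note, not part of this header: with IRQs enabled the
 * bare check is racy, so a "no work" result is only a hint and must be
 * confirmed by xfer_to_guest_mode_work_pending() with IRQs disabled
 * before the actual guest entry.
 *
 *	if (__xfer_to_guest_mode_work_pending())
 *		r = xfer_to_guest_mode_handle_work(vcpu);
 */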

/**
 * xfer_to_guest_mode_work_pending - Check if work is pending which needs to be
 *				     handled before returning to guest mode
 *
 * Returns: True if work pending, False otherwise.
 *
 * Has to be invoked with interrupts disabled before the transition to
 * guest mode.
 */
static inline bool xfer_to_guest_mode_work_pending(void)
{
	lockdep_assert_irqs_disabled();
	return __xfer_to_guest_mode_work_pending();
}
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */

#endif