kvm: x86: disable shattered huge page recovery for PREEMPT_RT.
If a huge page is recovered (and becomes non-executable) while another thread is executing it, the resulting contention on mmu_lock can cause latency spikes. Disabling recovery for PREEMPT_RT kernels fixes this issue.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 13fb59276b
parent 8c5bd25bf4
@@ -51,7 +51,12 @@
 extern bool itlb_multihit_kvm_mitigation;
 
 static int __read_mostly nx_huge_pages = -1;
+#ifdef CONFIG_PREEMPT_RT
+/* Recovery can cause latency spikes, disable it for PREEMPT_RT. */
+static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
+#else
 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
+#endif
 
 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
 static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
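To make the effect of the new default concrete, below is a minimal userspace sketch (plain C, not kernel code) of how a recovery ratio of 0 gates the recovery worker: roughly, each recovery pass reclaims on the order of 1/ratio of the currently shattered huge pages while holding mmu_lock, and a ratio of 0 means a pass reclaims nothing, so the worker never contends for the lock. The PREEMPT_RT_SKETCH macro and the pages_to_recover() helper are illustrative names introduced here, not the kernel's.

/*
 * Illustrative sketch only: models the idea that a recovery ratio of 0
 * disables NX huge page recovery, so no work is ever done under mmu_lock.
 */
#include <stdio.h>

/* Mirrors the module parameter touched by the patch: 0 disables recovery. */
#ifdef PREEMPT_RT_SKETCH
static unsigned int nx_huge_pages_recovery_ratio = 0;
#else
static unsigned int nx_huge_pages_recovery_ratio = 60;
#endif

/* How many of the currently shattered huge pages one pass would reclaim. */
static unsigned long pages_to_recover(unsigned long shattered_pages)
{
	unsigned int ratio = nx_huge_pages_recovery_ratio;

	/* ratio == 0: recovery is disabled, a pass does nothing at all. */
	return ratio ? shattered_pages / ratio : 0;
}

int main(void)
{
	unsigned long shattered = 6000;
	unsigned long to_zap = pages_to_recover(shattered);

	if (to_zap == 0)
		printf("recovery disabled: no mmu_lock contention from the worker\n");
	else
		printf("one pass would zap %lu of %lu pages under mmu_lock\n",
		       to_zap, shattered);

	return 0;
}

Compiling with -DPREEMPT_RT_SKETCH (standing in for CONFIG_PREEMPT_RT) makes pages_to_recover() return 0, which is the behavior the patch selects by default on PREEMPT_RT kernels.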