x86/kvm: Don't bother __pv_cpu_mask when !CONFIG_SMP

Enabling PV TLB shootdown when !CONFIG_SMP doesn't make sense. Let's
move it inside CONFIG_SMP. In addition, we can avoid defining and
allocating __pv_cpu_mask when !CONFIG_SMP, and get rid of the 'alloc'
variable in kvm_alloc_cpumask.

Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Message-Id: <1617941911-5338-1-git-send-email-wanpengli@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Wanpeng Li 2021-04-09 12:18:29 +08:00 committed by Paolo Bonzini
parent 4c6654bd16
commit 2b519b5797
1 changed file with 55 additions and 63 deletions

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c

@@ -451,6 +451,10 @@ static void __init sev_map_percpu_data(void)
         }
 }
 
+#ifdef CONFIG_SMP
+
+static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
+
 static bool pv_tlb_flush_supported(void)
 {
         return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
@ -458,10 +462,6 @@ static bool pv_tlb_flush_supported(void)
kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)); kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
} }
static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
#ifdef CONFIG_SMP
static bool pv_ipi_supported(void) static bool pv_ipi_supported(void)
{ {
return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI); return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
@@ -574,6 +574,49 @@ static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
         }
 }
 
+static void kvm_flush_tlb_others(const struct cpumask *cpumask,
+                        const struct flush_tlb_info *info)
+{
+        u8 state;
+        int cpu;
+        struct kvm_steal_time *src;
+        struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
+
+        cpumask_copy(flushmask, cpumask);
+        /*
+         * We have to call flush only on online vCPUs. And
+         * queue flush_on_enter for pre-empted vCPUs
+         */
+        for_each_cpu(cpu, flushmask) {
+                src = &per_cpu(steal_time, cpu);
+                state = READ_ONCE(src->preempted);
+                if ((state & KVM_VCPU_PREEMPTED)) {
+                        if (try_cmpxchg(&src->preempted, &state,
+                                        state | KVM_VCPU_FLUSH_TLB))
+                                __cpumask_clear_cpu(cpu, flushmask);
+                }
+        }
+
+        native_flush_tlb_others(flushmask, info);
+}
+
+static __init int kvm_alloc_cpumask(void)
+{
+        int cpu;
+
+        if (!kvm_para_available() || nopv)
+                return 0;
+
+        if (pv_tlb_flush_supported() || pv_ipi_supported())
+                for_each_possible_cpu(cpu) {
+                        zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
+                                GFP_KERNEL, cpu_to_node(cpu));
+                }
+
+        return 0;
+}
+arch_initcall(kvm_alloc_cpumask);
+
 static void __init kvm_smp_prepare_boot_cpu(void)
 {
         /*
@@ -611,34 +654,9 @@ static int kvm_cpu_down_prepare(unsigned int cpu)
         local_irq_enable();
         return 0;
 }
 #endif
 
-static void kvm_flush_tlb_others(const struct cpumask *cpumask,
-                        const struct flush_tlb_info *info)
-{
-        u8 state;
-        int cpu;
-        struct kvm_steal_time *src;
-        struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
-
-        cpumask_copy(flushmask, cpumask);
-        /*
-         * We have to call flush only on online vCPUs. And
-         * queue flush_on_enter for pre-empted vCPUs
-         */
-        for_each_cpu(cpu, flushmask) {
-                src = &per_cpu(steal_time, cpu);
-                state = READ_ONCE(src->preempted);
-                if ((state & KVM_VCPU_PREEMPTED)) {
-                        if (try_cmpxchg(&src->preempted, &state,
-                                        state | KVM_VCPU_FLUSH_TLB))
-                                __cpumask_clear_cpu(cpu, flushmask);
-                }
-        }
-
-        native_flush_tlb_others(flushmask, info);
-}
-
 static void __init kvm_guest_init(void)
 {
         int i;
@@ -653,12 +671,6 @@ static void __init kvm_guest_init(void)
                 pv_ops.time.steal_clock = kvm_steal_clock;
         }
 
-        if (pv_tlb_flush_supported()) {
-                pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
-                pv_ops.mmu.tlb_remove_table = tlb_remove_table;
-                pr_info("KVM setup pv remote TLB flush\n");
-        }
-
         if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                 apic_set_eoi_write(kvm_guest_apic_eoi_write);
@@ -668,6 +680,12 @@ static void __init kvm_guest_init(void)
         }
 
 #ifdef CONFIG_SMP
+        if (pv_tlb_flush_supported()) {
+                pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
+                pv_ops.mmu.tlb_remove_table = tlb_remove_table;
+                pr_info("KVM setup pv remote TLB flush\n");
+        }
+
         smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
         if (pv_sched_yield_supported()) {
                 smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
@@ -734,7 +752,7 @@ static uint32_t __init kvm_detect(void)
 
 static void __init kvm_apic_init(void)
 {
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
         if (pv_ipi_supported())
                 kvm_setup_pv_ipi();
 #endif
@@ -794,32 +812,6 @@
 }
 arch_initcall(activate_jump_labels);
 
-static __init int kvm_alloc_cpumask(void)
-{
-        int cpu;
-        bool alloc = false;
-
-        if (!kvm_para_available() || nopv)
-                return 0;
-
-        if (pv_tlb_flush_supported())
-                alloc = true;
-
-#if defined(CONFIG_SMP)
-        if (pv_ipi_supported())
-                alloc = true;
-#endif
-
-        if (alloc)
-                for_each_possible_cpu(cpu) {
-                        zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
-                                GFP_KERNEL, cpu_to_node(cpu));
-                }
-
-        return 0;
-}
-arch_initcall(kvm_alloc_cpumask);
-
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 
 /* Kick a cpu by its apicid. Used to wake up a halted vcpu */
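
For readers who want the kvm_alloc_cpumask simplification in isolation, here is a
minimal standalone sketch of the allocation decision before and after this commit.
It is not the kernel source: the demo.c file name, the -DCONFIG_SMP build flag and
the stubbed feature probes (with assumed return values) merely stand in for the
real kvm_para_has_feature() plumbing.

/*
 * demo.c - model of the kvm_alloc_cpumask decision, old vs. new.
 * Build an "SMP" variant with:  cc -DCONFIG_SMP demo.c && ./a.out
 * and a "UP" variant with:      cc demo.c && ./a.out
 */
#include <stdbool.h>
#include <stdio.h>

/* Stubs with assumed values, not the real KVM feature checks. */
bool pv_tlb_flush_supported(void) { return true;  }
bool pv_ipi_supported(void)       { return false; }

int main(void)
{
        /*
         * Old logic: pv_tlb_flush_supported() sat outside the CONFIG_SMP
         * guard, so even a UP build could set 'alloc' and allocate a
         * per-CPU cpumask that no flush path would ever consume.
         */
        bool alloc = false;

        if (pv_tlb_flush_supported())
                alloc = true;
#ifdef CONFIG_SMP
        if (pv_ipi_supported())
                alloc = true;
#endif
        printf("old kvm_alloc_cpumask allocates: %s\n", alloc ? "yes" : "no");

        /*
         * New logic: the whole function is compiled only under CONFIG_SMP,
         * and the write-once flag collapses into one short-circuit test.
         */
#ifdef CONFIG_SMP
        printf("new kvm_alloc_cpumask allocates: %s\n",
               (pv_tlb_flush_supported() || pv_ipi_supported()) ? "yes" : "no");
#else
        printf("new kvm_alloc_cpumask does not exist in a UP build\n");
#endif
        return 0;
}

Built without -DCONFIG_SMP, the old variant still answers "yes" whenever the PV
TLB-flush feature is exposed, which is exactly the pointless allocation the commit
message describes; the new variant cannot allocate at all, since __pv_cpu_mask and
the kvm_alloc_cpumask initcall are no longer compiled in.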