KVM: MIPS/T&E: active_mm = init_mm in guest context
Set init_mm as the active_mm and update mm_cpumask(current->mm) to
reflect that it isn't active when in guest context. This prevents cache
management code from attempting cache flushes on host virtual addresses
while in guest context, for example due to cache management IPIs or
later when a write of dynamically translated code hits copy-on-write.

We do this using helpers in static kernel code to avoid having to
export init_mm to modules.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
commit a7ebb2e410
parent 91cdee5710
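Cross-CPU cache/TLB maintenance for an mm typically picks its IPI targets from mm_cpumask(mm), which is why clearing this CPU's bit (and pointing active_mm at init_mm) keeps host-address flushes away while the CPU is executing guest code. Below is a minimal user-space toy model of that idea only; it is not kernel code, and toy_mm, toy_flush_mm, toy_suspend_mm and toy_resume_mm are invented names that merely mirror the roles of the helpers this patch adds.

/*
 * User-space toy model (NOT kernel code).  It illustrates one idea: a
 * cross-CPU flush for an mm targets only the CPUs set in that mm's cpumask,
 * so a CPU that clears its bit stops receiving host-address flushes.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct toy_mm {
	const char *name;
	bool cpumask[NR_CPUS];		/* CPUs this mm is active on */
};

/* Stand-in for a flush_cache_mm()/flush_tlb_mm() style shootdown. */
static void toy_flush_mm(const struct toy_mm *mm)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mm->cpumask[cpu])
			printf("  IPI: flush %s on CPU%d\n", mm->name, cpu);
}

/* Analogue of kvm_mips_suspend_mm(): drop this CPU from the host mm. */
static void toy_suspend_mm(struct toy_mm *host_mm, int cpu)
{
	host_mm->cpumask[cpu] = false;
}

/* Analogue of kvm_mips_resume_mm(): rejoin the host mm. */
static void toy_resume_mm(struct toy_mm *host_mm, int cpu)
{
	host_mm->cpumask[cpu] = true;
}

int main(void)
{
	struct toy_mm host_mm = { "host mm", { [0] = true, [2] = true } };

	printf("before guest entry on CPU2:\n");
	toy_flush_mm(&host_mm);		/* CPU0 and CPU2 both targeted */

	toy_suspend_mm(&host_mm, 2);	/* CPU2 enters guest context */
	printf("while CPU2 is in guest context:\n");
	toy_flush_mm(&host_mm);		/* only CPU0 is targeted now */

	toy_resume_mm(&host_mm, 2);	/* back from guest context */
	printf("after guest exit on CPU2:\n");
	toy_flush_mm(&host_mm);
	return 0;
}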
@@ -607,6 +607,10 @@ extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
 extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
 				     unsigned long entryhi);
 extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
+
+void kvm_mips_suspend_mm(int cpu);
+void kvm_mips_resume_mm(int cpu);
+
 extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						   unsigned long gva);
 extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
@@ -382,3 +382,38 @@ void kvm_local_flush_tlb_all(void)
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);
+
+/**
+ * kvm_mips_suspend_mm() - Suspend the active mm.
+ * @cpu:	The CPU we're running on.
+ *
+ * Suspend the active_mm, ready for a switch to a KVM guest virtual address
+ * space.  This is left active for the duration of guest context, including
+ * time with interrupts enabled, so we need to be careful not to confuse e.g.
+ * cache management IPIs.
+ *
+ * kvm_mips_resume_mm() should be called before context switching to a
+ * different process so we don't need to worry about reference counting.
+ *
+ * This needs to be in static kernel code to avoid exporting init_mm.
+ */
+void kvm_mips_suspend_mm(int cpu)
+{
+	cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
+	current->active_mm = &init_mm;
+}
+EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);
+
+/**
+ * kvm_mips_resume_mm() - Resume the current process mm.
+ * @cpu:	The CPU we're running on.
+ *
+ * Resume the mm of the current process, after a switch back from a KVM guest
+ * virtual address space (see kvm_mips_suspend_mm()).
+ */
+void kvm_mips_resume_mm(int cpu)
+{
+	cpumask_set_cpu(cpu, mm_cpumask(current->mm));
+	current->active_mm = current->mm;
+}
+EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);
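The hunks below wire these helpers into the trap-and-emulate callers. The one ordering subtlety, flagged by the run loop's "We may have migrated while handling guest exits" comment, is that the task may be on a different CPU by the time the guest exits, so the host mm must be resumed on whichever CPU the task actually ends up on. Here is a hedged, self-contained sketch of that ordering only; stub_suspend_mm(), stub_resume_mm() and run_guest() are print stubs standing in for the real helpers and for vcpu->arch.vcpu_run(), and current_cpu fakes smp_processor_id().

/* Ordering sketch only; the real code is in the hunks above and below. */
#include <stdio.h>

static int current_cpu = 2;	/* fake smp_processor_id() result */

static void stub_suspend_mm(int cpu)
{
	printf("suspend host mm on CPU%d\n", cpu);	/* kvm_mips_suspend_mm analogue */
}

static void stub_resume_mm(int cpu)
{
	printf("resume host mm on CPU%d\n", cpu);	/* kvm_mips_resume_mm analogue */
}

static int run_guest(void)
{
	current_cpu = 3;	/* pretend the task migrated during guest exits */
	return 0;
}

int main(void)
{
	int cpu = current_cpu;		/* like: int cpu = smp_processor_id(); */
	int r;

	stub_suspend_mm(cpu);		/* before entering guest context */
	r = run_guest();		/* placeholder for vcpu->arch.vcpu_run() */

	cpu = current_cpu;		/* we may have migrated: re-check the CPU */
	stub_resume_mm(cpu);		/* rejoin the host mm on the new CPU */
	return r;
}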
@@ -670,6 +670,7 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 			write_c0_entryhi(cpu_asid(cpu, kern_mm));
 		else
 			write_c0_entryhi(cpu_asid(cpu, user_mm));
+		kvm_mips_suspend_mm(cpu);
 		ehb();
 	}
 
@@ -689,6 +690,7 @@ static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
 			get_new_mmu_context(current->mm, cpu);
 		}
 		write_c0_entryhi(cpu_asid(cpu, current->mm));
+		kvm_mips_resume_mm(cpu);
 		ehb();
 	}
 
@@ -723,7 +725,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
 
 static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
-	int cpu;
+	int cpu = smp_processor_id();
 	int r;
 
 	/* Check if we have any exceptions/interrupts pending */
@@ -735,6 +737,13 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	/* Disable hardware page table walking while in guest */
 	htw_stop();
 
+	/*
+	 * While in guest context we're in the guest's address space, not the
+	 * host process address space, so we need to be careful not to confuse
+	 * e.g. cache management IPIs.
+	 */
+	kvm_mips_suspend_mm(cpu);
+
 	r = vcpu->arch.vcpu_run(run, vcpu);
 
 	/* We may have migrated while handling guest exits */
@@ -745,6 +754,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		     asid_version_mask(cpu)))
 		get_new_mmu_context(current->mm, cpu);
 	write_c0_entryhi(cpu_asid(cpu, current->mm));
+	kvm_mips_resume_mm(cpu);
 
 	htw_start();