[S390] kvm: fix address mode switching

598841ca99 ([S390] use gmap address spaces for kvm guest images) changed
kvm to use a separate address space for kvm guests. This address space
was switched in __vcpu_run. In some cases (preemption, page fault) there
is the possibility that this address space switch is lost.
The typical symptom was a huge amount of validity intercepts or
random guest addressing exceptions.
Fix this by doing the switch in sie_loop and sie_exit and saving the
address space in the gmap structure itself. Also use the preempt
notifier.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Author: Christian Borntraeger, 2011-09-20 17:07:28 +02:00; committed by Heiko Carstens
commit 480e5926ce, parent 9d037a7776
5 changed files with 15 additions and 10 deletions
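In short, gmap_enable()/gmap_disable() no longer reload control register 1 themselves; they only record which gmap is active, and the low-level SIE entry/exit code performs the address-space switch immediately around the sie instruction, so preemption or a page fault can no longer undo it unnoticed. A rough C-style sketch of that switch follows; the function names and the set_cr1() helper are made up for illustration, the real code is the s390 assembly in the third hunk below.

/* Illustrative sketch only: C rendition of the logic this patch adds to
 * sie_loop/sie_exit; set_cr1() stands in for "lctlg %c1,%c1,...". */
static void sie_entry_asce_switch(void)
{
	struct gmap *gmap = (struct gmap *) S390_lowcore.gmap;

	if (gmap)				/* registered by gmap_enable()? */
		set_cr1(gmap->asce);		/* load guest ASCE right before SIE */
}

static void sie_exit_asce_switch(void)
{
	set_cr1(S390_lowcore.user_asce);	/* restore host user ASCE on exit */
}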

arch/s390/include/asm/pgtable.h

@@ -658,12 +658,14 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
  * struct gmap_struct - guest address space
  * @mm: pointer to the parent mm_struct
  * @table: pointer to the page directory
+ * @asce: address space control element for gmap page table
  * @crst_list: list of all crst tables used in the guest address space
  */
 struct gmap {
 	struct list_head list;
 	struct mm_struct *mm;
 	unsigned long *table;
+	unsigned long asce;
 	struct list_head crst_list;
 };

arch/s390/kernel/asm-offsets.c

@@ -10,6 +10,7 @@
 #include <linux/sched.h>
 #include <asm/vdso.h>
 #include <asm/sigp.h>
+#include <asm/pgtable.h>
 
 /*
  * Make sure that the compiler is new enough. We want a compiler that
@@ -126,6 +127,7 @@ int main(void)
 	DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
 	DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
 	DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
+	DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
 	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
 	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
 	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
@@ -151,6 +153,7 @@ int main(void)
 	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
 	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
 	DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
+	DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
 #endif /* CONFIG_32BIT */
 	return 0;
 }
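For readers unfamiliar with asm-offsets.c: it exists so that assembly such as the SIE entry path can address C struct members by numeric byte offset; each DEFINE() line turns an offsetof() expression into a build-time constant (here __LC_USER_ASCE and __GMAP_ASCE). A standalone toy example of the underlying idea, using a simplified stand-in struct rather than the kernel's real definition:

/* Toy example: the kind of byte offset that __GMAP_ASCE ends up naming.
 * struct gmap_example is a simplified stand-in, not the kernel struct. */
#include <stdio.h>
#include <stddef.h>

struct gmap_example {
	void *list_next, *list_prev;	/* stands in for struct list_head list */
	void *mm;
	unsigned long *table;
	unsigned long asce;
};

int main(void)
{
	printf("offset of asce: %zu bytes\n", offsetof(struct gmap_example, asce));
	return 0;
}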

arch/s390/kernel/entry64.S

@@ -1076,6 +1076,11 @@ sie_loop:
 	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
 	tm	__TI_flags+7(%r14),_TIF_EXIT_SIE
 	jnz	sie_exit
+	lg	%r14,__LC_GMAP			# get gmap pointer
+	ltgr	%r14,%r14
+	jz	sie_gmap
+	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
+sie_gmap:
 	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
 	SPP	__SF_EMPTY(%r15)		# set guest id
 	sie	0(%r14)
@@ -1083,6 +1088,7 @@ sie_done:
 	SPP	__LC_CMF_HPP			# set host id
 	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
 sie_exit:
+	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 	ni	__TI_flags+6(%r14),255-(_TIF_SIE>>8)
 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
 	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13

arch/s390/kvm/kvm-s390.c

@@ -263,10 +263,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
 	restore_fp_regs(&vcpu->arch.guest_fpregs);
 	restore_access_regs(vcpu->arch.guest_acrs);
+	gmap_enable(vcpu->arch.gmap);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	gmap_disable(vcpu->arch.gmap);
 	save_fp_regs(&vcpu->arch.guest_fpregs);
 	save_access_regs(vcpu->arch.guest_acrs);
 	restore_fp_regs(&vcpu->arch.host_fpregs);
@@ -461,7 +463,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	local_irq_disable();
 	kvm_guest_enter();
 	local_irq_enable();
-	gmap_enable(vcpu->arch.gmap);
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
 		   atomic_read(&vcpu->arch.sie_block->cpuflags));
 	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
@@ -470,7 +471,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	}
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
-	gmap_disable(vcpu->arch.gmap);
 	local_irq_disable();
 	kvm_guest_exit();
 	local_irq_enable();
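Moving gmap_enable()/gmap_disable() from __vcpu_run into kvm_arch_vcpu_load()/kvm_arch_vcpu_put() is what the commit message means by using the preempt notifier: generic KVM invokes these hooks whenever a vcpu thread is scheduled in or out, so the gmap registration in the lowcore is re-established after every preemption. Roughly, as an approximation of the generic code in virt/kvm/kvm_main.c of this era rather than a quote:

/* Approximate shape of KVM's preempt-notifier callbacks; see
 * virt/kvm/kvm_main.c for the real implementation. */
static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);		/* -> gmap_enable() on s390 */
}

static void kvm_sched_out(struct preempt_notifier *pn, struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);		/* -> gmap_disable() on s390 */
}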

arch/s390/mm/pgtable.c

@@ -160,6 +160,8 @@ struct gmap *gmap_alloc(struct mm_struct *mm)
 	table = (unsigned long *) page_to_phys(page);
 	crst_table_init(table, _REGION1_ENTRY_EMPTY);
 	gmap->table = table;
+	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
+		     _ASCE_USER_BITS | __pa(table);
 	list_add(&gmap->list, &mm->context.gmap_list);
 	return gmap;
 
@@ -240,10 +242,6 @@ EXPORT_SYMBOL_GPL(gmap_free);
  */
 void gmap_enable(struct gmap *gmap)
 {
-	/* Load primary space page table origin. */
-	S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
-				 _ASCE_USER_BITS | __pa(gmap->table);
-	asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
 	S390_lowcore.gmap = (unsigned long) gmap;
 }
 EXPORT_SYMBOL_GPL(gmap_enable);
@@ -254,10 +252,6 @@ EXPORT_SYMBOL_GPL(gmap_enable);
  */
 void gmap_disable(struct gmap *gmap)
 {
-	/* Load primary space page table origin. */
-	S390_lowcore.user_asce =
-		gmap->mm->context.asce_bits | __pa(gmap->mm->pgd);
-	asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
 	S390_lowcore.gmap = 0UL;
 }
 EXPORT_SYMBOL_GPL(gmap_disable);
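With the removed lines gone, the two helpers no longer touch control register 1 at all; they merely publish or clear the active gmap pointer for the SIE entry path to consume. Reconstructed from the hunk above, the resulting functions are simply:

/* gmap_enable - record the guest address space for the SIE entry path */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/* gmap_disable - clear the recorded guest address space */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);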