KVM: s390: Fix low-address protection for real addresses
The kvm_s390_check_low_addr_protection() function is used only with real addresses. According to the POP (the "Low-Address Protection" paragraph in chapter 3), if the effective address is real or absolute, the low-address protection procedure should raise a PROTECTION exception only when low-address protection is enabled in control register 0 and the address is low. This patch removes the ASCE checks from the function and renames it to better reflect its behavior.

Cc: Thomas Huth <thuth@linux.vnet.ibm.com>
Signed-off-by: Alexander Yarygin <yarygin@linux.vnet.ibm.com>
Reviewed-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent 40f5b735e8
commit dd9e5b7bdb
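To make the new rule concrete, here is a minimal, standalone C sketch (not part of the patch) of what the check amounts to for a real address: only the low-address-protection bit in guest CR0 and the address range matter, and the guest's ASCE/DAT state plays no role. The helper name low_addr_prot_real, the CR0 bit position (bit 35) and the 0x11ff low-address mask (bytes 0-511 and 4096-4607) follow the usual s390 conventions and are assumptions for illustration, not code taken from this commit.

/*
 * Standalone sketch (userspace, not kernel code) of the check done by
 * kvm_s390_check_low_addr_prot_real() after this patch.
 */
#include <stdbool.h>
#include <stdio.h>

/* Assumed: CR0 bit 35 is the low-address-protection control on s390. */
#define CR0_LOW_ADDRESS_PROTECTION (1UL << (63 - 35))

static bool is_low_address(unsigned long gra)
{
	/* Addresses 0..511 and 4096..4607 count as "low". */
	return (gra & ~0x11ffUL) == 0;
}

/* True if a store to real address 'gra' must raise PGM_PROTECTION. */
static bool low_addr_prot_real(unsigned long cr0, unsigned long gra)
{
	if (!(cr0 & CR0_LOW_ADDRESS_PROTECTION))
		return false;	/* protection disabled: never an exception */
	return is_low_address(gra);
}

int main(void)
{
	unsigned long cr0 = CR0_LOW_ADDRESS_PROTECTION;

	printf("%d\n", low_addr_prot_real(cr0, 0x100));  /* 1: 256 is low     */
	printf("%d\n", low_addr_prot_real(cr0, 0x1200)); /* 0: 4608 is not    */
	printf("%d\n", low_addr_prot_real(0, 0x100));    /* 0: LAP disabled   */
	return 0;
}

Because real addresses bypass dynamic address translation, no ASCE lookup is needed; the two-condition test above is the whole procedure, which is exactly why the ASCE checks could be dropped.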
@@ -697,28 +697,29 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
 }
 
 /**
- * kvm_s390_check_low_addr_protection - check for low-address protection
- * @ga: Guest address
+ * kvm_s390_check_low_addr_prot_real - check for low-address protection
+ * @gra: Guest real address
  *
  * Checks whether an address is subject to low-address protection and set
  * up vcpu->arch.pgm accordingly if necessary.
  *
  * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
  */
-int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga)
+int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
 {
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
 	struct trans_exc_code_bits *tec_bits;
+	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
 
-	if (!is_low_address(ga) || !low_address_protection_enabled(vcpu))
+	if (!ctlreg0.lap || !is_low_address(gra))
 		return 0;
 
 	memset(pgm, 0, sizeof(*pgm));
 	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
 	tec_bits->fsi = FSI_STORE;
 	tec_bits->as = psw_bits(*psw).as;
-	tec_bits->addr = ga >> PAGE_SHIFT;
+	tec_bits->addr = gra >> PAGE_SHIFT;
 	pgm->code = PGM_PROTECTION;
 
 	return pgm->code;
@@ -330,6 +330,6 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
 void ipte_lock(struct kvm_vcpu *vcpu);
 void ipte_unlock(struct kvm_vcpu *vcpu);
 int ipte_lock_held(struct kvm_vcpu *vcpu);
-int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);
+int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
 
 #endif /* __KVM_S390_GACCESS_H */
@@ -207,7 +207,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
 	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
 	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
 	addr = kvm_s390_logical_to_effective(vcpu, addr);
-	if (kvm_s390_check_low_addr_protection(vcpu, addr))
+	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
 		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
 	addr = kvm_s390_real_to_abs(vcpu, addr);
 
@@ -680,7 +680,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 	}
 
 	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
-		if (kvm_s390_check_low_addr_protection(vcpu, start))
+		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
 			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
 	}
 