Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "These are mostly PPC changes for 3.16-new things. However, there is
  an x86 change too and it is a regression from 3.14. As it only
  affects nested virtualization and there were other changes in this
  area in 3.16, I am not nominating it for 3.15-stable"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: Check for nested events if there is an injectable interrupt
  KVM: PPC: RTAS: Do byte swaps explicitly
  KVM: PPC: Book3S PR: Fix ABIv2 on LE
  KVM: PPC: Assembly functions exported to modules need _GLOBAL_TOC()
  PPC: Add _GLOBAL_TOC for 32bit
  KVM: PPC: BOOK3S: HV: Use base page size when comparing against slb value
  KVM: PPC: Book3E: Unlock mmu_lock when setting caching attribute
commit 5b2b9d7761
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 	return rb;
 }
 
-static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+					     bool is_base_size)
 {
+
 	int size, a_psize;
 	/* Look at the 8 bit LP value */
 	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
@@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
 				continue;
 
 			a_psize = __hpte_actual_psize(lp, size);
-			if (a_psize != -1)
+			if (a_psize != -1) {
+				if (is_base_size)
+					return 1ul << mmu_psize_defs[size].shift;
 				return 1ul << mmu_psize_defs[a_psize].shift;
+			}
 		}
 
 	}
 	return 0;
 }
 
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+	return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+	return __hpte_page_size(h, l, 1);
+}
+
 static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
 {
 	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
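With MPSS (multiple page sizes within a segment), the page size an HPTE actually maps can be larger than the base page size the SLB entry advertises, so callers that compare against SLB encodings need the base size, not the actual one. For context, a stand-alone C sketch of the same selector pattern (not part of the patch; the shift values are illustrative, not the kernel's mmu_psize_defs):

#include <stdio.h>

/* Illustrative (base, actual) page-size shifts: e.g. a 64k base
 * segment whose HPTE is backed by a larger actual page. */
struct psize_def { int base_shift; int actual_shift; };

static const struct psize_def demo_hpte = { .base_shift = 16, .actual_shift = 24 };

static unsigned long demo_page_size(const struct psize_def *d, int is_base_size)
{
	/* Mirrors the __hpte_page_size() split: one helper, a flag
	 * picks which of the two sizes the caller wants. */
	if (is_base_size)
		return 1UL << d->base_shift;
	return 1UL << d->actual_shift;
}

int main(void)
{
	printf("actual: %lu\n", demo_page_size(&demo_hpte, 0)); /* 16M */
	printf("base:   %lu\n", demo_page_size(&demo_hpte, 1)); /* 64k */
	return 0;
}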
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
@@ -277,6 +277,8 @@ n:
 	.globl n;	\
 n:
 
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
 #define _KPROBE(n)	\
 	.section ".kprobes.text","a";	\
 	.globl	n;	\
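On 64-bit ELFv2, _GLOBAL_TOC() emits a global entry point that sets up r2 (the TOC pointer) before falling through to the local entry; 32-bit PowerPC has no TOC, so shared code that now uses _GLOBAL_TOC() would fail to build without this one-line stub. A stand-alone C sketch of the same dual-entry idea (ENTRY_TOC, HAVE_TOC and toc_setups are hypothetical names, not kernel macros; build with -DHAVE_TOC to get the wrapper):

#include <stdio.h>

static int toc_setups; /* stands in for the r2 setup the real macro emits */

#ifdef HAVE_TOC
/* Global entry does the "TOC setup", then reaches the local entry. */
#define ENTRY_TOC(name) \
	static void name##_local(void); \
	void name(void) { toc_setups++; name##_local(); } \
	static void name##_local(void)
#else
/* No TOC on this platform: the _TOC variant is just the plain entry,
 * exactly like the ppc_asm.h stub above. */
#define ENTRY_TOC(name) void name(void)
#endif

ENTRY_TOC(trampoline) { puts("entered"); }

int main(void)
{
	trampoline();
	printf("toc setups: %d\n", toc_setups);
	return 0;
}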
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 				goto out;
 			}
 			if (!rma_setup && is_vrma_hpte(v)) {
-				unsigned long psize = hpte_page_size(v, r);
+				unsigned long psize = hpte_base_page_size(v, r);
 				unsigned long senc = slb_pgsize_encoding(psize);
 				unsigned long lpcr;
 
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			r = hpte[i+1];
 
 			/*
-			 * Check the HPTE again, including large page size
-			 * Since we don't currently allow any MPSS (mixed
-			 * page-size segment) page sizes, it is sufficient
-			 * to check against the actual page size.
+			 * Check the HPTE again, including base page size
 			 */
 			if ((v & valid) && (v & mask) == val &&
-			    hpte_page_size(v, r) == (1ul << pshift))
+			    hpte_base_page_size(v, r) == (1ul << pshift))
 				/* Return with the HPTE still locked */
 				return (hash << 3) + (i >> 1);
 
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -48,7 +48,7 @@
  *
  * LR = return address to continue at after eventually re-enabling MMU
  */
-_GLOBAL(kvmppc_hv_entry_trampoline)
+_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
 	stdu	r1, -112(r1)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
@@ -25,7 +25,11 @@
 #include <asm/exception-64s.h>
 
 #if defined(CONFIG_PPC_BOOK3S_64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name)		name
+#else
 #define FUNC(name)		GLUE(.,name)
+#endif
 #define GET_SHADOW_VCPU(reg)	addi	reg, r13, PACA_SVCPU
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,7 +36,11 @@
 
 #if defined(CONFIG_PPC_BOOK3S_64)
 
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name)		name
+#else
 #define FUNC(name)		GLUE(.,name)
+#endif
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
@@ -146,7 +150,7 @@ kvmppc_handler_skip_ins:
  * On entry, r4 contains the guest shadow MSR
  * MSR.EE has to be 0 when calling this function
  */
-_GLOBAL(kvmppc_entry_trampoline)
+_GLOBAL_TOC(kvmppc_entry_trampoline)
 	mfmsr	r5
 	LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
 	toreal(r7)
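The FUNC() change accounts for symbol naming across the two 64-bit ABIs: ELFv1 uses function descriptors, so the actual code symbol is the dot-prefixed name, while ELFv2 dropped descriptors and the plain name is the entry point. In assembly, GLUE(.,name) pastes the dot on; since C identifiers cannot contain '.', this stand-alone sketch resolves the call target as a string instead (FUNC_SYM is a hypothetical macro, not the kernel's):

#include <stdio.h>

#if defined(_CALL_ELF) && _CALL_ELF == 2
#define FUNC_SYM(name) #name		/* ELFv2: one symbol, no dot */
#else
#define FUNC_SYM(name) "." #name	/* ELFv1: dot-prefixed code symbol */
#endif

int main(void)
{
	/* On a non-PPC64-ELFv2 build this prints the dotted form. */
	printf("call target: %s\n", FUNC_SYM(kvmppc_entry_trampoline));
	return 0;
}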
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
@@ -23,20 +23,20 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq, server, priority;
 	int rc;
 
-	if (args->nargs != 3 || args->nret != 1) {
+	if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
-	server = args->args[1];
-	priority = args->args[2];
+	irq = be32_to_cpu(args->args[0]);
+	server = be32_to_cpu(args->args[1]);
+	priority = be32_to_cpu(args->args[2]);
 
 	rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
 	if (rc)
 		rc = -3;
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -44,12 +44,12 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq, server, priority;
 	int rc;
 
-	if (args->nargs != 1 || args->nret != 3) {
+	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
+	irq = be32_to_cpu(args->args[0]);
 
 	server = priority = 0;
 	rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@@ -58,10 +58,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 		goto out;
 	}
 
-	args->rets[1] = server;
-	args->rets[2] = priority;
+	args->rets[1] = cpu_to_be32(server);
+	args->rets[2] = cpu_to_be32(priority);
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -69,18 +69,18 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq;
 	int rc;
 
-	if (args->nargs != 1 || args->nret != 1) {
+	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
+	irq = be32_to_cpu(args->args[0]);
 
 	rc = kvmppc_xics_int_off(vcpu->kvm, irq);
 	if (rc)
 		rc = -3;
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -88,18 +88,18 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq;
 	int rc;
 
-	if (args->nargs != 1 || args->nret != 1) {
+	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
+	irq = be32_to_cpu(args->args[0]);
 
 	rc = kvmppc_xics_int_on(vcpu->kvm, irq);
 	if (rc)
 		rc = -3;
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 #endif /* CONFIG_KVM_XICS */
 
@@ -205,32 +205,6 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
 	return rc;
 }
 
-static void kvmppc_rtas_swap_endian_in(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-	int i;
-
-	args->token = be32_to_cpu(args->token);
-	args->nargs = be32_to_cpu(args->nargs);
-	args->nret = be32_to_cpu(args->nret);
-	for (i = 0; i < args->nargs; i++)
-		args->args[i] = be32_to_cpu(args->args[i]);
-#endif
-}
-
-static void kvmppc_rtas_swap_endian_out(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-	int i;
-
-	for (i = 0; i < args->nret; i++)
-		args->args[i] = cpu_to_be32(args->args[i]);
-	args->token = cpu_to_be32(args->token);
-	args->nargs = cpu_to_be32(args->nargs);
-	args->nret = cpu_to_be32(args->nret);
-#endif
-}
-
 int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 {
 	struct rtas_token_definition *d;
@@ -249,8 +223,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 	if (rc)
 		goto fail;
 
-	kvmppc_rtas_swap_endian_in(&args);
-
 	/*
 	 * args->rets is a pointer into args->args. Now that we've
 	 * copied args we need to fix it up to point into our copy,
@@ -258,13 +230,13 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 	 * value so we can restore it on the way out.
 	 */
 	orig_rets = args.rets;
-	args.rets = &args.args[args.nargs];
+	args.rets = &args.args[be32_to_cpu(args.nargs)];
 
 	mutex_lock(&vcpu->kvm->lock);
 
 	rc = -ENOENT;
 	list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
-		if (d->token == args.token) {
+		if (d->token == be32_to_cpu(args.token)) {
 			d->handler->handler(vcpu, &args);
 			rc = 0;
 			break;
@@ -275,7 +247,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 
 	if (rc == 0) {
 		args.rets = orig_rets;
-		kvmppc_rtas_swap_endian_out(&args);
 		rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
 		if (rc)
 			goto fail;
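RTAS is a big-endian interface: the rtas_args buffer read from guest memory holds big-endian 32-bit words even on a little-endian host. Instead of byte-swapping the whole structure in place on entry and exit (the removed kvmppc_rtas_swap_endian_in/out helpers), each field is now converted exactly where it is read or written. A minimal stand-alone sketch of that per-field style; the struct and values are illustrative, with ntohl()/htonl() standing in for be32_to_cpu()/cpu_to_be32() since network order is big-endian:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl()/htonl(): 32-bit big-endian converters */

struct demo_rtas_args {
	uint32_t token, nargs, nret;
	uint32_t args[4];	/* rets overlays args[nargs..], as in the kernel */
};

int main(void)
{
	/* As read from guest memory: every field is big-endian. */
	struct demo_rtas_args a = {
		.nargs = htonl(3), .nret = htonl(1),
		.args  = { htonl(7), htonl(1), htonl(5) },
	};

	/* Convert each field at its point of use, never the buffer wholesale. */
	if (ntohl(a.nargs) != 3 || ntohl(a.nret) != 1)
		return 1;
	uint32_t irq = ntohl(a.args[0]);
	printf("irq %u\n", irq);

	/* Results written back to the guest get converted the other way;
	 * rets[0] lives right after the nargs input words. */
	a.args[ntohl(a.nargs)] = htonl(0 /* rc */);
	return 0;
}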
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
@@ -473,7 +473,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		if (printk_ratelimit())
 			pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
 				__func__, (long)gfn, pfn);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
 
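The e500 fix addresses an error path that returned while kvm->mmu_lock was still held; error exits must funnel through the same unlock that the success path uses. The general shape of the bug and the fix, sketched with a pthread mutex (names here are illustrative):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* pte_present stands in for the check done under the lock. */
static int shadow_map_demo(int pte_present)
{
	int ret = 0;

	pthread_mutex_lock(&demo_lock);
	if (!pte_present) {
		ret = -EINVAL;	/* was: return -EINVAL; -- leaks the lock */
		goto out;
	}
	/* ... set up the mapping under the lock ... */
out:
	pthread_mutex_unlock(&demo_lock);
	return ret;
}

int main(void)
{
	return shadow_map_demo(0) == -EINVAL ? 0 : 1;
}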
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
@@ -5887,6 +5887,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 			kvm_x86_ops->set_nmi(vcpu);
 		}
 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
+		/*
+		 * Because interrupts can be injected asynchronously, we are
+		 * calling check_nested_events again here to avoid a race condition.
+		 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
+		 * proposal and current concerns.  Perhaps we should be setting
+		 * KVM_REQ_EVENT only on certain events and not unconditionally?
+		 */
+		if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+			r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+			if (r != 0)
+				return r;
+		}
 		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
 					    false);
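The x86 fix re-runs the nested-event check immediately before injecting an interrupt: an interrupt can become injectable asynchronously after the first check, and while L2 is running it must trigger a vmexit to L1 rather than being queued directly into the guest. A reduced control-flow sketch; the -EBUSY convention mirrors how KVM's check_nested_events signals a required exit, but the types and functions here are illustrative stand-ins, not KVM's:

#include <errno.h>
#include <stdio.h>

struct demo_vcpu { int guest_mode; int nested_exit_pending; };

/* Stand-in for kvm_x86_ops->check_nested_events(): returns -EBUSY when
 * the event must first cause a vmexit from L2 to L1. */
static int check_nested_events(struct demo_vcpu *v)
{
	return v->nested_exit_pending ? -EBUSY : 0;
}

static int inject_pending_interrupt(struct demo_vcpu *v)
{
	/* Re-check right before injecting: the interrupt may have become
	 * injectable only after the first check ran, which is the race
	 * the patch closes. */
	if (v->guest_mode) {
		int r = check_nested_events(v);
		if (r != 0)
			return r;	/* exit to L1 instead of queueing */
	}
	puts("interrupt queued for the guest");
	return 0;
}

int main(void)
{
	struct demo_vcpu v = { .guest_mode = 1, .nested_exit_pending = 1 };
	return inject_pending_interrupt(&v) == -EBUSY ? 0 : 1;
}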