Merge branch 'kvm-updates/2.6.34' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.34' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: PPC: Keep index within boundaries in kvmppc_44x_emul_tlbwe()
  KVM: VMX: blocked-by-sti must not defer NMI injections
  KVM: x86: Call vcpu_load and vcpu_put in cpuid_update
  KVM: SVM: Fix wrong intercept masks on 32 bit
  KVM: convert ioapic lock to spinlock
commit 9e766d81b1

@@ -440,7 +440,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 	unsigned int gtlb_index;
 
 	gtlb_index = kvmppc_get_gpr(vcpu, ra);
-	if (gtlb_index > KVM44x_GUEST_TLB_SIZE) {
+	if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
 		printk("%s: index %d\n", __func__, gtlb_index);
 		kvmppc_dump_vcpu(vcpu);
 		return EMULATE_FAIL;
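
Note on the hunk above: this is a classic off-by-one. The guest TLB is an array of KVM44x_GUEST_TLB_SIZE entries, so valid indices run from 0 to KVM44x_GUEST_TLB_SIZE - 1; the old '>' comparison wrongly accepted an index equal to the array size, one slot past the end. A minimal userspace sketch of the corrected bounds check (the size value below is a stand-in, not the kernel constant):

    #include <stdio.h>

    #define GUEST_TLB_SIZE 64 /* stand-in for KVM44x_GUEST_TLB_SIZE */

    /* Reject any index outside a GUEST_TLB_SIZE-entry array. The pre-fix
     * '>' comparison accepted index == GUEST_TLB_SIZE, which indexes one
     * slot past the end of the table. */
    static int gtlb_index_valid(unsigned int gtlb_index)
    {
        return gtlb_index < GUEST_TLB_SIZE;
    }

    int main(void)
    {
        printf("index 63: %s\n", gtlb_index_valid(63) ? "ok" : "rejected");
        printf("index 64: %s\n", gtlb_index_valid(64) ? "ok" : "rejected");
        return 0;
    }
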
@@ -2067,7 +2067,7 @@ static int cpuid_interception(struct vcpu_svm *svm)
 static int iret_interception(struct vcpu_svm *svm)
 {
 	++svm->vcpu.stat.nmi_window_exits;
-	svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
+	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
 	svm->vcpu.arch.hflags |= HF_IRET_MASK;
 	return 1;
 }
@@ -2479,7 +2479,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
 
 	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
 	vcpu->arch.hflags |= HF_NMI_MASK;
-	svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
+	svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
 	++vcpu->stat.nmi_injections;
 }
 
@@ -2539,10 +2539,10 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 
 	if (masked) {
 		svm->vcpu.arch.hflags |= HF_NMI_MASK;
-		svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
+		svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
 	} else {
 		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
-		svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
+		svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
 	}
 }
 
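Note on the three SVM hunks above: they all fix the same latent width bug. vmcb->control.intercept is a 64-bit bitmap, but 1UL is only 32 bits wide on a 32-bit kernel. The |= form merely fails to reach bits above 31; the &= form is worse, because the 32-bit complement zero-extends when widened to 64 bits and silently clears every intercept bit in the upper half of the field. Forcing a 64-bit constant with 1ULL makes both operations correct on every host. A small compilable sketch of the failure mode (the bit positions are illustrative stand-ins, not the values from the SVM headers):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative bit positions; stand-ins for the SVM intercept layout. */
    #define INTERCEPT_IRET_BIT  20
    #define INTERCEPT_UPPER_BIT 32 /* some intercept living above bit 31 */

    int main(void)
    {
        uint64_t intercept = (1ULL << INTERCEPT_IRET_BIT) |
                             (1ULL << INTERCEPT_UPPER_BIT);

        /* Simulate the 32-bit kernel: the mask is computed in 32 bits,
         * then zero-extends, wiping every bit above 31 in the AND. */
        uint32_t mask32 = ~(UINT32_C(1) << INTERCEPT_IRET_BIT);
        printf("buggy 32-bit mask: %#018llx\n",
               (unsigned long long)(intercept & mask32));

        /* The fix: keep the whole computation 64 bits wide. */
        printf("64-bit mask:       %#018llx\n",
               (unsigned long long)(intercept & ~(1ULL << INTERCEPT_IRET_BIT)));
        return 0;
    }

The buggy line prints 0, showing the upper intercept bit was lost along with IRET; the fixed line keeps it set.
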
@@ -2703,8 +2703,7 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 		return 0;
 
 	return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
-			(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS |
-			GUEST_INTR_STATE_NMI));
+			(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_NMI));
 }
 
 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
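
Note on the VMX hunk above: it narrows the conditions that defer an NMI injection. The STI shadow blocks maskable interrupts for one instruction but does not block NMIs, so only the MOV-SS shadow and existing NMI blocking should hold an NMI back. A compilable sketch of the fixed predicate (bit values follow the VMX interruptibility-state layout; the function itself is a stand-in, not kernel code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* VMX guest interruptibility-state bits. */
    #define GUEST_INTR_STATE_STI    (1u << 0) /* blocking by STI */
    #define GUEST_INTR_STATE_MOV_SS (1u << 1) /* blocking by MOV SS */
    #define GUEST_INTR_STATE_NMI    (1u << 3) /* blocking by NMI */

    /* Post-fix predicate: the STI shadow no longer defers NMIs. */
    static bool nmi_allowed(uint32_t interruptibility)
    {
        return !(interruptibility &
                 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_NMI));
    }

    int main(void)
    {
        printf("STI shadow:    %s\n",
               nmi_allowed(GUEST_INTR_STATE_STI) ? "inject" : "defer");
        printf("MOV-SS shadow: %s\n",
               nmi_allowed(GUEST_INTR_STATE_MOV_SS) ? "inject" : "defer");
        return 0;
    }
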
@@ -1712,6 +1712,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 	if (copy_from_user(cpuid_entries, entries,
 			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
 		goto out_free;
+	vcpu_load(vcpu);
 	for (i = 0; i < cpuid->nent; i++) {
 		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
 		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
@@ -1729,6 +1730,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 	r = 0;
 	kvm_apic_set_version(vcpu);
 	kvm_x86_ops->cpuid_update(vcpu);
+	vcpu_put(vcpu);
 
 out_free:
 	vfree(cpuid_entries);
@@ -1749,9 +1751,11 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
 	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
 			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
 		goto out;
+	vcpu_load(vcpu);
 	vcpu->arch.cpuid_nent = cpuid->nent;
 	kvm_apic_set_version(vcpu);
 	kvm_x86_ops->cpuid_update(vcpu);
+	vcpu_put(vcpu);
 	return 0;
 
 out:
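
Note on the two cpuid hunks above: both ioctl paths gain the same bracketing. The cpuid_update callback touches per-vcpu hardware state (on VMX it writes the vcpu's VMCS), and that state is only accessible while the vcpu is loaded on the current CPU, so the update must sit between vcpu_load() and vcpu_put(). A userspace stand-in sketch of the pattern (all types and bodies here are placeholders, not the kernel implementation):

    #include <stdio.h>

    struct vcpu { int loaded; }; /* stand-in for struct kvm_vcpu */

    static void vcpu_load(struct vcpu *v) { v->loaded = 1; } /* bind state to this CPU */
    static void vcpu_put(struct vcpu *v)  { v->loaded = 0; } /* release it again */

    /* Stand-in for kvm_x86_ops->cpuid_update(): only safe while loaded. */
    static void cpuid_update(struct vcpu *v)
    {
        printf("cpuid_update: vcpu %s\n",
               v->loaded ? "loaded (safe)" : "not loaded (the old bug)");
    }

    int main(void)
    {
        struct vcpu v = { 0 };

        cpuid_update(&v);  /* pre-fix call path: state not loaded */

        vcpu_load(&v);     /* post-fix: bracket the update */
        cpuid_update(&v);
        vcpu_put(&v);
        return 0;
    }
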
@@ -197,7 +197,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 	union kvm_ioapic_redirect_entry entry;
 	int ret = 1;
 
-	mutex_lock(&ioapic->lock);
+	spin_lock(&ioapic->lock);
 	if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
 		entry = ioapic->redirtbl[irq];
 		level ^= entry.fields.polarity;
@@ -214,7 +214,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 		}
 		trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
 	}
-	mutex_unlock(&ioapic->lock);
+	spin_unlock(&ioapic->lock);
 
 	return ret;
 }
@@ -238,9 +238,9 @@ static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
 		 * is dropped it will be put into irr and will be delivered
 		 * after ack notifier returns.
 		 */
-		mutex_unlock(&ioapic->lock);
+		spin_unlock(&ioapic->lock);
 		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
-		mutex_lock(&ioapic->lock);
+		spin_lock(&ioapic->lock);
 
 		if (trigger_mode != IOAPIC_LEVEL_TRIG)
 			continue;
@@ -259,9 +259,9 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
 	smp_rmb();
 	if (!test_bit(vector, ioapic->handled_vectors))
 		return;
-	mutex_lock(&ioapic->lock);
+	spin_lock(&ioapic->lock);
 	__kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
-	mutex_unlock(&ioapic->lock);
+	spin_unlock(&ioapic->lock);
 }
 
 static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
@@ -287,7 +287,7 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
 	ASSERT(!(addr & 0xf));	/* check alignment */
 
 	addr &= 0xff;
-	mutex_lock(&ioapic->lock);
+	spin_lock(&ioapic->lock);
 	switch (addr) {
 	case IOAPIC_REG_SELECT:
 		result = ioapic->ioregsel;
@@ -301,7 +301,7 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
 		result = 0;
 		break;
 	}
-	mutex_unlock(&ioapic->lock);
+	spin_unlock(&ioapic->lock);
 
 	switch (len) {
 	case 8:
@@ -338,7 +338,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
 	}
 
 	addr &= 0xff;
-	mutex_lock(&ioapic->lock);
+	spin_lock(&ioapic->lock);
 	switch (addr) {
 	case IOAPIC_REG_SELECT:
 		ioapic->ioregsel = data;
@@ -356,7 +356,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
 	default:
 		break;
 	}
-	mutex_unlock(&ioapic->lock);
+	spin_unlock(&ioapic->lock);
 	return 0;
 }
 
@@ -386,7 +386,7 @@ int kvm_ioapic_init(struct kvm *kvm)
 	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
 	if (!ioapic)
 		return -ENOMEM;
-	mutex_init(&ioapic->lock);
+	spin_lock_init(&ioapic->lock);
 	kvm->arch.vioapic = ioapic;
 	kvm_ioapic_reset(ioapic);
 	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
@@ -419,9 +419,9 @@ int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
 	if (!ioapic)
 		return -EINVAL;
 
-	mutex_lock(&ioapic->lock);
+	spin_lock(&ioapic->lock);
 	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
-	mutex_unlock(&ioapic->lock);
+	spin_unlock(&ioapic->lock);
 	return 0;
 }
 
@@ -431,9 +431,9 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
 	if (!ioapic)
 		return -EINVAL;
 
-	mutex_lock(&ioapic->lock);
+	spin_lock(&ioapic->lock);
 	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
 	update_handled_vectors(ioapic);
-	mutex_unlock(&ioapic->lock);
+	spin_unlock(&ioapic->lock);
 	return 0;
 }
@@ -45,7 +45,7 @@ struct kvm_ioapic {
 	struct kvm_io_device dev;
 	struct kvm *kvm;
 	void (*ack_notifier)(void *opaque, int irq);
-	struct mutex lock;
+	spinlock_t lock;
 	DECLARE_BITMAP(handled_vectors, 256);
 };
 
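Note on the ioapic hunks: they are one mechanical conversion. kvm_ioapic_set_irq() is reached from contexts that must not sleep, and a mutex may sleep, so the lock becomes a spinlock: mutex_init/mutex_lock/mutex_unlock become spin_lock_init/spin_lock/spin_unlock and the struct field is retyped to spinlock_t. Note that __kvm_ioapic_update_eoi() still drops the lock around kvm_notify_acked_irq(), since ack notifiers may take other locks or re-enter the ioapic. A userspace sketch of that drop-and-reacquire pattern, using POSIX spinlocks as a stand-in (names and bodies are illustrative, not the kernel code; build with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_spinlock_t ioapic_lock; /* stand-in for ioapic->lock */

    /* Stand-in for kvm_notify_acked_irq(): may take other locks, so it
     * must run with ioapic_lock released. */
    static void notify_acked_irq(int pin)
    {
        printf("ack notifier ran for pin %d outside the lock\n", pin);
    }

    static void update_eoi(int pin)
    {
        pthread_spin_lock(&ioapic_lock);
        /* ... clear the pin's remote IRR under the lock ... */
        pthread_spin_unlock(&ioapic_lock); /* drop across the callback */
        notify_acked_irq(pin);
        pthread_spin_lock(&ioapic_lock);   /* reacquire and re-check state */
        /* ... retrigger the pin if it was reasserted meanwhile ... */
        pthread_spin_unlock(&ioapic_lock);
    }

    int main(void)
    {
        pthread_spin_init(&ioapic_lock, PTHREAD_PROCESS_PRIVATE);
        update_eoi(3);
        pthread_spin_destroy(&ioapic_lock);
        return 0;
    }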