Merge branch 'kvm-updates/2.6.33' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.33' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: get rid of kvm_create_vm() unused label warning on s390
  KVM: powerpc: Fix mtsrin in book3s_64 mmu
  KVM: ia64: fix build breakage due to host spinlock change
  KVM: x86: Extend KVM_SET_VCPU_EVENTS with selective updates
  KVM: LAPIC: make sure IRR bitmap is scanned after vm load
  KVM: Fix possible circular locking in kvm_vm_ioctl_assign_device()
  KVM: MMU: remove prefault from invlpg handler
commit b07d41b77e
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -685,7 +685,7 @@ struct kvm_vcpu_events {
 		__u8 pad;
 	} nmi;
 	__u32 sipi_vector;
-	__u32 flags;   /* must be zero */
+	__u32 flags;
 };
 
 4.30 KVM_SET_VCPU_EVENTS
@@ -701,6 +701,14 @@ vcpu.
 
 See KVM_GET_VCPU_EVENTS for the data structure.
 
+Fields that may be modified asynchronously by running VCPUs can be excluded
+from the update. These fields are nmi.pending and sipi_vector. Keep the
+corresponding bits in the flags field cleared to suppress overwriting the
+current in-kernel state. The bits are:
+
+KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
+KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector
+
 
 5. The kvm_run structure
 
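Note: as an illustration of the selective-update protocol documented above, a
minimal userspace sketch might look like the following. This is not part of
the commit; the helper name is hypothetical and vcpu_fd is assumed to be an
already-open KVM vcpu file descriptor.

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Rewrite only the injected-exception state of a vcpu. */
    int clear_injected_exception(int vcpu_fd)
    {
            struct kvm_vcpu_events events;

            if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
                    return -1;

            events.exception.injected = 0;

            /*
             * Leave both valid bits clear: nmi.pending and sipi_vector
             * may have been changed asynchronously by the running vcpu
             * since the GET, so our stale copies must not be pushed
             * back into the kernel.
             */
            events.flags = 0;

            return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
    }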
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -388,6 +388,9 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 #define _vmm_raw_spin_lock(x)	 do {}while(0)
 #define _vmm_raw_spin_unlock(x) do {}while(0)
 #else
+typedef struct {
+	volatile unsigned int lock;
+} vmm_spinlock_t;
 #define _vmm_raw_spin_lock(x)					\
 	do {							\
 		__u32 *ia64_spinlock_ptr = (__u32 *) (x);	\
@@ -405,12 +408,12 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 
 #define _vmm_raw_spin_unlock(x)				\
 	do { barrier();					\
-		((spinlock_t *)x)->raw_lock.lock = 0; }	\
+		((vmm_spinlock_t *)x)->lock = 0; }	\
 	while (0)
 #endif
 
-void vmm_spin_lock(spinlock_t *lock);
-void vmm_spin_unlock(spinlock_t *lock);
+void vmm_spin_lock(vmm_spinlock_t *lock);
+void vmm_spin_unlock(vmm_spinlock_t *lock);
 enum {
 	I_TLB = 1,
 	D_TLB = 2
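Note: the breakage fixed here stems from the host spinlock rework in this
same kernel cycle: the raw spinlock internals were renamed, so reaching into
((spinlock_t *)x)->raw_lock.lock no longer compiles. Rather than track the
host layout, the ia64 VMM gains its own one-word vmm_spinlock_t above, and
the declarations, the definitions in vmm.c, and the one user in vtlb.c below
are all switched over to it.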
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -60,12 +60,12 @@ static void __exit kvm_vmm_exit(void)
 	return ;
 }
 
-void vmm_spin_lock(spinlock_t *lock)
+void vmm_spin_lock(vmm_spinlock_t *lock)
 {
 	_vmm_raw_spin_lock(lock);
 }
 
-void vmm_spin_unlock(spinlock_t *lock)
+void vmm_spin_unlock(vmm_spinlock_t *lock)
 {
 	_vmm_raw_spin_unlock(lock);
 }
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -182,7 +182,7 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
 {
 	u64 i, dirty_pages = 1;
 	u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
-	spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
+	vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
 	void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
 
 	dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -390,6 +390,26 @@ static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
 {
 	u64 rb = 0, rs = 0;
 
+	/*
+	 * According to Book3 2.01 mtsrin is implemented as:
+	 *
+	 * The SLB entry specified by (RB)32:35 is loaded from register
+	 * RS, as follows.
+	 *
+	 * SLBE Bit	Source			SLB Field
+	 *
+	 * 0:31		0x0000_0000		ESID-0:31
+	 * 32:35	(RB)32:35		ESID-32:35
+	 * 36		0b1			V
+	 * 37:61	0x00_0000 || 0b0	VSID-0:24
+	 * 62:88	(RS)37:63		VSID-25:51
+	 * 89:91	(RS)33:35		Ks Kp N
+	 * 92		(RS)36			L ((RS)36 must be 0b0)
+	 * 93		0b0			C
+	 */
+
+	dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);
+
 	/* ESID = srnum */
 	rb |= (srnum & 0xf) << 28;
 	/* Set the valid bit */
@@ -400,7 +420,7 @@ static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
 	/* VSID = VSID */
 	rs |= (value & 0xfffffff) << 12;
 	/* flags = flags */
-	rs |= ((value >> 27) & 0xf) << 9;
+	rs |= ((value >> 28) & 0x7) << 9;
 
 	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
 }
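Note: a quick worked check of the one-line flag fix, assuming the usual
32-bit segment-register layout (IBM bit numbering: T=0, Ks=1, Kp=2, N=3, so
in C bit numbering Ks=30, Kp=29, N=28). The old expression took four bits
starting one position too low, so Ks collided with the low VSID bit at
position 12 and every protection bit landed one slot too high; the new one
extracts exactly Ks Kp N into rs bits 11:9. A standalone sketch:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* Example SR image with Ks=1, Kp=0, N=1 (C bits 30 and 28). */
            uint32_t value = (1u << 30) | (1u << 28);

            /* Fixed: exactly Ks Kp N, placed at rs bits 11:9, just
             * below the VSID field that starts at bit 12. */
            uint64_t rs_new = (uint64_t)((value >> 28) & 0x7) << 9;

            /* Broken: four bits, one position too low, so Ks lands in
             * the VSID low bit and Kp/N each shift up one slot. */
            uint64_t rs_old = (uint64_t)((value >> 27) & 0xf) << 9;

            assert(rs_new == ((1u << 11) | (1u << 9)));
            assert(rs_old == ((1u << 12) | (1u << 10)));
            return 0;
    }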
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -254,6 +254,10 @@ struct kvm_reinject_control {
 	__u8 reserved[31];
 };
 
+/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
+#define KVM_VCPUEVENT_VALID_NMI_PENDING	0x00000001
+#define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002
+
 /* for KVM_GET/SET_VCPU_EVENTS */
 struct kvm_vcpu_events {
 	struct {
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1150,6 +1150,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 	hrtimer_cancel(&apic->lapic_timer.timer);
 	update_divide_count(apic);
 	start_apic_timer(apic);
+	apic->irr_pending = true;
 }
 
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
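Note: apic->irr_pending is a lazy hint used to skip scanning the IRR bitmap
when no interrupt has been queued. After LAPIC state is restored from
userspace, the bitmap may already contain set bits that were never accounted
in the hint, so interrupts loaded with the vm image could be silently lost.
Forcing the hint to true makes the next lookup do one real scan. Roughly, the
fast path this re-arms looks like the following (a paraphrase of the
surrounding lapic.c logic, not part of this hunk):

    static int apic_find_highest_irr(struct kvm_lapic *apic)
    {
            /* Lazy shortcut: nothing was ever set, skip the scan. */
            if (!apic->irr_pending)
                    return -1;

            /* Otherwise do the real 256-bit bitmap search. */
            return apic_search_irr(apic);
    }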
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -455,8 +455,6 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	pt_element_t gpte;
-	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
 	int need_flush = 0;
@@ -470,10 +468,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		if (level == PT_PAGE_TABLE_LEVEL  ||
 		    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
 		    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
-			struct kvm_mmu_page *sp = page_header(__pa(sptep));
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT);
-			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
 			if (is_shadow_present_pte(*sptep)) {
 				rmap_remove(vcpu->kvm, sptep);
@@ -492,18 +486,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	if (need_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	if (pte_gpa == -1)
-		return;
-	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
-				  sizeof(pt_element_t)))
-		return;
-	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
-		if (mmu_topup_memory_caches(vcpu))
-			return;
-		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
-				  sizeof(pt_element_t), 0);
-	}
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
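Note: with gpte and pte_gpa gone, invlpg is reduced to zapping the shadow
entry and flushing. The removed tail re-read the guest PTE after mmu_lock had
been dropped and pushed it back through kvm_mmu_pte_write() as a prefault;
dropping the optimization removes that post-unlock window rather than trying
to make it safe.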
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1913,7 +1913,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
-	events->flags = 0;
+	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
+			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
 
 	vcpu_put(vcpu);
 }
@@ -1921,7 +1922,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
 {
-	if (events->flags)
+	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
+			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
 		return -EINVAL;
 
 	vcpu_load(vcpu);
@@ -1938,10 +1940,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		kvm_pic_clear_isr_ack(vcpu->kvm);
 
 	vcpu->arch.nmi_injected = events->nmi.injected;
-	vcpu->arch.nmi_pending = events->nmi.pending;
+	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
+		vcpu->arch.nmi_pending = events->nmi.pending;
 	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
 
-	vcpu->arch.sipi_vector = events->sipi_vector;
+	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
+		vcpu->arch.sipi_vector = events->sipi_vector;
 
 	vcpu_put(vcpu);
 
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -508,8 +508,8 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	struct kvm_assigned_dev_kernel *match;
 	struct pci_dev *dev;
 
-	down_read(&kvm->slots_lock);
 	mutex_lock(&kvm->lock);
+	down_read(&kvm->slots_lock);
 
 	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
 				      assigned_dev->assigned_dev_id);
@@ -573,8 +573,8 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	}
 
 out:
-	mutex_unlock(&kvm->lock);
 	up_read(&kvm->slots_lock);
+	mutex_unlock(&kvm->lock);
 	return r;
 out_list_del:
 	list_del(&match->list);
@@ -585,8 +585,8 @@ out_put:
 	pci_dev_put(dev);
 out_free:
 	kfree(match);
-	mutex_unlock(&kvm->lock);
 	up_read(&kvm->slots_lock);
+	mutex_unlock(&kvm->lock);
 	return r;
 }
 
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -64,7 +64,7 @@ MODULE_LICENSE("GPL");
 /*
  * Ordering of locks:
  *
- *		kvm->slots_lock --> kvm->lock --> kvm->irq_lock
+ *		kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
 DEFINE_SPINLOCK(kvm_lock);
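Note: the comment flip above records the new rule that kvm->lock nests
outside kvm->slots_lock, which is exactly what the assigned-dev hunks
enforce. Taking the same two locks in opposite orders on different paths is
the classic ABBA pattern that lockdep reports as possible circular locking.
A minimal standalone illustration with hypothetical locks (plain mutexes
here; slots_lock is really a rwsem):

    #include <pthread.h>

    static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* kvm->lock */
    static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* kvm->slots_lock */

    /* Every path now takes A before B... */
    void path_one(void)
    {
            pthread_mutex_lock(&lock_a);
            pthread_mutex_lock(&lock_b);
            /* ... critical section ... */
            pthread_mutex_unlock(&lock_b);
            pthread_mutex_unlock(&lock_a);
    }

    /* ...because if some path took B before A, thread 1 could hold A
     * waiting for B while thread 2 holds B waiting for A, and neither
     * ever proceeds. The assign-device fix is exactly this reordering. */
    void path_two(void)
    {
            pthread_mutex_lock(&lock_a);
            pthread_mutex_lock(&lock_b);
            pthread_mutex_unlock(&lock_b);
            pthread_mutex_unlock(&lock_a);
    }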
@@ -406,8 +406,11 @@ static struct kvm *kvm_create_vm(void)
 out:
 	return kvm;
 
+#if defined(KVM_COALESCED_MMIO_PAGE_OFFSET) || \
+    (defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER))
 out_err:
 	hardware_disable_all();
+#endif
 out_err_nodisable:
 	kfree(kvm);
 	return ERR_PTR(r);
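Note: the s390 warning this silences arises because s390 defines neither
KVM_COALESCED_MMIO_PAGE_OFFSET nor KVM_ARCH_WANT_MMU_NOTIFIER, so every goto
that targets out_err is preprocessed away and the label is left orphaned. A
reduced example of the pattern, with a hypothetical config macro:

    /* Build with -Wunused-label. Without -DHAVE_FEATURE the only goto
     * disappears and gcc warns that 'out_err' is defined but not used;
     * guarding the label with the same condition, as the hunk above
     * does, silences the warning. */
    int create_thing(int fail)
    {
            int r = -1;

    #ifdef HAVE_FEATURE
            if (fail)
                    goto out_err;
    #endif
            return 0;

    #ifdef HAVE_FEATURE
    out_err:
    #endif
            return r;
    }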