KVM: Portability: Move round_robin_prev_vcpu and tss_addr to kvm_arch

This patch moves two fields, round_robin_prev_vcpu and tss_addr, to kvm_arch.
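
In practice, x86 code now reaches this state through the kvm_arch struct embedded in struct kvm. A minimal, self-contained sketch of the resulting layout and access pattern (stand-in struct definitions and an illustrative tss_addr value, not the kernel's actual declarations):

#include <stdio.h>

struct page;                           /* opaque, as in the kernel */

/* Stand-in for the x86-specific per-VM state after this patch. */
struct kvm_arch {
	int round_robin_prev_vcpu;
	unsigned int tss_addr;
	struct page *apic_access_page;
};

/* Stand-in for the generic per-VM state; the arch part is embedded. */
struct kvm {
	/* ... generic fields (mmio_bus, pio_bus, ...) ... */
	struct kvm_arch arch;
};

int main(void)
{
	struct kvm vm = { .arch = { .tss_addr = 0x12340000 } };

	/* Before this patch: vm.tss_addr; afterwards it is reached via vm.arch. */
	printf("tss_addr = 0x%x\n", vm.arch.tss_addr);
	return 0;
}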

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Zhang Xiantao 2007-12-14 10:20:16 +08:00 committed by Avi Kivity
parent d7deeeb02c
commit bfc6d222bd
4 changed files with 12 additions and 11 deletions


@@ -124,9 +124,6 @@ struct kvm {
 	struct file *filp;
 	struct kvm_io_bus mmio_bus;
 	struct kvm_io_bus pio_bus;
-	int round_robin_prev_vcpu;
-	unsigned int tss_addr;
-	struct page *apic_access_page;
 	struct kvm_vm_stat stat;
 	struct kvm_arch arch;
 };


@@ -404,7 +404,7 @@ static struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
 	int next;
 	struct kvm_lapic *apic = NULL;
 
-	last = kvm->round_robin_prev_vcpu;
+	last = kvm->arch.round_robin_prev_vcpu;
 	next = last;
 
 	do {
@@ -417,7 +417,7 @@ static struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
 			break;
 		apic = NULL;
 	} while (next != last);
-	kvm->round_robin_prev_vcpu = next;
+	kvm->arch.round_robin_prev_vcpu = next;
 	if (!apic)
 		printk(KERN_DEBUG "vcpu not ready for apic_round_robin\n");
 

@@ -1143,12 +1143,12 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 
 static gva_t rmode_tss_base(struct kvm *kvm)
 {
-	if (!kvm->tss_addr) {
+	if (!kvm->arch.tss_addr) {
 		gfn_t base_gfn = kvm->memslots[0].base_gfn +
 				 kvm->memslots[0].npages - 3;
 		return base_gfn << PAGE_SHIFT;
 	}
-	return kvm->tss_addr;
+	return kvm->arch.tss_addr;
 }
 
 static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
@@ -1473,7 +1473,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
 	int r = 0;
 
 	mutex_lock(&kvm->lock);
-	if (kvm->apic_access_page)
+	if (kvm->arch.apic_access_page)
 		goto out;
 	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
 	kvm_userspace_mem.flags = 0;
@@ -1482,7 +1482,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
 	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
 	if (r)
 		goto out;
-	kvm->apic_access_page = gfn_to_page(kvm, 0xfee00);
+	kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
 out:
 	mutex_unlock(&kvm->lock);
 	return r;
@@ -1699,7 +1699,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 
 	if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
 		vmcs_write64(APIC_ACCESS_ADDR,
-			     page_to_phys(vmx->vcpu.kvm->apic_access_page));
+			     page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
 
 	vmx->vcpu.arch.cr0 = 0x60000010;
 	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
@@ -1789,7 +1789,7 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 	ret = kvm_set_memory_region(kvm, &tss_mem, 0);
 	if (ret)
 		return ret;
-	kvm->tss_addr = addr;
+	kvm->arch.tss_addr = addr;
 	return 0;
 }


@@ -277,6 +277,10 @@ struct kvm_arch{
 	struct list_head active_mmu_pages;
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
+
+	int round_robin_prev_vcpu;
+	unsigned int tss_addr;
+	struct page *apic_access_page;
 };
 
 struct kvm_vcpu_stat {