KVM: MIPS: VZ support, Octeon III, and TLBR
Add basic support for the MIPS Virtualization Module (generally known as
MIPS VZ) in KVM. We primarily support the ImgTec P5600, P6600, I6400, and
Cavium Octeon III cores so far.

Support is included for the following VZ / guest hardware features:
- MIPS32 and MIPS64, r5 (VZ requires r5 or later) and r6
- TLBs with GuestID (IMG cores) or Root ASID Dealias (Octeon III)
- Shared physical root/guest TLB (IMG cores)
- FPU / MSA
- Cop0 timer (up to 1GHz for now due to soft timer limit)
- Segmentation control (EVA)
- Hardware page table walker (HTW) both for root and guest TLB

Also included is a proper implementation of the TLBR instruction for the
trap & emulate MIPS KVM implementation.

Preliminary MIPS architecture changes are applied directly with Ralf's ack.

Merge tag 'kvm_mips_4.12_1' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/kvm-mips
From: James Hogan <james.hogan@imgtec.com>
commit 715958f921
@@ -115,12 +115,17 @@ will access the virtual machine's physical address space; offset zero
corresponds to guest physical address zero. Use of mmap() on a VM fd
is discouraged if userspace memory allocation (KVM_CAP_USER_MEMORY) is
available.
-You most certainly want to use 0 as machine type.
+You probably want to use 0 as machine type.

In order to create user controlled virtual machines on S390, check
KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as
privileged user (CAP_SYS_ADMIN).

To use hardware assisted virtualization on MIPS (VZ ASE) rather than
the default trap & emulate implementation (which changes the virtual
memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the
flag KVM_VM_MIPS_VZ.

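As a quick illustration of this flow, here is a minimal userspace sketch
(hypothetical code, not part of this series) that prefers a VZ VM and falls
back to the default machine type; it assumes the KVM_CAP_MIPS_VZ and
KVM_VM_MIPS_VZ definitions from linux/kvm.h described here:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int create_mips_vm(void)
	{
		int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
		int type = 0;			/* default machine type */

		if (kvm < 0)
			return -1;
		if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_VZ) > 0)
			type = KVM_VM_MIPS_VZ;	/* hardware assisted */
		return ioctl(kvm, KVM_CREATE_VM, type);
	}
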
4.3 KVM_GET_MSR_INDEX_LIST
@@ -2068,11 +2073,23 @@ registers, find a list below:
  MIPS  | KVM_REG_MIPS_CP0_ENTRYLO0      | 64
  MIPS  | KVM_REG_MIPS_CP0_ENTRYLO1      | 64
  MIPS  | KVM_REG_MIPS_CP0_CONTEXT       | 64
  MIPS  | KVM_REG_MIPS_CP0_CONTEXTCONFIG | 32
  MIPS  | KVM_REG_MIPS_CP0_USERLOCAL     | 64
  MIPS  | KVM_REG_MIPS_CP0_XCONTEXTCONFIG| 64
  MIPS  | KVM_REG_MIPS_CP0_PAGEMASK      | 32
  MIPS  | KVM_REG_MIPS_CP0_PAGEGRAIN     | 32
  MIPS  | KVM_REG_MIPS_CP0_SEGCTL0       | 64
  MIPS  | KVM_REG_MIPS_CP0_SEGCTL1       | 64
  MIPS  | KVM_REG_MIPS_CP0_SEGCTL2       | 64
  MIPS  | KVM_REG_MIPS_CP0_PWBASE        | 64
  MIPS  | KVM_REG_MIPS_CP0_PWFIELD       | 64
  MIPS  | KVM_REG_MIPS_CP0_PWSIZE        | 64
  MIPS  | KVM_REG_MIPS_CP0_WIRED         | 32
  MIPS  | KVM_REG_MIPS_CP0_PWCTL         | 32
  MIPS  | KVM_REG_MIPS_CP0_HWRENA        | 32
  MIPS  | KVM_REG_MIPS_CP0_BADVADDR      | 64
  MIPS  | KVM_REG_MIPS_CP0_BADINSTR      | 32
  MIPS  | KVM_REG_MIPS_CP0_BADINSTRP     | 32
  MIPS  | KVM_REG_MIPS_CP0_COUNT         | 32
  MIPS  | KVM_REG_MIPS_CP0_ENTRYHI       | 64
  MIPS  | KVM_REG_MIPS_CP0_COMPARE       | 32
@@ -2089,6 +2106,7 @@ registers, find a list below:
  MIPS  | KVM_REG_MIPS_CP0_CONFIG4       | 32
  MIPS  | KVM_REG_MIPS_CP0_CONFIG5       | 32
  MIPS  | KVM_REG_MIPS_CP0_CONFIG7       | 32
  MIPS  | KVM_REG_MIPS_CP0_XCONTEXT      | 64
  MIPS  | KVM_REG_MIPS_CP0_ERROREPC      | 64
  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH1     | 64
  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH2     | 64
@@ -2096,6 +2114,7 @@ registers, find a list below:
  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH4     | 64
  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH5     | 64
  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH6     | 64
  MIPS  | KVM_REG_MIPS_CP0_MAAR(0..63)   | 64
  MIPS  | KVM_REG_MIPS_COUNT_CTL         | 64
  MIPS  | KVM_REG_MIPS_COUNT_RESUME      | 64
  MIPS  | KVM_REG_MIPS_COUNT_HZ          | 64
@@ -2162,6 +2181,10 @@ hardware, host kernel, guest, and whether XPA is present in the guest, i.e.
with the RI and XI bits (if they exist) in bits 63 and 62 respectively, and
the PFNX field starting at bit 30.

MIPS MAARs (see KVM_REG_MIPS_CP0_MAAR(*) above) have the following id bit
patterns:
  0x7030 0000 0001 01 <reg:8>

MIPS KVM control registers (see above) have the following id bit patterns:
  0x7030 0000 0002 <reg:16>

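To make the MAAR pattern concrete, a hypothetical userspace helper composing
a MAAR register id from the bits above and fetching it with KVM_GET_ONE_REG
(the helper name is illustrative):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int get_maar(int vcpufd, unsigned int n, uint64_t *val)
	{
		struct kvm_one_reg reg = {
			/* 0x7030 0000 0001 01 <reg:8>: CP0 set 1 (MAARs) */
			.id   = 0x7030000000010100ULL | n,
			.addr = (uintptr_t)val,
		};
		return ioctl(vcpufd, KVM_GET_ONE_REG, &reg);
	}
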
@@ -4210,3 +4233,68 @@ This capability, if KVM_CHECK_EXTENSION indicates that it is
available, means that the kernel can support guests using the
hashed page table MMU defined in Power ISA V3.00 (as implemented in
the POWER9 processor), including in-memory segment tables.

8.5 KVM_CAP_MIPS_VZ

Architectures: mips

This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
it is available, means that full hardware assisted virtualization capabilities
of the hardware are available for use through KVM. An appropriate
KVM_VM_MIPS_* type must be passed to KVM_CREATE_VM to create a VM which
utilises it.

If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
available, it means that the VM is using full hardware assisted virtualization
capabilities of the hardware. This is useful to check after creating a VM with
KVM_VM_MIPS_DEFAULT.

The value returned by KVM_CHECK_EXTENSION should be compared against known
values (see below). All other values are reserved. This is to allow for the
possibility of other hardware assisted virtualization implementations which
may be incompatible with the MIPS VZ ASE.

 0: The trap & emulate implementation is in use to run guest code in user
    mode. Guest virtual memory segments are rearranged to fit the guest in the
    user mode address space.

 1: The MIPS VZ ASE is in use, providing full hardware assisted
    virtualization, including standard guest virtual memory segments.

8.6 KVM_CAP_MIPS_TE

Architectures: mips

This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
it is available, means that the trap & emulate implementation is available to
run guest code in user mode, even if KVM_CAP_MIPS_VZ indicates that hardware
assisted virtualisation is also available. KVM_VM_MIPS_TE (0) must be passed
to KVM_CREATE_VM to create a VM which utilises it.

If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
available, it means that the VM is using trap & emulate.

8.7 KVM_CAP_MIPS_64BIT

Architectures: mips

This capability indicates the supported architecture type of the guest, i.e. the
supported register and address width.

The values returned when this capability is checked by KVM_CHECK_EXTENSION on a
kvm VM handle correspond roughly to the CP0_Config.AT register field, and should
be checked specifically against known values (see below). All other values are
reserved.

 0: MIPS32 or microMIPS32.
    Both registers and addresses are 32-bits wide.
    It will only be possible to run 32-bit guest code.

 1: MIPS64 or microMIPS64 with access only to 32-bit compatibility segments.
    Registers are 64-bits wide, but addresses are 32-bits wide.
    64-bit guest code may run but cannot access MIPS64 memory segments.
    It will also be possible to run 32-bit guest code.

 2: MIPS64 or microMIPS64 with access to all address segments.
    Both registers and addresses are 64-bits wide.
    It will be possible to run 64-bit or 32-bit guest code.

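A hedged sketch of how userspace might interpret these values (vmfd is
assumed to be a VM fd obtained from KVM_CREATE_VM):

	int width = ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_64BIT);

	switch (width) {
	case 0:	/* MIPS32: 32-bit registers and addresses only */
		break;
	case 1:	/* MIPS64: 64-bit registers, 32-bit compat segments only */
		break;
	case 2:	/* MIPS64: 64-bit registers and all address segments */
		break;
	default: /* reserved; treat as unsupported */
		break;
	}
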
@@ -28,6 +28,11 @@ S390:
property inside the device tree's /hypervisor node.
For more information refer to Documentation/virtual/kvm/ppc-pv.txt

MIPS:
  KVM hypercalls use the HYPCALL instruction with code 0 and the hypercall
  number in $2 (v0). Up to four arguments may be placed in $4-$7 (a0-a3) and
  the return value is placed in $2 (v0).

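For illustration, a hypothetical guest-side wrapper for this ABI; it assumes
a VZ-aware assembler that accepts the HYPCALL mnemonic, and is not the actual
in-kernel helper:

	/* number in $2 (v0), one argument in $4 (a0), result in $2 (v0) */
	static inline unsigned long mips_hypcall1(unsigned long num,
						  unsigned long arg0)
	{
		register unsigned long v0 asm("$2") = num;
		register unsigned long a0 asm("$4") = arg0;

		asm volatile("hypcall 0"
			     : "+r"(v0)
			     : "r"(a0)
			     : "memory");
		return v0;
	}
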
KVM Hypercalls Documentation
============================
The template for each hypercall is:

@@ -1687,6 +1687,7 @@ config CPU_CAVIUM_OCTEON
	select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
	select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
	select MIPS_L1_CACHE_SHIFT_7
	select HAVE_KVM
	help
	  The Cavium Octeon processor is a highly integrated chip containing
	  many ethernet hardware widgets for networking tasks. The processor

@@ -444,6 +444,10 @@
# define cpu_has_msa		0
#endif

#ifndef cpu_has_ufr
# define cpu_has_ufr		(cpu_data[0].options & MIPS_CPU_UFR)
#endif

#ifndef cpu_has_fre
# define cpu_has_fre		(cpu_data[0].options & MIPS_CPU_FRE)
#endif

@@ -528,6 +532,9 @@
#ifndef cpu_guest_has_htw
#define cpu_guest_has_htw	(cpu_data[0].guest.options & MIPS_CPU_HTW)
#endif
#ifndef cpu_guest_has_mvh
#define cpu_guest_has_mvh	(cpu_data[0].guest.options & MIPS_CPU_MVH)
#endif
#ifndef cpu_guest_has_msa
#define cpu_guest_has_msa	(cpu_data[0].guest.ases & MIPS_ASE_MSA)
#endif

@@ -543,6 +550,9 @@
#ifndef cpu_guest_has_maar
#define cpu_guest_has_maar	(cpu_data[0].guest.options & MIPS_CPU_MAAR)
#endif
#ifndef cpu_guest_has_userlocal
#define cpu_guest_has_userlocal	(cpu_data[0].guest.options & MIPS_CPU_ULRI)
#endif

/*
 * Guest dynamic capabilities

@@ -33,6 +33,7 @@ struct guest_info {
	unsigned long		ases_dyn;
	unsigned long long	options;
	unsigned long long	options_dyn;
	int			tlbsize;
	u8			conf;
	u8			kscratch_mask;
};

@@ -109,6 +110,7 @@ struct cpuinfo_mips {
	struct guest_info	guest;
	unsigned int		gtoffset_mask;
	unsigned int		guestid_mask;
	unsigned int		guestid_cache;
} __attribute__((aligned(SMP_CACHE_BYTES)));

extern struct cpuinfo_mips cpu_data[];

@@ -415,6 +415,7 @@ enum cpu_type_enum {
#define MIPS_CPU_GUESTCTL2	MBIT_ULL(50)	/* CPU has VZ GuestCtl2 register */
#define MIPS_CPU_GUESTID	MBIT_ULL(51)	/* CPU uses VZ ASE GuestID feature */
#define MIPS_CPU_DRG		MBIT_ULL(52)	/* CPU has VZ Direct Root to Guest (DRG) */
#define MIPS_CPU_UFR		MBIT_ULL(53)	/* CPU supports User mode FR switching */

/*
 * CPU ASE encodings

@@ -10,6 +10,7 @@
#ifndef __MIPS_KVM_HOST_H__
#define __MIPS_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>

@@ -33,12 +34,23 @@
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_CONTEXTCONFIG	MIPS_CP0_32(4, 1)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG	MIPS_CP0_64(4, 3)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_SEGCTL0	MIPS_CP0_64(5, 2)
#define KVM_REG_MIPS_CP0_SEGCTL1	MIPS_CP0_64(5, 3)
#define KVM_REG_MIPS_CP0_SEGCTL2	MIPS_CP0_64(5, 4)
#define KVM_REG_MIPS_CP0_PWBASE		MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD	MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE		MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL		MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_BADINSTR	MIPS_CP0_32(8, 1)
#define KVM_REG_MIPS_CP0_BADINSTRP	MIPS_CP0_32(8, 2)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)

@@ -55,6 +67,7 @@
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_MAARI		MIPS_CP0_64(17, 2)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)

@@ -73,6 +86,11 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HALT_POLL_NS_DEFAULT 500000

#ifdef CONFIG_KVM_MIPS_VZ
extern unsigned long GUESTID_MASK;
extern unsigned long GUESTID_FIRST_VERSION;
extern unsigned long GUESTID_VERSION_MASK;
#endif


/*

@@ -145,6 +163,16 @@ struct kvm_vcpu_stat {
	u64 fpe_exits;
	u64 msa_disabled_exits;
	u64 flush_dcache_exits;
#ifdef CONFIG_KVM_MIPS_VZ
	u64 vz_gpsi_exits;
	u64 vz_gsfc_exits;
	u64 vz_hc_exits;
	u64 vz_grr_exits;
	u64 vz_gva_exits;
	u64 vz_ghfc_exits;
	u64 vz_gpa_exits;
	u64 vz_resvd_exits;
#endif
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;

@@ -157,6 +185,8 @@ struct kvm_arch_memory_slot {
struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
	/* Mask of CPUs needing GPA ASID flush */
	cpumask_t asid_flush_mask;
};

#define N_MIPS_COPROC_REGS	32

@@ -214,6 +244,11 @@ struct mips_coproc {
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5

#define MIPS_CP0_GUESTCTL2	10
#define MIPS_CP0_GUESTCTL2_SEL	5
#define MIPS_CP0_GTOFFSET	12
#define MIPS_CP0_GTOFFSET_SEL	7

/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

@@ -229,6 +264,7 @@ enum emulation_result {
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
	EMULATE_EXCEPT,		/* A guest exception has been generated */
	EMULATE_HYPERCALL,	/* HYPCALL instruction */
};

#define mips3_paddr_to_tlbpfn(x) \

@@ -276,13 +312,18 @@ struct kvm_mmu_memory_cache {
struct kvm_vcpu_arch {
	void *guest_ebase;
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long host_pgd;
	unsigned long host_entryhi;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;
	u32 host_cp0_guestctl0;
	u32 host_cp0_badinstr;
	u32 host_cp0_badinstrp;

@@ -340,7 +381,23 @@ struct kvm_vcpu_arch {
	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

#ifdef CONFIG_KVM_MIPS_VZ
	/* vcpu's vzguestid is different on each host cpu in an smp system */
	u32 vzguestid[NR_CPUS];

	/* wired guest TLB entries */
	struct kvm_mips_tlb *wired_tlb;
	unsigned int wired_tlb_limit;
	unsigned int wired_tlb_used;

	/* emulated guest MAAR registers */
	unsigned long maar[6];
#endif

	/* Last CPU the VCPU state was loaded on */
	int last_sched_cpu;
	/* Last CPU the VCPU actually executed guest code on */
	int last_exec_cpu;

	/* WAIT executed */
	int wait;

@@ -349,78 +406,6 @@ struct kvm_vcpu_arch {
	u8 msa_enabled;
};

-#define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0])
-#define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
-#define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0])
-#define kvm_write_c0_guest_entrylo0(cop0, val) (cop0->reg[MIPS_CP0_TLB_LO0][0] = (val))
-#define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0])
-#define kvm_write_c0_guest_entrylo1(cop0, val) (cop0->reg[MIPS_CP0_TLB_LO1][0] = (val))
-#define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
-#define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
-#define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
-#define kvm_write_c0_guest_userlocal(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
-#define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
-#define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
-#define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0])
-#define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
-#define kvm_read_c0_guest_hwrena(cop0) (cop0->reg[MIPS_CP0_HWRENA][0])
-#define kvm_write_c0_guest_hwrena(cop0, val) (cop0->reg[MIPS_CP0_HWRENA][0] = (val))
-#define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0])
-#define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
-#define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0])
-#define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val))
-#define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0])
-#define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
-#define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0])
-#define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
-#define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0])
-#define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val))
-#define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1])
-#define kvm_write_c0_guest_intctl(cop0, val) (cop0->reg[MIPS_CP0_STATUS][1] = (val))
-#define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0])
-#define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
-#define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0])
-#define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
-#define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0])
-#define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val))
-#define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1])
-#define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val))
-#define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0])
-#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
-#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
-#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
-#define kvm_read_c0_guest_config4(cop0) (cop0->reg[MIPS_CP0_CONFIG][4])
-#define kvm_read_c0_guest_config5(cop0) (cop0->reg[MIPS_CP0_CONFIG][5])
-#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
-#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
-#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
-#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
-#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
-#define kvm_write_c0_guest_config4(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][4] = (val))
-#define kvm_write_c0_guest_config5(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][5] = (val))
-#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
-#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
-#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
-#define kvm_read_c0_guest_kscratch1(cop0) (cop0->reg[MIPS_CP0_DESAVE][2])
-#define kvm_read_c0_guest_kscratch2(cop0) (cop0->reg[MIPS_CP0_DESAVE][3])
-#define kvm_read_c0_guest_kscratch3(cop0) (cop0->reg[MIPS_CP0_DESAVE][4])
-#define kvm_read_c0_guest_kscratch4(cop0) (cop0->reg[MIPS_CP0_DESAVE][5])
-#define kvm_read_c0_guest_kscratch5(cop0) (cop0->reg[MIPS_CP0_DESAVE][6])
-#define kvm_read_c0_guest_kscratch6(cop0) (cop0->reg[MIPS_CP0_DESAVE][7])
-#define kvm_write_c0_guest_kscratch1(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][2] = (val))
-#define kvm_write_c0_guest_kscratch2(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][3] = (val))
-#define kvm_write_c0_guest_kscratch3(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][4] = (val))
-#define kvm_write_c0_guest_kscratch4(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][5] = (val))
-#define kvm_write_c0_guest_kscratch5(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][6] = (val))
-#define kvm_write_c0_guest_kscratch6(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][7] = (val))

/*
 * Some of the guest registers may be modified asynchronously (e.g. from a
 * hrtimer callback in hard irq context) and therefore need stronger atomicity
 * guarantees than other registers.
 */

static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{

@@ -471,26 +456,286 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
	} while (unlikely(!temp));
}

-#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
-#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
-
-/* Cause can be modified asynchronously from hardirq hrtimer callback */
-#define kvm_set_c0_guest_cause(cop0, val) \
-	_kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
-#define kvm_clear_c0_guest_cause(cop0, val) \
-	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
-#define kvm_change_c0_guest_cause(cop0, change, val) \
-	_kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], \
-					change, val)
-
-#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
-#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
-#define kvm_change_c0_guest_ebase(cop0, change, val) \
-{ \
-	kvm_clear_c0_guest_ebase(cop0, change); \
-	kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
-}

/* Guest register types, used in accessor build below */
#define __KVMT32	u32
#define __KVMTl	unsigned long

/*
 * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
 * These operate on the saved guest C0 state in RAM.
 */

/* Generate saved context simple accessors */
#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
{									\
	return cop0->reg[(_reg)][(sel)];				\
}									\
static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] = val;					\
}

/* Generate saved context bitwise modifiers */
#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] |= val;				\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] &= ~val;				\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	unsigned long _mask = mask;					\
	cop0->reg[(_reg)][(sel)] &= ~_mask;				\
	cop0->reg[(_reg)][(sel)] |= val & _mask;			\
}

/* Generate saved context atomic bitwise modifiers */
#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
					val);				\
}

/*
 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
 * These operate on the VZ guest C0 context in hardware.
 */

/* Generate VZ guest context simple accessors */
#define __BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
{									\
	return read_gc0_##name();					\
}									\
static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	write_gc0_##name(val);						\
}

/* Generate VZ guest context bitwise modifiers */
#define __BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	set_gc0_##name(val);						\
}									\
static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	clear_gc0_##name(val);						\
}									\
static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	change_gc0_##name(mask, val);					\
}

/* Generate VZ guest context save/restore to/from saved context */
#define __BUILD_KVM_SAVE_VZ(name, _reg, sel)				\
static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0)	\
{									\
	write_gc0_##name(cop0->reg[(_reg)][(sel)]);			\
}									\
static inline void kvm_save_gc0_##name(struct mips_coproc *cop0)	\
{									\
	cop0->reg[(_reg)][(sel)] = read_gc0_##name();			\
}

/*
 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
 * These wrap a set of operations to provide them with a different name.
 */

/* Generate simple accessor wrapper */
#define __BUILD_KVM_RW_WRAP(name1, name2, type)				\
static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0)	\
{									\
	return kvm_read_##name2(cop0);					\
}									\
static inline void kvm_write_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_write_##name2(cop0, val);					\
}

/* Generate bitwise modifier wrapper */
#define __BUILD_KVM_SET_WRAP(name1, name2, type)			\
static inline void kvm_set_##name1(struct mips_coproc *cop0,		\
				   __KVMT##type val)			\
{									\
	kvm_set_##name2(cop0, val);					\
}									\
static inline void kvm_clear_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_clear_##name2(cop0, val);					\
}									\
static inline void kvm_change_##name1(struct mips_coproc *cop0,		\
				      __KVMT##type mask,		\
				      __KVMT##type val)			\
{									\
	kvm_change_##name2(cop0, mask, val);				\
}

/*
 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
 * These generate accessors operating on the saved context in RAM, and wrap them
 * with the common guest C0 accessors (for use by common emulation code).
 */

#define __BUILD_KVM_RW_SW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_SET_SW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel)			\
	__BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#ifndef CONFIG_KVM_MIPS_VZ

/*
 * T&E (trap & emulate software based virtualisation)
 * We generate the common accessors operating exclusively on the saved context
 * in RAM.
 */

#define __BUILD_KVM_RW_HW	__BUILD_KVM_RW_SW
#define __BUILD_KVM_SET_HW	__BUILD_KVM_SET_SW
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_ATOMIC_SW

#else

/*
 * VZ (hardware assisted virtualisation)
 * These macros use the active guest state in VZ mode (hardware registers).
 */

/*
 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
 * These generate accessors operating on the VZ guest context in hardware, and
 * wrap them with the common guest C0 accessors (for use by common emulation
 * code).
 *
 * Accessors operating on the saved context in RAM are also generated to allow
 * convenient explicit saving and restoring of the state.
 */

#define __BUILD_KVM_RW_HW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type)	\
	__BUILD_KVM_SAVE_VZ(name, _reg, sel)

#define __BUILD_KVM_SET_HW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)

/*
 * We can't do atomic modifications of COP0 state if hardware can modify it.
 * Races must be handled explicitly.
 */
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_SET_HW

#endif

/*
 * Define accessors for CP0 registers that are accessible to the guest. These
 * are primarily used by common emulation code, which may need to access the
 * registers differently depending on the implementation.
 *
 *    fns_hw/sw    name     type    reg num         select
 */
__BUILD_KVM_RW_HW(index,          32, MIPS_CP0_TLB_INDEX,    0)
__BUILD_KVM_RW_HW(entrylo0,        l, MIPS_CP0_TLB_LO0,      0)
__BUILD_KVM_RW_HW(entrylo1,        l, MIPS_CP0_TLB_LO1,      0)
__BUILD_KVM_RW_HW(context,         l, MIPS_CP0_TLB_CONTEXT,  0)
__BUILD_KVM_RW_HW(contextconfig,  32, MIPS_CP0_TLB_CONTEXT,  1)
__BUILD_KVM_RW_HW(userlocal,       l, MIPS_CP0_TLB_CONTEXT,  2)
__BUILD_KVM_RW_HW(xcontextconfig,  l, MIPS_CP0_TLB_CONTEXT,  3)
__BUILD_KVM_RW_HW(pagemask,        l, MIPS_CP0_TLB_PG_MASK,  0)
__BUILD_KVM_RW_HW(pagegrain,      32, MIPS_CP0_TLB_PG_MASK,  1)
__BUILD_KVM_RW_HW(segctl0,         l, MIPS_CP0_TLB_PG_MASK,  2)
__BUILD_KVM_RW_HW(segctl1,         l, MIPS_CP0_TLB_PG_MASK,  3)
__BUILD_KVM_RW_HW(segctl2,         l, MIPS_CP0_TLB_PG_MASK,  4)
__BUILD_KVM_RW_HW(pwbase,          l, MIPS_CP0_TLB_PG_MASK,  5)
__BUILD_KVM_RW_HW(pwfield,         l, MIPS_CP0_TLB_PG_MASK,  6)
__BUILD_KVM_RW_HW(pwsize,          l, MIPS_CP0_TLB_PG_MASK,  7)
__BUILD_KVM_RW_HW(wired,          32, MIPS_CP0_TLB_WIRED,    0)
__BUILD_KVM_RW_HW(pwctl,          32, MIPS_CP0_TLB_WIRED,    6)
__BUILD_KVM_RW_HW(hwrena,         32, MIPS_CP0_HWRENA,       0)
__BUILD_KVM_RW_HW(badvaddr,        l, MIPS_CP0_BAD_VADDR,    0)
__BUILD_KVM_RW_HW(badinstr,       32, MIPS_CP0_BAD_VADDR,    1)
__BUILD_KVM_RW_HW(badinstrp,      32, MIPS_CP0_BAD_VADDR,    2)
__BUILD_KVM_RW_SW(count,          32, MIPS_CP0_COUNT,        0)
__BUILD_KVM_RW_HW(entryhi,         l, MIPS_CP0_TLB_HI,       0)
__BUILD_KVM_RW_HW(compare,        32, MIPS_CP0_COMPARE,      0)
__BUILD_KVM_RW_HW(status,         32, MIPS_CP0_STATUS,       0)
__BUILD_KVM_RW_HW(intctl,         32, MIPS_CP0_STATUS,       1)
__BUILD_KVM_RW_HW(cause,          32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_RW_HW(epc,             l, MIPS_CP0_EXC_PC,       0)
__BUILD_KVM_RW_SW(prid,           32, MIPS_CP0_PRID,         0)
__BUILD_KVM_RW_HW(ebase,           l, MIPS_CP0_PRID,         1)
__BUILD_KVM_RW_HW(config,         32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_RW_HW(config1,        32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_RW_HW(config2,        32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_RW_HW(config3,        32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_RW_HW(config4,        32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_RW_HW(config5,        32, MIPS_CP0_CONFIG,       5)
__BUILD_KVM_RW_HW(config6,        32, MIPS_CP0_CONFIG,       6)
__BUILD_KVM_RW_HW(config7,        32, MIPS_CP0_CONFIG,       7)
__BUILD_KVM_RW_SW(maari,           l, MIPS_CP0_LLADDR,       2)
__BUILD_KVM_RW_HW(xcontext,        l, MIPS_CP0_TLB_XCONTEXT, 0)
__BUILD_KVM_RW_HW(errorepc,        l, MIPS_CP0_ERROR_PC,     0)
__BUILD_KVM_RW_HW(kscratch1,       l, MIPS_CP0_DESAVE,       2)
__BUILD_KVM_RW_HW(kscratch2,       l, MIPS_CP0_DESAVE,       3)
__BUILD_KVM_RW_HW(kscratch3,       l, MIPS_CP0_DESAVE,       4)
__BUILD_KVM_RW_HW(kscratch4,       l, MIPS_CP0_DESAVE,       5)
__BUILD_KVM_RW_HW(kscratch5,       l, MIPS_CP0_DESAVE,       6)
__BUILD_KVM_RW_HW(kscratch6,       l, MIPS_CP0_DESAVE,       7)

/* Bitwise operations (on HW state) */
__BUILD_KVM_SET_HW(status,        32, MIPS_CP0_STATUS,       0)
/* Cause can be modified asynchronously from hardirq hrtimer callback */
__BUILD_KVM_ATOMIC_HW(cause,      32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_SET_HW(ebase,          l, MIPS_CP0_PRID,         1)

/* Bitwise operations (on saved state) */
__BUILD_KVM_SET_SAVED(config,     32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_SET_SAVED(config1,    32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_SET_SAVED(config2,    32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_SET_SAVED(config3,    32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_SET_SAVED(config4,    32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_SET_SAVED(config5,    32, MIPS_CP0_CONFIG,       5)

/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)

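For orientation, this is approximately what one builder invocation above
expands to when CONFIG_KVM_MIPS_VZ=n (where __BUILD_KVM_RW_HW aliases
__BUILD_KVM_RW_SW); derived by hand from the macros, for illustration only:

	/* __BUILD_KVM_RW_HW(index, 32, MIPS_CP0_TLB_INDEX, 0) */
	static inline u32 kvm_read_sw_gc0_index(struct mips_coproc *cop0)
	{
		return cop0->reg[MIPS_CP0_TLB_INDEX][0];
	}
	static inline void kvm_write_sw_gc0_index(struct mips_coproc *cop0,
						  u32 val)
	{
		cop0->reg[MIPS_CP0_TLB_INDEX][0] = val;
	}
	/* common-name wrappers used by shared emulation code */
	static inline u32 kvm_read_c0_guest_index(struct mips_coproc *cop0)
	{
		return kvm_read_sw_gc0_index(cop0);
	}
	static inline void kvm_write_c0_guest_index(struct mips_coproc *cop0,
						    u32 val)
	{
		kvm_write_sw_gc0_index(cop0, val);
	}
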
@@ -531,6 +776,10 @@ struct kvm_mips_callbacks {
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_extension)(struct kvm *kvm, long ext);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);

@@ -599,6 +848,10 @@ u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);

u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu, bool write_fault);
#endif
extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
					   struct kvm_vcpu *vcpu,
					   bool write_fault);

@@ -625,6 +878,18 @@ extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
				     unsigned long entryhi);

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa);
void kvm_vz_local_flush_roottlb_all_guests(void);
void kvm_vz_local_flush_guesttlb_all(void);
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
#endif

void kvm_mips_suspend_mm(int cpu);
void kvm_mips_resume_mm(int cpu);

@@ -795,7 +1060,7 @@ extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
-void kvm_mips_init_count(struct kvm_vcpu *vcpu);
+void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);

@@ -803,6 +1068,20 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

/* fairly internal functions requiring some care to use */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift);

#ifdef CONFIG_KVM_MIPS_VZ
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);
#else
static inline void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) {}
static inline void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {}
#endif

enum emulation_result kvm_mips_check_privilege(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,

@@ -827,11 +1106,20 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu);

/* COP0 */
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);

unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);

/* Hypercalls (hypcall.c) */

enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
					    union mips_instruction inst);
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);

/* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(union mips_instruction inst,
				      u32 *opc, struct kvm_vcpu *vcpu);

@@ -846,7 +1134,6 @@ extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);

static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,

@@ -36,7 +36,7 @@ unsigned platform_maar_init(unsigned num_pairs);
 * @upper:	The highest address that the MAAR pair will affect. Must be
 *		aligned to one byte before a 2^16 byte boundary.
 * @attrs:	The accessibility attributes to program, eg. MIPS_MAAR_S. The
- *		MIPS_MAAR_V attribute will automatically be set.
+ *		MIPS_MAAR_VL attribute will automatically be set.
 *
 * Program the pair of MAAR registers specified by idx to apply the attributes
 * specified by attrs to the range of addresses from lower to higher.

@@ -49,10 +49,10 @@ static inline void write_maar_pair(unsigned idx, phys_addr_t lower,
	BUG_ON(((upper & 0xffff) != 0xffff)
		|| ((upper & ~0xffffull) & ~(MIPS_MAAR_ADDR << 4)));

-	/* Automatically set MIPS_MAAR_V */
-	attrs |= MIPS_MAAR_V;
+	/* Automatically set MIPS_MAAR_VL */
+	attrs |= MIPS_MAAR_VL;

-	/* Write the upper address & attributes (only MIPS_MAAR_V matters) */
+	/* Write the upper address & attributes (only MIPS_MAAR_VL matters) */
	write_c0_maari(idx << 1);
	back_to_back_c0_hazard();
	write_c0_maar(((upper >> 4) & MIPS_MAAR_ADDR) | attrs);

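A hypothetical caller, for illustration: the bounds follow the documented
alignment rules (lower is 2^16-aligned, upper is one byte before a
2^16-byte boundary):

	/* mark the low 4GB of physical memory speculatable */
	write_maar_pair(0, 0x00000000, 0xffffffff, MIPS_MAAR_S);
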
@@ -81,7 +81,7 @@ extern void maar_init(void);
 * @upper:	The highest address that the MAAR pair will affect. Must be
 *		aligned to one byte before a 2^16 byte boundary.
 * @attrs:	The accessibility attributes to program, eg. MIPS_MAAR_S. The
- *		MIPS_MAAR_V attribute will automatically be set.
+ *		MIPS_MAAR_VL attribute will automatically be set.
 *
 * Describes the configuration of a pair of Memory Accessibility Attribute
 * Registers - applying attributes from attrs to the range of physical

@@ -34,8 +34,10 @@
 */
#ifdef __ASSEMBLY__
#define _ULCAST_
#define _U64CAST_
#else
#define _ULCAST_ (unsigned long)
#define _U64CAST_ (u64)
#endif

/*

@@ -217,8 +219,10 @@
/*
 * Wired register bits
 */
-#define MIPSR6_WIRED_LIMIT	(_ULCAST_(0xffff) << 16)
-#define MIPSR6_WIRED_WIRED	(_ULCAST_(0xffff) << 0)
+#define MIPSR6_WIRED_LIMIT_SHIFT 16
+#define MIPSR6_WIRED_LIMIT	(_ULCAST_(0xffff) << MIPSR6_WIRED_LIMIT_SHIFT)
+#define MIPSR6_WIRED_WIRED_SHIFT 0
+#define MIPSR6_WIRED_WIRED	(_ULCAST_(0xffff) << MIPSR6_WIRED_WIRED_SHIFT)

/*
 * Values used for computation of new tlb entries

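A small sketch of how the new shift macros pair with the masks (illustrative,
not part of the patch):

	u32 wired = read_c0_wired();
	u32 first_free = (wired & MIPSR6_WIRED_WIRED) >> MIPSR6_WIRED_WIRED_SHIFT;
	u32 limit      = (wired & MIPSR6_WIRED_LIMIT) >> MIPSR6_WIRED_LIMIT_SHIFT;
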
@@ -645,6 +649,7 @@
#define MIPS_CONF5_LLB		(_ULCAST_(1) << 4)
#define MIPS_CONF5_MVH		(_ULCAST_(1) << 5)
#define MIPS_CONF5_VP		(_ULCAST_(1) << 7)
#define MIPS_CONF5_SBRI		(_ULCAST_(1) << 6)
#define MIPS_CONF5_FRE		(_ULCAST_(1) << 8)
#define MIPS_CONF5_UFE		(_ULCAST_(1) << 9)
#define MIPS_CONF5_MSAEN	(_ULCAST_(1) << 27)

@@ -719,10 +724,14 @@
#define XLR_PERFCTRL_ALLTHREADS	(_ULCAST_(1) << 13)

/* MAAR bit definitions */
#define MIPS_MAAR_VH		(_U64CAST_(1) << 63)
#define MIPS_MAAR_ADDR		((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12)
#define MIPS_MAAR_ADDR_SHIFT	12
#define MIPS_MAAR_S		(_ULCAST_(1) << 1)
-#define MIPS_MAAR_V		(_ULCAST_(1) << 0)
+#define MIPS_MAAR_VL		(_ULCAST_(1) << 0)

/* MAARI bit definitions */
#define MIPS_MAARI_INDEX	(_ULCAST_(0x3f) << 0)

/* EBase bit definitions */
#define MIPS_EBASE_CPUNUM_SHIFT	0

@@ -736,6 +745,10 @@
#define MIPS_CMGCRB_BASE	11
#define MIPS_CMGCRF_BASE	(~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))

/* LLAddr bit definitions */
#define MIPS_LLADDR_LLB_SHIFT	0
#define MIPS_LLADDR_LLB		(_ULCAST_(1) << MIPS_LLADDR_LLB_SHIFT)

/*
 * Bits in the MIPS32 Memory Segmentation registers.
 */

@@ -961,6 +974,22 @@
/* Flush FTLB */
#define LOONGSON_DIAG_FTLB	(_ULCAST_(1) << 13)

/* CvmCtl register field definitions */
#define CVMCTL_IPPCI_SHIFT	7
#define CVMCTL_IPPCI		(_U64CAST_(0x7) << CVMCTL_IPPCI_SHIFT)
#define CVMCTL_IPTI_SHIFT	4
#define CVMCTL_IPTI		(_U64CAST_(0x7) << CVMCTL_IPTI_SHIFT)

/* CvmMemCtl2 register field definitions */
#define CVMMEMCTL2_INHIBITTS	(_U64CAST_(1) << 17)

/* CvmVMConfig register field definitions */
#define CVMVMCONF_DGHT		(_U64CAST_(1) << 60)
#define CVMVMCONF_MMUSIZEM1_S	12
#define CVMVMCONF_MMUSIZEM1	(_U64CAST_(0xff) << CVMVMCONF_MMUSIZEM1_S)
#define CVMVMCONF_RMMUSIZEM1_S	0
#define CVMVMCONF_RMMUSIZEM1	(_U64CAST_(0xff) << CVMVMCONF_RMMUSIZEM1_S)

/*
 * Coprocessor 1 (FPU) register names
 */

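The MMU-size fields encode "size minus one" values; a sketch of deriving the
guest and root TLB sizes from them (illustrative, not from the patch):

	u64 cvmvmconfig = read_c0_cvmvmconfig();
	unsigned int guest_tlbs = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
				   >> CVMVMCONF_MMUSIZEM1_S) + 1;
	unsigned int root_tlbs  = ((cvmvmconfig & CVMVMCONF_RMMUSIZEM1)
				   >> CVMVMCONF_RMMUSIZEM1_S) + 1;
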
@@ -1720,6 +1749,13 @@ do { \

#define read_c0_cvmmemctl()	__read_64bit_c0_register($11, 7)
#define write_c0_cvmmemctl(val)	__write_64bit_c0_register($11, 7, val)

#define read_c0_cvmmemctl2()	__read_64bit_c0_register($16, 6)
#define write_c0_cvmmemctl2(val) __write_64bit_c0_register($16, 6, val)

#define read_c0_cvmvmconfig()	__read_64bit_c0_register($16, 7)
#define write_c0_cvmvmconfig(val) __write_64bit_c0_register($16, 7, val)

/*
 * The cacheerr registers are not standardized. On OCTEON, they are
 * 64 bits wide.

@@ -1989,6 +2025,8 @@ do { \
#define read_gc0_epc()			__read_ulong_gc0_register(14, 0)
#define write_gc0_epc(val)		__write_ulong_gc0_register(14, 0, val)

#define read_gc0_prid()			__read_32bit_gc0_register(15, 0)

#define read_gc0_ebase()		__read_32bit_gc0_register(15, 1)
#define write_gc0_ebase(val)		__write_32bit_gc0_register(15, 1, val)

@@ -2012,6 +2050,9 @@ do { \
#define write_gc0_config6(val)		__write_32bit_gc0_register(16, 6, val)
#define write_gc0_config7(val)		__write_32bit_gc0_register(16, 7, val)

#define read_gc0_lladdr()		__read_ulong_gc0_register(17, 0)
#define write_gc0_lladdr(val)		__write_ulong_gc0_register(17, 0, val)

#define read_gc0_watchlo0()		__read_ulong_gc0_register(18, 0)
#define read_gc0_watchlo1()		__read_ulong_gc0_register(18, 1)
#define read_gc0_watchlo2()		__read_ulong_gc0_register(18, 2)

@@ -2090,6 +2131,19 @@ do { \
#define write_gc0_kscratch5(val)	__write_ulong_gc0_register(31, 6, val)
#define write_gc0_kscratch6(val)	__write_ulong_gc0_register(31, 7, val)

/* Cavium OCTEON (cnMIPS) */
#define read_gc0_cvmcount()		__read_ulong_gc0_register(9, 6)
#define write_gc0_cvmcount(val)		__write_ulong_gc0_register(9, 6, val)

#define read_gc0_cvmctl()		__read_64bit_gc0_register(9, 7)
#define write_gc0_cvmctl(val)		__write_64bit_gc0_register(9, 7, val)

#define read_gc0_cvmmemctl()		__read_64bit_gc0_register(11, 7)
#define write_gc0_cvmmemctl(val)	__write_64bit_gc0_register(11, 7, val)

#define read_gc0_cvmmemctl2()		__read_64bit_gc0_register(16, 6)
#define write_gc0_cvmmemctl2(val)	__write_64bit_gc0_register(16, 6, val)

/*
 * Macros to access the floating point coprocessor control registers
 */

@@ -2696,9 +2750,11 @@ __BUILD_SET_C0(brcm_mode)
 */
#define __BUILD_SET_GC0(name)	__BUILD_SET_COMMON(gc0_##name)

__BUILD_SET_GC0(wired)
__BUILD_SET_GC0(status)
__BUILD_SET_GC0(cause)
__BUILD_SET_GC0(ebase)
__BUILD_SET_GC0(config1)

/*
 * Return low 10 bits of ebase.

@@ -21,9 +21,11 @@
 */
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

-#define UNIQUE_ENTRYHI(idx) \
-		((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | \
+#define _UNIQUE_ENTRYHI(base, idx) \
+		(((base) + ((idx) << (PAGE_SHIFT + 1))) | \
		(cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
+#define UNIQUE_ENTRYHI(idx) _UNIQUE_ENTRYHI(CKSEG0, idx)
+#define UNIQUE_GUEST_ENTRYHI(idx) _UNIQUE_ENTRYHI(CKSEG1, idx)

static inline unsigned int num_wired_entries(void)
{

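A sketch of the intended use of the new guest variant (illustrative; assumes
the write_gc0_entryhi() accessor defined elsewhere in this series):

	/* invalidate guest TLB entry idx with a unique, non-matching EntryHi;
	 * the CKSEG1-based variant cannot collide with root flush values */
	write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(idx));
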
@@ -179,7 +179,7 @@ enum cop0_coi_func {
	tlbr_op       = 0x01, tlbwi_op      = 0x02,
	tlbwr_op      = 0x06, tlbp_op       = 0x08,
	rfe_op        = 0x10, eret_op       = 0x18,
-	wait_op       = 0x20,
+	wait_op       = 0x20, hypcall_op    = 0x28
};

/*

@@ -54,9 +54,14 @@ struct kvm_fpu {
 * Register set = 0: GP registers from kvm_regs (see definitions below).
 *
 * Register set = 1: CP0 registers.
- *  bits[15..8]  - Must be zero.
- *  bits[7..3]   - Register 'rd'  index.
- *  bits[2..0]   - Register 'sel' index.
+ *  bits[15..8]  - COP0 register set.
+ *
+ *  COP0 register set = 0: Main CP0 registers.
+ *   bits[7..3]   - Register 'rd'  index.
+ *   bits[2..0]   - Register 'sel' index.
+ *
+ *  COP0 register set = 1: MAARs.
+ *   bits[7..0]   - MAAR index.
 *
 * Register set = 2: KVM specific registers (see definitions below).
 *

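A hypothetical helper showing how a main-CP0 (set 0) id is composed from this
layout (the helper name is illustrative):

	#define MIPS_CP0_REG_ID(size, rd, sel) \
		(KVM_REG_MIPS_CP0 | (size) | ((rd) << 3) | (sel))
	/* e.g. CP0_EntryHi is rd=10, sel=0, 64-bit:
	 *   MIPS_CP0_REG_ID(KVM_REG_SIZE_U64, 10, 0)
	 */
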
@@ -114,6 +119,15 @@ struct kvm_fpu {
#define KVM_REG_MIPS_PC		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 34)


/*
 * KVM_REG_MIPS_CP0 - Coprocessor 0 registers.
 */

#define KVM_REG_MIPS_MAAR	(KVM_REG_MIPS_CP0 | (1 << 8))
#define KVM_REG_MIPS_CP0_MAAR(n)	(KVM_REG_MIPS_MAAR | \
					 KVM_REG_SIZE_U64 | (n))


/*
 * KVM_REG_MIPS_KVM - KVM specific control registers.
 */

@@ -289,6 +289,8 @@ static void cpu_set_fpu_opts(struct cpuinfo_mips *c)
			MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
		if (c->fpu_id & MIPS_FPIR_3D)
			c->ases |= MIPS_ASE_MIPS3D;
		if (c->fpu_id & MIPS_FPIR_UFRP)
			c->options |= MIPS_CPU_UFR;
		if (c->fpu_id & MIPS_FPIR_FREP)
			c->options |= MIPS_CPU_FRE;
	}

@@ -1003,7 +1005,8 @@ static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c)
	unsigned int config3, config3_dyn;

	probe_gc0_config_dyn(config3, config3, config3_dyn,
-			     MIPS_CONF_M | MIPS_CONF3_MSA | MIPS_CONF3_CTXTC);
+			     MIPS_CONF_M | MIPS_CONF3_MSA | MIPS_CONF3_ULRI |
+			     MIPS_CONF3_CTXTC);

	if (config3 & MIPS_CONF3_CTXTC)
		c->guest.options |= MIPS_CPU_CTXTC;

@@ -1013,6 +1016,9 @@ static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c)
	if (config3 & MIPS_CONF3_PW)
		c->guest.options |= MIPS_CPU_HTW;

	if (config3 & MIPS_CONF3_ULRI)
		c->guest.options |= MIPS_CPU_ULRI;

	if (config3 & MIPS_CONF3_SC)
		c->guest.options |= MIPS_CPU_SEGMENTS;

@@ -1051,7 +1057,7 @@ static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c)
	unsigned int config5, config5_dyn;

	probe_gc0_config_dyn(config5, config5, config5_dyn,
-			     MIPS_CONF_M | MIPS_CONF5_MRP);
+			     MIPS_CONF_M | MIPS_CONF5_MVH | MIPS_CONF5_MRP);

	if (config5 & MIPS_CONF5_MRP)
		c->guest.options |= MIPS_CPU_MAAR;

@@ -1061,6 +1067,9 @@ static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c)
	if (config5 & MIPS_CONF5_LLB)
		c->guest.options |= MIPS_CPU_RW_LLB;

	if (config5 & MIPS_CONF5_MVH)
		c->guest.options |= MIPS_CPU_MVH;

	if (config5 & MIPS_CONF_M)
		c->guest.conf |= BIT(6);
	return config5 & MIPS_CONF_M;

@@ -70,6 +70,7 @@ EXPORT_SYMBOL(perf_irq);
 */

unsigned int mips_hpt_frequency;
EXPORT_SYMBOL_GPL(mips_hpt_frequency);

/*
 * This function exists in order to cause an error due to a duplicate

@@ -26,11 +26,34 @@ config KVM
	select SRCU
	---help---
	  Support for hosting Guest kernels.
-	  Currently supported on MIPS32 processors.

choice
	prompt "Virtualization mode"
	depends on KVM
	default KVM_MIPS_TE

config KVM_MIPS_TE
	bool "Trap & Emulate"
	---help---
	  Use trap and emulate to virtualize 32-bit guests in user mode. This
	  does not require any special hardware Virtualization support beyond
	  standard MIPS32/64 r2 or later, but it does require the guest kernel
	  to be configured with CONFIG_KVM_GUEST=y so that it resides in the
	  user address segment.

config KVM_MIPS_VZ
	bool "MIPS Virtualization (VZ) ASE"
	---help---
	  Use the MIPS Virtualization (VZ) ASE to virtualize guests. This
	  supports running unmodified guest kernels (with CONFIG_KVM_GUEST=n),
	  but requires hardware support.

endchoice

config KVM_MIPS_DYN_TRANS
	bool "KVM/MIPS: Dynamic binary translation to reduce traps"
-	depends on KVM
+	depends on KVM_MIPS_TE
	default y
	---help---
	  When running in Trap & Emulate mode patch privileged
	  instructions to reduce the number of traps.

@@ -9,8 +9,15 @@ common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o

kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
	    interrupt.o stats.o commpage.o \
-	    dyntrans.o trap_emul.o fpu.o
+	    fpu.o
kvm-objs += hypcall.o
kvm-objs += mmu.o

ifdef CONFIG_KVM_MIPS_VZ
kvm-objs		+= vz.o
else
kvm-objs		+= dyntrans.o
kvm-objs		+= trap_emul.o
endif
obj-$(CONFIG_KVM)	+= kvm.o
obj-y			+= callback.o tlb.o

@@ -308,7 +308,7 @@ int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
 *         CP0_Cause.DC bit or the count_ctl.DC bit.
 *         0 otherwise (in which case CP0_Count timer is running).
 */
-static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
+int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

@@ -467,7 +467,7 @@ u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
 *
 * Returns:	The ktime at the point of freeze.
 */
-static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
+ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
	ktime_t now;

@ -516,6 +516,82 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
|
|||
hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
|
||||
* @vcpu: Virtual CPU.
|
||||
* @before: Time before Count was saved, lower bound of drift calculation.
|
||||
* @count: CP0_Count at point of restore.
|
||||
* @min_drift: Minimum amount of drift permitted before correction.
|
||||
* Must be <= 0.
|
||||
*
|
||||
* Restores the timer from a particular @count, accounting for drift. This can
|
||||
* be used in conjunction with kvm_mips_freeze_timer() when a hardware timer is
|
||||
* to be used for a period of time, but the exact ktime corresponding to the
|
||||
* final Count that must be restored is not known.
|
||||
*
|
||||
* It is gauranteed that a timer interrupt immediately after restore will be
|
||||
* handled, but not if CP0_Compare is exactly at @count. That case should
|
||||
* already be handled when the hardware timer state is saved.
|
||||
*
|
||||
* Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
|
||||
* stopped).
|
||||
*
|
||||
* Returns: Amount of correction to count_bias due to drift.
|
||||
*/
|
||||
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
|
||||
u32 count, int min_drift)
|
||||
{
|
||||
ktime_t now, count_time;
|
||||
u32 now_count, before_count;
|
||||
u64 delta;
|
||||
int drift, ret = 0;
|
||||
|
||||
/* Calculate expected count at before */
|
||||
before_count = vcpu->arch.count_bias +
|
||||
kvm_mips_ktime_to_count(vcpu, before);
|
||||
|
||||
/*
|
||||
* Detect significantly negative drift, where count is lower than
|
||||
* expected. Some negative drift is expected when hardware counter is
|
||||
* set after kvm_mips_freeze_timer(), and it is harmless to allow the
|
||||
* time to jump forwards a little, within reason. If the drift is too
|
||||
* significant, adjust the bias to avoid a big Guest.CP0_Count jump.
|
||||
*/
|
||||
drift = count - before_count;
|
||||
if (drift < min_drift) {
|
||||
count_time = before;
|
||||
vcpu->arch.count_bias += drift;
|
||||
ret = drift;
|
||||
goto resume;
|
||||
}
|
||||
|
||||
/* Calculate expected count right now */
|
||||
now = ktime_get();
|
||||
now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
|
||||
|
||||
/*
|
||||
* Detect positive drift, where count is higher than expected, and
|
||||
* adjust the bias to avoid guest time going backwards.
|
||||
*/
|
||||
drift = count - now_count;
|
||||
if (drift > 0) {
|
||||
count_time = now;
|
||||
vcpu->arch.count_bias += drift;
|
||||
ret = drift;
|
||||
goto resume;
|
||||
}
|
||||
|
||||
/* Subtract nanosecond delta to find ktime when count was read */
|
||||
delta = (u64)(u32)(now_count - count);
|
||||
delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
|
||||
count_time = ktime_sub_ns(now, delta);
|
||||
|
||||
resume:
|
||||
/* Resume using the calculated ktime */
|
||||
kvm_mips_resume_hrtimer(vcpu, count_time, count);
|
||||
return ret;
|
||||
}
|
||||
|
||||
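The core of the drift handling above is converting a wrapped 32-bit Count delta into nanoseconds at count_hz. A minimal standalone sketch of just that conversion (plain C outside the kernel; div_u64 replaced by ordinary 64-bit division, and the frequency is only illustrative):

	#include <stdint.h>
	#include <stdio.h>

	/* Convert a (possibly wrapped) 32-bit Count delta to ns at count_hz. */
	static uint64_t count_delta_to_ns(uint32_t now_count, uint32_t count,
					  uint32_t count_hz)
	{
		/* Unsigned subtraction handles Count wrap-around, as in the patch. */
		uint64_t delta = (uint32_t)(now_count - count);

		return delta * 1000000000ull / count_hz;
	}

	int main(void)
	{
		/* 2500 cycles at 100 MHz should come out as 25000 ns. */
		printf("%llu ns\n", (unsigned long long)
		       count_delta_to_ns(12500, 10000, 100000000));
		return 0;
	}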
/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
@@ -543,16 +619,15 @@ void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of timer.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
 * it going if it's enabled.
 * Initialise the timer to the specified frequency, zero it, and set it going if
 * it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
{
	/* 100 MHz */
	vcpu->arch.count_hz = 100*1000*1000;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
					  vcpu->arch.count_hz);
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Starting at 0 */
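count_period is a 32.32 fixed-point quantity: nanoseconds per 2^32 Count cycles. A quick standalone check of that representation (plain C; 100 MHz matches the old hardcoded default being removed here):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t nsec_per_sec = 1000000000ull;
		unsigned long count_hz = 100000000;	/* 100 MHz */

		/* ns per 2^32 cycles, i.e. (NSEC_PER_SEC << 32) / count_hz */
		uint64_t count_period = (nsec_per_sec << 32) / count_hz;

		/* Scaling back: one second's worth of cycles is ~1e9 ns. */
		uint64_t ns = (count_period * count_hz) >> 32;

		printf("count_period=%llu, one second ~= %llu ns\n",
		       (unsigned long long)count_period, (unsigned long long)ns);
		return 0;
	}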
@@ -622,7 +697,9 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	u32 old_compare = kvm_read_c0_guest_compare(cop0);
	ktime_t now;
	s32 delta = compare - old_compare;
	u32 cause;
	ktime_t now = ktime_set(0, 0); /* silence bogus GCC warning */
	u32 count;

	/* if unchanged, must just be an ack */

@@ -634,6 +711,21 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)

		return;
	}

	/*
	 * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted
	 * too to prevent guest CP0_Count hitting guest CP0_Compare.
	 *
	 * The new GTOffset corresponds to the new value of CP0_Compare, and is
	 * set prior to it being written into the guest context. We disable
	 * preemption until the new value is written to prevent restore of a
	 * GTOffset corresponding to the old CP0_Compare value.
	 */
	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0) {
		preempt_disable();
		write_c0_gtoffset(compare - read_c0_count());
		back_to_back_c0_hazard();
	}

	/* freeze_hrtimer() takes care of timer interrupts <= count */
	dc = kvm_mips_count_disabled(vcpu);
	if (!dc)

@@ -641,12 +733,36 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)

	if (ack)
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
	else if (IS_ENABLED(CONFIG_KVM_MIPS_VZ))
		/*
		 * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
		 * preserve guest CP0_Cause.TI if we don't want to ack it.
		 */
		cause = kvm_read_c0_guest_cause(cop0);

	kvm_write_c0_guest_compare(cop0, compare);

	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
		if (delta > 0)
			preempt_enable();

		back_to_back_c0_hazard();

		if (!ack && cause & CAUSEF_TI)
			kvm_write_c0_guest_cause(cop0, cause);
	}

	/* resume_hrtimer() takes care of timer interrupts > count */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);

	/*
	 * If guest CP0_Compare is moving backward, we delay CP0_GTOffset change
	 * until after the new CP0_Compare is written, otherwise new guest
	 * CP0_Count could hit new guest CP0_Compare.
	 */
	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta <= 0)
		write_c0_gtoffset(compare - read_c0_count());
}
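Both GTOffset writes rely on the VZ identity that the guest's Count is the root Count plus CP0_GTOffset, which is why write_c0_gtoffset(compare - read_c0_count()) makes the guest Count read as the new Compare at that instant; the only question the code settles is whether that happens before or after Compare itself is updated. A toy model of the identity (plain C; the values are stand-ins, not kernel APIs):

	#include <stdint.h>
	#include <stdio.h>

	/* VZ: Guest.CP0_Count = Root.CP0_Count + CP0_GTOffset (mod 2^32). */
	static uint32_t guest_count(uint32_t root_count, uint32_t gtoffset)
	{
		return root_count + gtoffset;
	}

	int main(void)
	{
		uint32_t root_count = 123456789;	/* stand-in for read_c0_count() */
		uint32_t compare = 123460000;		/* new guest CP0_Compare */

		/* GTOffset chosen as in the patch: compare - root Count */
		uint32_t gtoffset = compare - root_count;

		printf("guest Count now reads %u (new Compare is %u)\n",
		       guest_count(root_count, gtoffset), compare);
		return 0;
	}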
/**
@@ -857,6 +973,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
	if (!vcpu->arch.pending_exceptions) {
		kvm_vz_lose_htimer(vcpu);
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

@@ -873,17 +990,62 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
	return EMULATE_DONE;
}

/*
 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
 * we can catch this, if things ever change
 */
static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
				    unsigned long entryhi)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	int cpu, i;
	u32 nasid = entryhi & KVM_ENTRYHI_ASID;

	if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) {
		trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) &
					KVM_ENTRYHI_ASID, nasid);

		/*
		 * Flush entries from the GVA page tables.
		 * Guest user page table will get flushed lazily on re-entry to
		 * guest user if the guest ASID actually changes.
		 */
		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN);

		/*
		 * Regenerate/invalidate kernel MMU context.
		 * The user MMU context will be regenerated lazily on re-entry
		 * to guest user if the guest ASID actually changes.
		 */
		preempt_disable();
		cpu = smp_processor_id();
		get_new_mmu_context(kern_mm, cpu);
		for_each_possible_cpu(i)
			if (i != cpu)
				cpu_context(i, kern_mm) = 0;
		preempt_enable();
	}
	kvm_write_c0_guest_entryhi(cop0, entryhi);
}

enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb;
	unsigned long pc = vcpu->arch.pc;
	int index;

	kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
	return EMULATE_FAIL;
	index = kvm_read_c0_guest_index(cop0);
	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		/* UNDEFINED */
		kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index);
		index &= KVM_MIPS_GUEST_TLB_SIZE - 1;
	}

	tlb = &vcpu->arch.guest_tlb[index];
	kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask);
	kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]);
	kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]);
	kvm_mips_change_entryhi(vcpu, tlb->tlb_hi);

	return EMULATE_DONE;
}

/**
@@ -1105,11 +1267,9 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
					   struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	enum emulation_result er = EMULATE_DONE;
	u32 rt, rd, sel;
	unsigned long curr_pc;
	int cpu, i;

	/*
	 * Update PC and hold onto current PC in case there is

@@ -1143,6 +1303,9 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		case hypcall_op:
			er = kvm_mips_emul_hypcall(vcpu, inst);
			break;
		}
	} else {
		rt = inst.c0r_format.rt;

@@ -1208,44 +1371,8 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
				kvm_change_c0_guest_ebase(cop0, 0x1ffff000,
							  vcpu->arch.gprs[rt]);
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				u32 nasid =
					vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
				if (((kvm_read_c0_guest_entryhi(cop0) &
				      KVM_ENTRYHI_ASID) != nasid)) {
					trace_kvm_asid_change(vcpu,
						kvm_read_c0_guest_entryhi(cop0)
							& KVM_ENTRYHI_ASID,
						nasid);

					/*
					 * Flush entries from the GVA page
					 * tables.
					 * Guest user page table will get
					 * flushed lazily on re-entry to guest
					 * user if the guest ASID actually
					 * changes.
					 */
					kvm_mips_flush_gva_pt(kern_mm->pgd,
							      KMF_KERN);

					/*
					 * Regenerate/invalidate kernel MMU
					 * context.
					 * The user MMU context will be
					 * regenerated lazily on re-entry to
					 * guest user if the guest ASID actually
					 * changes.
					 */
					preempt_disable();
					cpu = smp_processor_id();
					get_new_mmu_context(kern_mm, cpu);
					for_each_possible_cpu(i)
						if (i != cpu)
							cpu_context(i, kern_mm) = 0;
					preempt_enable();
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
				kvm_mips_change_entryhi(vcpu,
							vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
@@ -1474,9 +1601,8 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	enum emulation_result er;
	u32 rt;
	u32 bytes;
	void *data = run->mmio.data;
	unsigned long curr_pc;

@@ -1491,103 +1617,74 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,

	rt = inst.i_format.rt;

	switch (inst.i_format.opcode) {
	case sb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
			  *(u8 *) data);
	run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
						vcpu->arch.host_cp0_badvaddr);
	if (run->mmio.phys_addr == KVM_INVALID_ADDR)
		goto out_fail;

	switch (inst.i_format.opcode) {
#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
	case sd_op:
		run->mmio.len = 8;
		*(u64 *)data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u64 *)data);
		break;
#endif

	case sw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u32 *) data = vcpu->arch.gprs[rt];
		run->mmio.len = 4;
		*(u32 *)data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *) data);
			  vcpu->arch.gprs[rt], *(u32 *)data);
		break;

	case sh_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u16 *) data = vcpu->arch.gprs[rt];
		run->mmio.len = 2;
		*(u16 *)data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *) data);
			  vcpu->arch.gprs[rt], *(u16 *)data);
		break;

	case sb_op:
		run->mmio.len = 1;
		*(u8 *)data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u8 *)data);
		break;

	default:
		kvm_err("Store not yet supported (inst=0x%08x)\n",
			inst.word);
		er = EMULATE_FAIL;
		break;
		goto out_fail;
	}

	/* Rollback PC if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;
	return EMULATE_DO_MMIO;

	return er;
out_fail:
	/* Rollback PC if emulation was unsuccessful */
	vcpu->arch.pc = curr_pc;
	return EMULATE_FAIL;
}

enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause, struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	enum emulation_result er;
	unsigned long curr_pc;
	u32 op, rt;
	u32 bytes;

	rt = inst.i_format.rt;
	op = inst.i_format.opcode;

@@ -1606,96 +1703,53 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,

	vcpu->arch.io_gpr = rt;

	switch (op) {
	case lw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
	run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
						vcpu->arch.host_cp0_badvaddr);
	if (run->mmio.phys_addr == KVM_INVALID_ADDR)
		return EMULATE_FAIL;

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
	vcpu->mmio_needed = 2;	/* signed */
	switch (op) {
#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
	case ld_op:
		run->mmio.len = 8;
		break;

	case lwu_op:
		vcpu->mmio_needed = 1;	/* unsigned */
		/* fall through */
#endif
	case lw_op:
		run->mmio.len = 4;
		break;

	case lh_op:
	case lhu_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		vcpu->mmio_needed = 1;	/* unsigned */
		/* fall through */
	case lh_op:
		run->mmio.len = 2;
		break;

	case lbu_op:
		vcpu->mmio_needed = 1;	/* unsigned */
		/* fall through */
	case lb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		run->mmio.len = 1;
		break;

	default:
		kvm_err("Load not yet supported (inst=0x%08x)\n",
			inst.word);
		er = EMULATE_FAIL;
		break;
		vcpu->mmio_needed = 0;
		return EMULATE_FAIL;
	}

	return er;
	run->mmio.is_write = 0;
	vcpu->mmio_is_write = 0;
	return EMULATE_DO_MMIO;
}
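In the rewritten load path, vcpu->mmio_needed doubles as a signedness flag (2 = sign-extend, 1 = zero-extend) which kvm_mips_complete_mmio_load consumes once the MMIO data comes back from userspace; see the 4-byte case further below. A standalone sketch of the convention (plain C; the parameter layout is simplified, not the kernel's structs):

	#include <stdint.h>
	#include <stdio.h>

	/* 2 = sign-extend (lw), 1 = zero-extend (lwu), as in vcpu->mmio_needed */
	static int64_t complete_load32(const void *mmio_data, int mmio_needed)
	{
		if (mmio_needed == 2)
			return *(const int32_t *)mmio_data;	/* signed */
		else
			return *(const uint32_t *)mmio_data;	/* unsigned */
	}

	int main(void)
	{
		uint32_t data = 0xfffffff0u;	/* -16 when sign-extended */

		printf("lw  -> %lld\n", (long long)complete_load32(&data, 2));
		printf("lwu -> %lld\n", (long long)complete_load32(&data, 1));
		return 0;
	}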
#ifndef CONFIG_KVM_MIPS_VZ
static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
						     unsigned long curr_pc,
						     unsigned long addr,

@@ -1786,11 +1840,35 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
		  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
		  arch->gprs[base], offset);

	if (cache == Cache_D)
	if (cache == Cache_D) {
#ifdef CONFIG_CPU_R4K_CACHE_TLB
		r4k_blast_dcache();
	else if (cache == Cache_I)
#else
		switch (boot_cpu_type()) {
		case CPU_CAVIUM_OCTEON3:
			/* locally flush icache */
			local_flush_icache_range(0, 0);
			break;
		default:
			__flush_cache_all();
			break;
		}
#endif
	} else if (cache == Cache_I) {
#ifdef CONFIG_CPU_R4K_CACHE_TLB
		r4k_blast_icache();
	else {
#else
		switch (boot_cpu_type()) {
		case CPU_CAVIUM_OCTEON3:
			/* locally flush icache */
			local_flush_icache_range(0, 0);
			break;
		default:
			flush_icache_all();
			break;
		}
#endif
	} else {
		kvm_err("%s: unsupported CACHE INDEX operation\n",
			__func__);
		return EMULATE_FAIL;

@@ -1870,18 +1948,6 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
	case cop0_op:
		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
		break;
	case sb_op:
	case sh_op:
	case sw_op:
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		break;
	case lb_op:
	case lbu_op:
	case lhu_op:
	case lh_op:
	case lw_op:
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		break;

#ifndef CONFIG_CPU_MIPSR6
	case cache_op:

@@ -1915,6 +1981,7 @@ unknown:

	return er;
}
#endif /* CONFIG_KVM_MIPS_VZ */

/**
 * kvm_mips_guest_exception_base() - Find guest exception vector base address.
@@ -2524,8 +2591,15 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
	vcpu->arch.pc = vcpu->arch.io_pc;

	switch (run->mmio.len) {
	case 8:
		*gpr = *(s64 *)run->mmio.data;
		break;

	case 4:
		*gpr = *(s32 *) run->mmio.data;
		if (vcpu->mmio_needed == 2)
			*gpr = *(s32 *)run->mmio.data;
		else
			*gpr = *(u32 *)run->mmio.data;
		break;

	case 2:
@@ -51,12 +51,15 @@
#define RA		31

/* Some CP0 registers */
#define C0_PWBASE	5, 5
#define C0_HWRENA	7, 0
#define C0_BADVADDR	8, 0
#define C0_BADINSTR	8, 1
#define C0_BADINSTRP	8, 2
#define C0_ENTRYHI	10, 0
#define C0_GUESTCTL1	10, 4
#define C0_STATUS	12, 0
#define C0_GUESTCTL0	12, 6
#define C0_CAUSE	13, 0
#define C0_EPC		14, 0
#define C0_EBASE	15, 1
@@ -292,8 +295,8 @@ static void *kvm_mips_build_enter_guest(void *addr)
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct uasm_label __maybe_unused *l = labels;
	struct uasm_reloc __maybe_unused *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

@@ -302,7 +305,67 @@ static void *kvm_mips_build_enter_guest(void *addr)
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	UASM_i_MTC0(&p, T0, C0_EPC);

	/* Set the ASID for the Guest Kernel */
#ifdef CONFIG_KVM_MIPS_VZ
	/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
	UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);

	/*
	 * Set up KVM GPA pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - write mm->pgd into CP0_PWBase
	 *
	 * We keep S0 pointing at struct kvm so we can load the ASID below.
	 */
	UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
			  (int)offsetof(struct kvm_vcpu, arch), K1);
	UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
	/* delay slot */
	if (cpu_has_htw)
		UASM_i_MTC0(&p, A0, C0_PWBASE);
	else
		uasm_i_nop(&p);

	/* Set GM bit to setup eret to VZ guest context */
	uasm_i_addiu(&p, V1, ZERO, 1);
	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
	uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);

	if (cpu_has_guestid) {
		/*
		 * Set root mode GuestID, so that root TLB refill handler can
		 * use the correct GuestID in the root TLB.
		 */

		/* Get current GuestID */
		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
		/* Set GuestCtl1.RID = GuestCtl1.ID */
		uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
			   MIPS_GCTL1_ID_WIDTH);
		uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
			   MIPS_GCTL1_RID_WIDTH);
		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);

		/* GuestID handles dealiasing so we don't need to touch ASID */
		goto skip_asid_restore;
	}

	/* Root ASID Dealias (RAD) */

	/* Save host ASID */
	UASM_i_MFC0(&p, K0, C0_ENTRYHI);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
		  K1);

	/* Set the root ASID for the Guest */
	UASM_i_ADDIU(&p, T1, S0,
		     offsetof(struct kvm, arch.gpa_mm.context.asid));
#else
	/* Set the ASID for the Guest Kernel or User */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
		  T0);

@@ -315,6 +378,7 @@ static void *kvm_mips_build_enter_guest(void *addr)
	UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
					  guest_user_mm.context.asid));
	uasm_l_kernel_asid(&l, p);
#endif

	/* t1: contains the base of the ASID array, need to get the cpu id */
	/* smp_processor_id */

@@ -339,6 +403,7 @@ static void *kvm_mips_build_enter_guest(void *addr)
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif

#ifndef CONFIG_KVM_MIPS_VZ
	/*
	 * Set up KVM T&E GVA pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():

@@ -351,7 +416,11 @@ static void *kvm_mips_build_enter_guest(void *addr)
	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
	uasm_i_mtc0(&p, K0, C0_ENTRYHI);

#else
	/* Set up KVM VZ root ASID (!guestid) */
	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
skip_asid_restore:
#endif
	uasm_i_ehb(&p);

	/* Disable RDHWR access */

@@ -559,13 +628,10 @@ void *kvm_mips_build_exit(void *addr)
	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
	uasm_i_move(&p, S1, A1);
	UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Restore run (vcpu->run) */
	UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
	/* Save pointer to run in s0, will be saved by the compiler */
	uasm_i_move(&p, S0, A0);
	UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process

@@ -641,6 +707,52 @@ void *kvm_mips_build_exit(void *addr)
		uasm_l_msa_1(&l, p);
	}

#ifdef CONFIG_KVM_MIPS_VZ
	/* Restore host ASID */
	if (!cpu_has_guestid) {
		UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
			  K1);
		UASM_i_MTC0(&p, K0, C0_ENTRYHI);
	}

	/*
	 * Set up normal Linux process pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - write mm->pgd into CP0_PWBase
	 */
	UASM_i_LW(&p, A0,
		  offsetof(struct kvm_vcpu_arch, host_pgd), K1);
	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
	/* delay slot */
	if (cpu_has_htw)
		UASM_i_MTC0(&p, A0, C0_PWBASE);
	else
		uasm_i_nop(&p);

	/* Clear GM bit so we don't enter guest mode when EXL is cleared */
	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
	uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);

	/* Save GuestCtl0 so we can access GExcCode after CPU migration */
	uasm_i_sw(&p, K0,
		  offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);

	if (cpu_has_guestid) {
		/*
		 * Clear root mode GuestID, so that root TLB operations use the
		 * root GuestID in the root TLB.
		 */
		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
		/* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
		uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
			   MIPS_GCTL1_RID_WIDTH);
		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
	}
#endif

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);

@@ -680,6 +792,8 @@ void *kvm_mips_build_exit(void *addr)
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	uasm_i_move(&p, A0, S0);
	uasm_i_move(&p, A1, S1);
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
@@ -0,0 +1,53 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Hypercall handling.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm_para.h>

#define MAX_HYPCALL_ARGS	4

enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
					    union mips_instruction inst)
{
	unsigned int code = (inst.co_format.code >> 5) & 0x3ff;

	kvm_debug("[%#lx] HYPCALL %#03x\n", vcpu->arch.pc, code);

	switch (code) {
	case 0:
		return EMULATE_HYPERCALL;
	default:
		return EMULATE_FAIL;
	};
}

static int kvm_mips_hypercall(struct kvm_vcpu *vcpu, unsigned long num,
			      const unsigned long *args, unsigned long *hret)
{
	/* Report unimplemented hypercall to guest */
	*hret = -KVM_ENOSYS;
	return RESUME_GUEST;
}

int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu)
{
	unsigned long num, args[MAX_HYPCALL_ARGS];

	/* read hypcall number and arguments */
	num = vcpu->arch.gprs[2];	/* v0 */
	args[0] = vcpu->arch.gprs[4];	/* a0 */
	args[1] = vcpu->arch.gprs[5];	/* a1 */
	args[2] = vcpu->arch.gprs[6];	/* a2 */
	args[3] = vcpu->arch.gprs[7];	/* a3 */

	return kvm_mips_hypercall(vcpu, num,
				  args, &vcpu->arch.gprs[2] /* v0 */);
}
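From the guest side, the ABI read by kvm_mips_handle_hypcall() is: hypercall number in v0 ($2), up to four arguments in a0-a3 ($4-$7), result back in v0. A sketch of a guest issuing hypercall 0 (the .word value is assumed to be the VZ HYPCALL encoding with code 0; it is not defined by this diff):

	/* Guest-side sketch: hypercall with one argument, result in v0. */
	static inline unsigned long kvm_hypcall1_sketch(unsigned long num,
							unsigned long arg0)
	{
		register unsigned long v0 asm("$2") = num;	/* hypercall number */
		register unsigned long a0 asm("$4") = arg0;	/* first argument */

		/* HYPCALL, code 0 (assumed encoding) */
		asm volatile(".word 0x42000028"
			     : "+r" (v0)
			     : "r" (a0)
			     : "memory");

		/* The stub handler above currently returns -KVM_ENOSYS here. */
		return v0;
	}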
@@ -30,8 +30,13 @@

#define C_TI		(_ULCAST_(1) << 30)

#ifdef CONFIG_KVM_MIPS_VZ
#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (1)
#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (1)
#else
#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
#endif

void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
@@ -59,6 +59,16 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "fpe",	  VCPU_STAT(fpe_exits),		 KVM_STAT_VCPU },
	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
#ifdef CONFIG_KVM_MIPS_VZ
	{ "vz_gpsi",	  VCPU_STAT(vz_gpsi_exits),	 KVM_STAT_VCPU },
	{ "vz_gsfc",	  VCPU_STAT(vz_gsfc_exits),	 KVM_STAT_VCPU },
	{ "vz_hc",	  VCPU_STAT(vz_hc_exits),	 KVM_STAT_VCPU },
	{ "vz_grr",	  VCPU_STAT(vz_grr_exits),	 KVM_STAT_VCPU },
	{ "vz_gva",	  VCPU_STAT(vz_gva_exits),	 KVM_STAT_VCPU },
	{ "vz_ghfc",	  VCPU_STAT(vz_ghfc_exits),	 KVM_STAT_VCPU },
	{ "vz_gpa",	  VCPU_STAT(vz_gpa_exits),	 KVM_STAT_VCPU },
	{ "vz_resvd",	  VCPU_STAT(vz_resvd_exits),	 KVM_STAT_VCPU },
#endif
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },

@@ -66,6 +76,19 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
	{NULL}
};

bool kvm_trace_guest_mode_change;

int kvm_guest_mode_change_trace_reg(void)
{
	kvm_trace_guest_mode_change = 1;
	return 0;
}

void kvm_guest_mode_change_trace_unreg(void)
{
	kvm_trace_guest_mode_change = 0;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
@@ -82,7 +105,12 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)

int kvm_arch_hardware_enable(void)
{
	return 0;
	return kvm_mips_callbacks->hardware_enable();
}

void kvm_arch_hardware_disable(void)
{
	kvm_mips_callbacks->hardware_disable();
}

int kvm_arch_hardware_setup(void)

@@ -97,6 +125,18 @@ void kvm_arch_check_processor_compat(void *rtn)

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	switch (type) {
#ifdef CONFIG_KVM_MIPS_VZ
	case KVM_VM_MIPS_VZ:
#else
	case KVM_VM_MIPS_TE:
#endif
		break;
	default:
		/* Unsupported KVM type */
		return -EINVAL;
	};

	/* Allocate page table to map GPA -> RPA */
	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
	if (!kvm->arch.gpa_mm.pgd)
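With kvm_arch_init_vm() now validating the VM type, userspace picks trap & emulate or VZ at KVM_CREATE_VM time. A minimal sketch of that negotiation (userspace C; assumes UAPI headers that define KVM_VM_MIPS_VZ, and falls back to the default machine type 0):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		if (kvm < 0)
			return 1;

		/* Try a VZ VM first; fall back to the default type (0). */
		int vm = ioctl(kvm, KVM_CREATE_VM, KVM_VM_MIPS_VZ);
		if (vm < 0)
			vm = ioctl(kvm, KVM_CREATE_VM, 0);

		printf("vm fd: %d\n", vm);
		return vm < 0;
	}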
@@ -301,8 +341,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB refill */
	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
	refill_start = gebase;
	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
		refill_start += 0x080;
	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);

	/* General Exception Entry point */

@@ -353,9 +395,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/* Start off the timer */
	kvm_mips_init_count(vcpu);
	vcpu->arch.last_exec_cpu = -1;

	return vcpu;

@@ -1059,7 +1099,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = 0;
		r = kvm_mips_callbacks->check_extension(kvm, ext);
		break;
	}
	return r;

@@ -1067,7 +1107,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu);
	return kvm_mips_pending_timer(vcpu) ||
		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)

@@ -1092,7 +1133,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

@@ -1208,7 +1249,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* re-enable HTW before enabling interrupts */
	htw_start();
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
		htw_start();

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;

@@ -1226,17 +1268,20 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
			cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	/*
	 * Do a privilege check, if in UM most of these exit conditions end up
	 * causing an exception to be delivered to the Guest Kernel
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
		goto skip_emul;
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		goto skip_emul;
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
		/*
		 * Do a privilege check, if in UM most of these exit conditions
		 * end up causing an exception to be delivered to the Guest
		 * Kernel
		 */
		er = kvm_mips_check_privilege(cause, opc, run, vcpu);
		if (er == EMULATE_PRIV_FAIL) {
			goto skip_emul;
		} else if (er == EMULATE_FAIL) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
			goto skip_emul;
		}
	}

	switch (exccode) {

@@ -1267,7 +1312,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
		kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

@@ -1328,12 +1373,17 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	case EXCCODE_GE:
		/* defer exit accounting to handler */
		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
		break;

	default:
		if (cause & CAUSEF_BD)
			opc += 1;
		inst = 0;
		kvm_get_badinstr(opc, vcpu, &inst);
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
			exccode, opc, inst, badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);

@@ -1346,6 +1396,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
skip_emul:
	local_irq_disable();

	if (ret == RESUME_GUEST)
		kvm_vz_acquire_htimer(vcpu);

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

@@ -1391,7 +1444,8 @@ skip_emul:
	}

	/* Disable HTW before returning to guest or host */
	htw_stop();
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
		htw_stop();

	return ret;
}
@@ -1527,16 +1581,18 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu)
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * FPU & MSA get disabled in root context (hardware) when it is disabled
	 * in guest context (software), but the register state in the hardware
	 * may still be in use. This is why we explicitly re-enable the hardware
	 * before saving.
	 * With T&E, FPU & MSA get disabled in root context (hardware) when it
	 * is disabled in guest context (software), but the register state in
	 * the hardware may still be in use.
	 * This is why we explicitly re-enable the hardware before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		set_c0_config5(MIPS_CONF5_MSAEN);
		enable_fpu_hazard();
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_config5(MIPS_CONF5_MSAEN);
			enable_fpu_hazard();
		}

		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

@@ -1549,8 +1605,10 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
		}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		set_c0_status(ST0_CU1);
		enable_fpu_hazard();
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_status(ST0_CU1);
			enable_fpu_hazard();
		}

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
|
@ -992,6 +992,22 @@ static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
|
|||
return kvm_mips_gpa_pte_to_gva_unmapped(pte);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KVM_MIPS_VZ
|
||||
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
|
||||
struct kvm_vcpu *vcpu,
|
||||
bool write_fault)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Invalidate this entry in the TLB */
|
||||
return kvm_vz_host_tlb_inv(vcpu, badvaddr);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* XXXKYMA: Must be called with interrupts disabled */
|
||||
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
|
||||
struct kvm_vcpu *vcpu,
|
||||
|
@ -1225,6 +1241,10 @@ int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
|
|||
{
|
||||
int err;
|
||||
|
||||
if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ),
|
||||
"Expect BadInstr/BadInstrP registers to be used with VZ\n"))
|
||||
return -EINVAL;
|
||||
|
||||
retry:
|
||||
kvm_trap_emul_gva_lockless_begin(vcpu);
|
||||
err = get_user(*out, opc);
|
||||
|
|
|
@@ -33,6 +33,25 @@
#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1

#ifdef CONFIG_KVM_MIPS_VZ
unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
unsigned long GUESTID_VERSION_MASK;
EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);

static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;

	if (cpu_has_guestid)
		return 0;
	else
		return cpu_asid(smp_processor_id(), gpa_mm);
}
#endif

static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;

@@ -166,6 +185,13 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,

	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing tlb
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	if (user && idx_user >= 0)
		kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |

@@ -179,6 +205,421 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

#ifdef CONFIG_KVM_MIPS_VZ

/* GuestID management */

/**
 * clear_root_gid() - Set GuestCtl1.RID for normal root operation.
 */
static inline void clear_root_gid(void)
{
	if (cpu_has_guestid) {
		clear_c0_guestctl1(MIPS_GCTL1_RID);
		mtc0_tlbw_hazard();
	}
}

/**
 * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
 *
 * Sets the root GuestID to match the current guest GuestID, for TLB operation
 * on the GPA->RPA mappings in the root TLB.
 *
 * The caller must be sure to disable HTW while the root GID is set, and
 * possibly longer if TLB registers are modified.
 */
static inline void set_root_gid_to_guest_gid(void)
{
	unsigned int guestctl1;

	if (cpu_has_guestid) {
		back_to_back_c0_hazard();
		guestctl1 = read_c0_guestctl1();
		guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
			((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
						 << MIPS_GCTL1_RID_SHIFT;
		write_c0_guestctl1(guestctl1);
		mtc0_tlbw_hazard();
	}
}

int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);
	htw_stop();

	/* Set root GuestID for root probe and write of guest TLB entry */
	set_root_gid_to_guest_gid();

	old_entryhi = read_c0_entryhi();

	idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
				     kvm_mips_get_root_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	clear_root_gid();
	mtc0_tlbw_hazard();

	htw_start();
	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing tlb
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	if (idx > 0)
		kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_root_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);

/**
 * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
 * @vcpu:	KVM VCPU pointer.
 * @gva:	Guest virtual address in a TLB mapped guest segment.
 * @gpa:	Pointer to output guest physical address it maps to.
 *
 * Converts a guest virtual address in a guest TLB mapped segment to a guest
 * physical address, by probing the guest TLB.
 *
 * Returns:	0 if guest TLB mapping exists for @gva. *@gpa will have been
 *		written.
 *		-EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
 *		have been written.
 */
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa)
{
	unsigned long o_entryhi, o_entrylo[2], o_pagemask;
	unsigned int o_index;
	unsigned long entrylo[2], pagemask, pagemaskbit, pa;
	unsigned long flags;
	int index;

	/* Probe the guest TLB for a mapping */
	local_irq_save(flags);
	/* Set root GuestID for root probe of guest TLB entry */
	htw_stop();
	set_root_gid_to_guest_gid();

	o_entryhi = read_gc0_entryhi();
	o_index = read_gc0_index();

	write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
	mtc0_tlbw_hazard();
	guest_tlb_probe();
	tlb_probe_hazard();

	index = read_gc0_index();
	if (index < 0) {
		/* No match, fail */
		write_gc0_entryhi(o_entryhi);
		write_gc0_index(o_index);

		clear_root_gid();
		htw_start();
		local_irq_restore(flags);
		return -EFAULT;
	}

	/* Match! read the TLB entry */
	o_entrylo[0] = read_gc0_entrylo0();
	o_entrylo[1] = read_gc0_entrylo1();
	o_pagemask = read_gc0_pagemask();

	mtc0_tlbr_hazard();
	guest_tlb_read();
	tlb_read_hazard();

	entrylo[0] = read_gc0_entrylo0();
	entrylo[1] = read_gc0_entrylo1();
	pagemask = ~read_gc0_pagemask() & ~0x1fffl;

	write_gc0_entryhi(o_entryhi);
	write_gc0_index(o_index);
	write_gc0_entrylo0(o_entrylo[0]);
	write_gc0_entrylo1(o_entrylo[1]);
	write_gc0_pagemask(o_pagemask);

	clear_root_gid();
	htw_start();
	local_irq_restore(flags);

	/* Select one of the EntryLo values and interpret the GPA */
	pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
	pa = entrylo[!!(gva & pagemaskbit)];

	/*
	 * TLB entry may have become invalid since TLB probe if physical FTLB
	 * entries are shared between threads (e.g. I6400).
	 */
	if (!(pa & ENTRYLO_V))
		return -EFAULT;

	/*
	 * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
	 * split with XI/RI in the middle.
	 */
	pa = (pa << 6) & ~0xfffl;
	pa |= gva & ~(pagemask | pagemaskbit);

	*gpa = pa;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);
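The pagemaskbit expression above isolates the lowest set bit of the inverted PageMask and halves it, yielding the virtual-address bit that selects between the even (EntryLo0) and odd (EntryLo1) page of the pair. A standalone check of that arithmetic for 4K and 16K pages (plain C; the PageMask values are the architectural ones):

	#include <stdio.h>

	/* Even/odd page select bit from the inverted PageMask, as above. */
	static unsigned long even_odd_bit(unsigned long pagemask)
	{
		/* Isolate the lowest set bit, then shift down one. */
		return (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
	}

	int main(void)
	{
		/* 4K pages: CP0_PageMask == 0, so pagemask = ~0 & ~0x1fffl. */
		unsigned long pm_4k = ~0ul & ~0x1ffful;
		/* 16K pages: CP0_PageMask == 0x6000. */
		unsigned long pm_16k = ~0x6000ul & ~0x1ffful;

		printf("4K:  bit %#lx\n", even_odd_bit(pm_4k));	/* 0x1000 */
		printf("16K: bit %#lx\n", even_odd_bit(pm_16k));	/* 0x4000 */
		return 0;
	}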
/**
 * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
 * guests.
 *
 * Invalidate all entries in root tlb which are GPA mappings.
 */
void kvm_vz_local_flush_roottlb_all_guests(void)
{
	unsigned long flags;
	unsigned long old_entryhi, old_pagemask, old_guestctl1;
	int entry;

	if (WARN_ON(!cpu_has_guestid))
		return;

	local_irq_save(flags);
	htw_stop();

	/* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	old_guestctl1 = read_c0_guestctl1();

	/*
	 * Invalidate guest entries in root TLB while leaving root entries
	 * intact when possible.
	 */
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_read();
		tlb_read_hazard();

		/* Don't invalidate non-guest (RVA) mappings in the root TLB */
		if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
			continue;

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		write_c0_guestctl1(0);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	write_c0_guestctl1(old_guestctl1);
	tlbw_use_hazard();

	htw_start();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);

/**
 * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
 *
 * Invalidate all entries in guest tlb irrespective of guestid.
 */
void kvm_vz_local_flush_guesttlb_all(void)
{
	unsigned long flags;
	unsigned long old_index;
	unsigned long old_entryhi;
	unsigned long old_entrylo[2];
	unsigned long old_pagemask;
	int entry;
	u64 cvmmemctl2 = 0;

	local_irq_save(flags);

	/* Preserve all clobbered guest registers */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo[0] = read_gc0_entrylo0();
	old_entrylo[1] = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Inhibit machine check due to multiple matching TLB entries */
		cvmmemctl2 = read_c0_cvmmemctl2();
		cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
		break;
	};

	/* Invalidate guest entries in guest TLB */
	write_gc0_entrylo0(0);
	write_gc0_entrylo1(0);
	write_gc0_pagemask(0);
	for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
		/* Make sure all entries differ. */
		write_gc0_index(entry);
		write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	if (cvmmemctl2) {
		cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
	};

	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo[0]);
	write_gc0_entrylo1(old_entrylo[1]);
	write_gc0_pagemask(old_pagemask);
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);

/**
 * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
 * @buf:	Buffer to write TLB entries into.
 * @index:	Start index.
 * @count:	Number of entries to save.
 *
 * Save a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	unsigned int guestctl1 = 0;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();
	if (cpu_has_guestid)
		guestctl1 = read_c0_guestctl1();

	/* Read each entry from guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);

		mtc0_tlbr_hazard();
		guest_tlb_read();
		tlb_read_hazard();

		if (cpu_has_guestid &&
		    (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
			/* Entry invalid or belongs to another guest */
			buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
			buf->tlb_lo[0] = 0;
			buf->tlb_lo[1] = 0;
			buf->tlb_mask = 0;
		} else {
			/* Entry belongs to the right guest */
			buf->tlb_hi = read_gc0_entryhi();
			buf->tlb_lo[0] = read_gc0_entrylo0();
			buf->tlb_lo[1] = read_gc0_entrylo1();
			buf->tlb_mask = read_gc0_pagemask();
		}
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);

/**
 * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
 * @buf:	Buffer to read TLB entries from.
 * @index:	Start index.
 * @count:	Number of entries to load.
 *
 * Load a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();

	/* Write each entry to guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);
		write_gc0_entryhi(buf->tlb_hi);
		write_gc0_entrylo0(buf->tlb_lo[0]);
		write_gc0_entrylo1(buf->tlb_lo[1]);
		write_gc0_pagemask(buf->tlb_mask);

		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);

#endif
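kvm_vz_save_guesttlb() and kvm_vz_load_guesttlb() are symmetric, so a caller can snapshot the guest TLB on one CPU and replay it on another. A sketch of such a round trip (kernel-style C; the buffer sizing and the migration point are illustrative, not taken from this diff):

	/* Sketch: snapshot and restore the full guest TLB. */
	static void vz_tlb_roundtrip_sketch(struct kvm_mips_tlb *buf,
					    unsigned int tlbsize)
	{
		unsigned long flags;

		local_irq_save(flags);			/* both helpers require this */
		kvm_vz_save_guesttlb(buf, 0, tlbsize);	/* entries 0..tlbsize-1 */
		local_irq_restore(flags);

		/* ... possibly migrate to another CPU here ... */

		local_irq_save(flags);
		kvm_vz_load_guesttlb(buf, 0, tlbsize);
		local_irq_restore(flags);
	}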
/**
 * kvm_mips_suspend_mm() - Suspend the active mm.
 * @cpu:	The CPU we're running on.
@ -17,6 +17,13 @@
|
|||
#define TRACE_INCLUDE_PATH .
|
||||
#define TRACE_INCLUDE_FILE trace
|
||||
|
||||
/*
|
||||
* arch/mips/kvm/mips.c
|
||||
*/
|
||||
extern bool kvm_trace_guest_mode_change;
|
||||
int kvm_guest_mode_change_trace_reg(void);
|
||||
void kvm_guest_mode_change_trace_unreg(void);
|
||||
|
||||
/*
|
||||
* Tracepoints for VM enters
|
||||
*/
|
||||
|
@ -62,10 +69,20 @@ DEFINE_EVENT(kvm_transition, kvm_out,
|
|||
#define KVM_TRACE_EXIT_MSA_FPE 14
|
||||
#define KVM_TRACE_EXIT_FPE 15
|
||||
#define KVM_TRACE_EXIT_MSA_DISABLED 21
|
||||
#define KVM_TRACE_EXIT_GUEST_EXIT 27
|
||||
/* Further exit reasons */
|
||||
#define KVM_TRACE_EXIT_WAIT 32
|
||||
#define KVM_TRACE_EXIT_CACHE 33
|
||||
#define KVM_TRACE_EXIT_SIGNAL 34
|
||||
/* 32 exit reasons correspond to GuestCtl0.GExcCode (VZ) */
|
||||
#define KVM_TRACE_EXIT_GEXCCODE_BASE 64
|
||||
#define KVM_TRACE_EXIT_GPSI 64 /* 0 */
|
||||
#define KVM_TRACE_EXIT_GSFC 65 /* 1 */
|
||||
#define KVM_TRACE_EXIT_HC 66 /* 2 */
|
||||
#define KVM_TRACE_EXIT_GRR 67 /* 3 */
|
||||
#define KVM_TRACE_EXIT_GVA 72 /* 8 */
|
||||
#define KVM_TRACE_EXIT_GHFC 73 /* 9 */
|
||||
#define KVM_TRACE_EXIT_GPA 74 /* 10 */
|
||||
|
||||
/* Tracepoints for VM exits */
|
||||
#define kvm_trace_symbol_exit_types \
|
||||
|
@@ -83,9 +100,17 @@ DEFINE_EVENT(kvm_transition, kvm_out,
	{ KVM_TRACE_EXIT_MSA_FPE,	"MSA FPE" },		\
	{ KVM_TRACE_EXIT_FPE,		"FPE" },		\
	{ KVM_TRACE_EXIT_MSA_DISABLED,	"MSA Disabled" },	\
	{ KVM_TRACE_EXIT_GUEST_EXIT,	"Guest Exit" },		\
	{ KVM_TRACE_EXIT_WAIT,		"WAIT" },		\
	{ KVM_TRACE_EXIT_CACHE,		"CACHE" },		\
-	{ KVM_TRACE_EXIT_SIGNAL,	"Signal" }
+	{ KVM_TRACE_EXIT_SIGNAL,	"Signal" },		\
	{ KVM_TRACE_EXIT_GPSI,		"GPSI" },		\
	{ KVM_TRACE_EXIT_GSFC,		"GSFC" },		\
	{ KVM_TRACE_EXIT_HC,		"HC" },			\
	{ KVM_TRACE_EXIT_GRR,		"GRR" },		\
	{ KVM_TRACE_EXIT_GVA,		"GVA" },		\
	{ KVM_TRACE_EXIT_GHFC,		"GHFC" },		\
	{ KVM_TRACE_EXIT_GPA,		"GPA" }

TRACE_EVENT(kvm_exit,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
@@ -158,6 +183,8 @@ TRACE_EVENT(kvm_exit,
	{ KVM_TRACE_COP0(16, 4),	"Config4" },		\
	{ KVM_TRACE_COP0(16, 5),	"Config5" },		\
	{ KVM_TRACE_COP0(16, 7),	"Config7" },		\
	{ KVM_TRACE_COP0(17, 1),	"MAAR" },		\
	{ KVM_TRACE_COP0(17, 2),	"MAARI" },		\
	{ KVM_TRACE_COP0(26, 0),	"ECC" },		\
	{ KVM_TRACE_COP0(30, 0),	"ErrorEPC" },		\
	{ KVM_TRACE_COP0(31, 2),	"KScratch1" },		\
@@ -268,6 +295,51 @@ TRACE_EVENT(kvm_asid_change,
		      __entry->new_asid)
);

TRACE_EVENT(kvm_guestid_change,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int guestid),
	    TP_ARGS(vcpu, guestid),
	    TP_STRUCT__entry(
			__field(unsigned int, guestid)
	    ),

	    TP_fast_assign(
			__entry->guestid = guestid;
	    ),

	    TP_printk("GuestID: 0x%02x",
		      __entry->guestid)
);
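Once defined, the event is fired through the generated trace_kvm_guestid_change() helper. A hedged sketch of a call site; the real caller lives in the suppressed VZ implementation diff, so the exact spot is an assumption:

	/* Illustrative call site: record the GuestID chosen for this run */
	trace_kvm_guestid_change(vcpu, guestid);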

TRACE_EVENT_FN(kvm_guest_mode_change,
	    TP_PROTO(struct kvm_vcpu *vcpu),
	    TP_ARGS(vcpu),
	    TP_STRUCT__entry(
			__field(unsigned long, epc)
			__field(unsigned long, pc)
			__field(unsigned long, badvaddr)
			__field(unsigned int, status)
			__field(unsigned int, cause)
	    ),

	    TP_fast_assign(
			__entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0);
			__entry->pc = vcpu->arch.pc;
			__entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0);
			__entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0);
			__entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0);
	    ),

	    TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx",
		      __entry->epc,
		      __entry->pc,
		      __entry->status,
		      __entry->cause,
		      __entry->badvaddr),

	    kvm_guest_mode_change_trace_reg,
	    kvm_guest_mode_change_trace_unreg
);
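TRACE_EVENT_FN differs from TRACE_EVENT in taking registration callbacks, invoked when the tracepoint is first enabled and last disabled. Given the declarations earlier in this file, the bodies in arch/mips/kvm/mips.c presumably just toggle the kvm_trace_guest_mode_change flag so guest-mode bookkeeping can be skipped while nobody is tracing. A sketch under that assumption:

/* Assumed implementation: flip the flag that the VZ code checks. */
bool kvm_trace_guest_mode_change;

int kvm_guest_mode_change_trace_reg(void)
{
	kvm_trace_guest_mode_change = true;
	return 0;
}

void kvm_guest_mode_change_trace_unreg(void)
{
	kvm_trace_guest_mode_change = false;
}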

#endif /* _TRACE_KVM_H */

/* This part must be outside protection */
@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
@@ -40,6 +41,29 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
	return gpa;
}

static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 inst = 0;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
		exccode, opc, inst, badvaddr,
		kvm_read_c0_guest_status(vcpu->arch.cop0));
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}

static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -82,6 +106,10 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
		ret = RESUME_HOST;
		break;

	case EMULATE_HYPERCALL:
		ret = kvm_mips_handle_hypcall(vcpu);
		break;

	default:
		BUG();
	}
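EMULATE_HYPERCALL is the new emulation status returned when the guest executes HYPCALL; kvm_mips_handle_hypcall() then decides whether to bounce the call out to userspace. For context, a guest-side sketch of issuing a hypercall under the ABI this series documents (number in $2/v0, arguments in $4-$7, return value back in $2); illustrative, not from this diff:

/*
 * Guest-side illustration: issue hypercall "num" with no arguments.
 * 0x42000028 is the HYPCALL instruction encoding (code field 0), for
 * assemblers that lack the mnemonic.
 */
static inline unsigned long kvm_hypcall0(unsigned long num)
{
	register unsigned long v0 asm("$2") = num;

	asm volatile(".word 0x42000028"	/* hypcall */
		     : "+r" (v0)
		     : : "memory");
	return v0;
}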

@@ -484,6 +512,31 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
	return ret;
}

static int kvm_trap_emul_hardware_enable(void)
{
	return 0;
}

static void kvm_trap_emul_hardware_disable(void)
{
}

static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_TE:
		r = 1;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}
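check_extension is the new per-implementation hook: the trap & emulate backend advertises only KVM_CAP_MIPS_TE, while the VZ backend (in the suppressed vz.c diff) advertises KVM_CAP_MIPS_VZ and friends. From userspace, the split is selected via the machine type passed to KVM_CREATE_VM. A minimal sketch, using only the UAPI constants added later in this pull:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Prefer a hardware-assisted (VZ) VM, falling back to trap & emulate. */
int create_mips_vm(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	int vm;

	if (kvm < 0)
		return -1;

	vm = ioctl(kvm, KVM_CREATE_VM, KVM_VM_MIPS_VZ);
	if (vm < 0)	/* VZ unsupported: force trap & emulate */
		vm = ioctl(kvm, KVM_CREATE_VM, KVM_VM_MIPS_TE);
	return vm;
}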

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
@@ -561,6 +614,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
	u32 config, config1;
	int vcpu_id = vcpu->vcpu_id;

	/* Start off the timer at 100 MHz */
	kvm_mips_init_count(vcpu, 100*1000*1000);

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected
@@ -589,6 +645,13 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* DCache line size not correctly reported in Config1 on Octeon CPUs */
	if (cpu_dcache_line_size()) {
		config1 &= ~MIPS_CONF1_DL;
		config1 |= ((ilog2(cpu_dcache_line_size()) - 1) <<
			    MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL;
	}

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
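Both writes follow the Config1 field encodings: DL holds log2(line size) - 1, so a 32-byte D-cache line encodes as 4, and the MMU size field at bits 30:25 holds the TLB entry count minus one. A small illustration of the arithmetic (the helper names are hypothetical; ilog2() is the kernel helper from <linux/log2.h>, newly included above):

/* Illustrative Config1 field encodings used in the hunk above */
static unsigned int conf1_dl(unsigned int line_bytes)
{
	return line_bytes ? ilog2(line_bytes) - 1 : 0;	/* 32B line -> 4 */
}

static unsigned int conf1_mmusize(unsigned int tlb_entries)
{
	return (tlb_entries - 1) & 0x3f;	/* 64-entry TLB -> 63 */
}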

@@ -892,10 +955,12 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
		if (v & CAUSEF_DC) {
			/* disable timer first */
			kvm_mips_count_disable_cause(vcpu);
-			kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
+			kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
+						  v);
		} else {
			/* enable timer last */
-			kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
+			kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
+						  v);
			kvm_mips_count_enable_cause(vcpu);
		}
	} else {
@@ -1230,7 +1295,11 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
	.handle_fpe = kvm_trap_emul_handle_fpe,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
	.handle_guest_exit = kvm_trap_emul_no_handler,

	.hardware_enable = kvm_trap_emul_hardware_enable,
	.hardware_disable = kvm_trap_emul_hardware_disable,
	.check_extension = kvm_trap_emul_check_extension,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_uninit = kvm_trap_emul_vcpu_uninit,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,

File diff suppressed because it is too large

@@ -24,6 +24,7 @@
/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
@@ -348,7 +348,7 @@ void maar_init(void)
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

		pr_info(" [%d]: ", i / 2);
-		if (!(attr & MIPS_MAAR_V)) {
+		if (!(attr & MIPS_MAAR_VL)) {
			pr_cont("disabled\n");
			continue;
		}
@@ -702,6 +702,10 @@ struct kvm_ppc_resize_hpt {
#define KVM_VM_PPC_HV 1
#define KVM_VM_PPC_PR 2

/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */
#define KVM_VM_MIPS_TE		0
#define KVM_VM_MIPS_VZ		1

#define KVM_S390_SIE_PAGE_OFFSET 1

/*
@@ -883,6 +887,9 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_PPC_MMU_RADIX 134
#define KVM_CAP_PPC_MMU_HASH_V3 135
#define KVM_CAP_IMMEDIATE_EXIT 136
#define KVM_CAP_MIPS_VZ 137
#define KVM_CAP_MIPS_TE 138
#define KVM_CAP_MIPS_64BIT 139

#ifdef KVM_CAP_IRQ_ROUTING