KVM: PPC: Book3S HV: Remove virt mode checks from real mode handlers
Now that the P7/8 path no longer supports radix, real-mode handlers
do not need to deal with being called in virt mode.
This change effectively reverts commit acde25726b ("KVM: PPC: Book3S HV: Add radix checks in real-mode hypercall handlers").
It removes a few more real-mode tests in rm hcall handlers, which
allows the indirect ops for the xive module to be removed from the
built-in xics rm handlers.
kvmppc_h_random is renamed to kvmppc_rm_h_random to be a bit more
descriptive and consistent with other rm handlers.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210528090752.3542186-25-npiggin@gmail.com
This commit is contained in:
parent
9769a7fd79
commit
dcbac73a5b
|
@ -659,8 +659,6 @@ extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
|
||||||
u32 *priority);
|
u32 *priority);
|
||||||
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
|
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
|
||||||
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
|
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
|
||||||
extern void kvmppc_xive_init_module(void);
|
|
||||||
extern void kvmppc_xive_exit_module(void);
|
|
||||||
|
|
||||||
extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
|
extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
|
||||||
struct kvm_vcpu *vcpu, u32 cpu);
|
struct kvm_vcpu *vcpu, u32 cpu);
|
||||||
|
@ -686,8 +684,6 @@ static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
|
||||||
extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
|
extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
|
||||||
struct kvm_vcpu *vcpu, u32 cpu);
|
struct kvm_vcpu *vcpu, u32 cpu);
|
||||||
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
|
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
|
||||||
extern void kvmppc_xive_native_init_module(void);
|
|
||||||
extern void kvmppc_xive_native_exit_module(void);
|
|
||||||
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
|
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
|
||||||
union kvmppc_one_reg *val);
|
union kvmppc_one_reg *val);
|
||||||
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
|
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
|
||||||
|
@ -701,8 +697,6 @@ static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
|
||||||
u32 *priority) { return -1; }
|
u32 *priority) { return -1; }
|
||||||
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
|
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
|
||||||
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
|
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
|
||||||
static inline void kvmppc_xive_init_module(void) { }
|
|
||||||
static inline void kvmppc_xive_exit_module(void) { }
|
|
||||||
|
|
||||||
static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
|
static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
|
||||||
struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
|
struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
|
||||||
|
@ -725,8 +719,6 @@ static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
|
||||||
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
|
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
|
||||||
struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
|
struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
|
||||||
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
|
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
|
||||||
static inline void kvmppc_xive_native_init_module(void) { }
|
|
||||||
static inline void kvmppc_xive_native_exit_module(void) { }
|
|
||||||
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
|
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
|
||||||
union kvmppc_one_reg *val)
|
union kvmppc_one_reg *val)
|
||||||
{ return 0; }
|
{ return 0; }
|
||||||
|
@ -762,7 +754,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
|
||||||
unsigned long tce_value, unsigned long npages);
|
unsigned long tce_value, unsigned long npages);
|
||||||
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
|
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
|
||||||
unsigned int yield_count);
|
unsigned int yield_count);
|
||||||
long kvmppc_h_random(struct kvm_vcpu *vcpu);
|
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
|
||||||
void kvmhv_commence_exit(int trap);
|
void kvmhv_commence_exit(int trap);
|
||||||
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
|
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
|
||||||
void kvmppc_subcore_enter_guest(void);
|
void kvmppc_subcore_enter_guest(void);
|
||||||
|
|
|
@ -1050,13 +1050,10 @@ static int kvmppc_book3s_init(void)
|
||||||
#ifdef CONFIG_KVM_XICS
|
#ifdef CONFIG_KVM_XICS
|
||||||
#ifdef CONFIG_KVM_XIVE
|
#ifdef CONFIG_KVM_XIVE
|
||||||
if (xics_on_xive()) {
|
if (xics_on_xive()) {
|
||||||
kvmppc_xive_init_module();
|
|
||||||
kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
|
kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
|
||||||
if (kvmppc_xive_native_supported()) {
|
if (kvmppc_xive_native_supported())
|
||||||
kvmppc_xive_native_init_module();
|
|
||||||
kvm_register_device_ops(&kvm_xive_native_ops,
|
kvm_register_device_ops(&kvm_xive_native_ops,
|
||||||
KVM_DEV_TYPE_XIVE);
|
KVM_DEV_TYPE_XIVE);
|
||||||
}
|
|
||||||
} else
|
} else
|
||||||
#endif
|
#endif
|
||||||
kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
|
kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
|
||||||
|
@ -1066,12 +1063,6 @@ static int kvmppc_book3s_init(void)
|
||||||
|
|
||||||
static void kvmppc_book3s_exit(void)
|
static void kvmppc_book3s_exit(void)
|
||||||
{
|
{
|
||||||
#ifdef CONFIG_KVM_XICS
|
|
||||||
if (xics_on_xive()) {
|
|
||||||
kvmppc_xive_exit_module();
|
|
||||||
kvmppc_xive_native_exit_module();
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
|
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
|
||||||
kvmppc_book3s_exit_pr();
|
kvmppc_book3s_exit_pr();
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -391,10 +391,6 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
||||||
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
|
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
|
||||||
/* liobn, ioba, tce); */
|
/* liobn, ioba, tce); */
|
||||||
|
|
||||||
/* For radix, we might be in virtual mode, so punt */
|
|
||||||
if (kvm_is_radix(vcpu->kvm))
|
|
||||||
return H_TOO_HARD;
|
|
||||||
|
|
||||||
stt = kvmppc_find_table(vcpu->kvm, liobn);
|
stt = kvmppc_find_table(vcpu->kvm, liobn);
|
||||||
if (!stt)
|
if (!stt)
|
||||||
return H_TOO_HARD;
|
return H_TOO_HARD;
|
||||||
|
@ -489,10 +485,6 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
|
||||||
bool prereg = false;
|
bool prereg = false;
|
||||||
struct kvmppc_spapr_tce_iommu_table *stit;
|
struct kvmppc_spapr_tce_iommu_table *stit;
|
||||||
|
|
||||||
/* For radix, we might be in virtual mode, so punt */
|
|
||||||
if (kvm_is_radix(vcpu->kvm))
|
|
||||||
return H_TOO_HARD;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* used to check for invalidations in progress
|
* used to check for invalidations in progress
|
||||||
*/
|
*/
|
||||||
|
@ -602,10 +594,6 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
|
||||||
long i, ret;
|
long i, ret;
|
||||||
struct kvmppc_spapr_tce_iommu_table *stit;
|
struct kvmppc_spapr_tce_iommu_table *stit;
|
||||||
|
|
||||||
/* For radix, we might be in virtual mode, so punt */
|
|
||||||
if (kvm_is_radix(vcpu->kvm))
|
|
||||||
return H_TOO_HARD;
|
|
||||||
|
|
||||||
stt = kvmppc_find_table(vcpu->kvm, liobn);
|
stt = kvmppc_find_table(vcpu->kvm, liobn);
|
||||||
if (!stt)
|
if (!stt)
|
||||||
return H_TOO_HARD;
|
return H_TOO_HARD;
|
||||||
|
|
|
@ -34,21 +34,6 @@
|
||||||
#include "book3s_xics.h"
|
#include "book3s_xics.h"
|
||||||
#include "book3s_xive.h"
|
#include "book3s_xive.h"
|
||||||
|
|
||||||
/*
|
|
||||||
* The XIVE module will populate these when it loads
|
|
||||||
*/
|
|
||||||
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
|
|
||||||
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
|
|
||||||
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
|
|
||||||
unsigned long mfrr);
|
|
||||||
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
|
|
||||||
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
|
|
||||||
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
|
|
||||||
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
|
|
||||||
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
|
|
||||||
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
|
|
||||||
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
|
* Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
|
||||||
* should be power of 2.
|
* should be power of 2.
|
||||||
|
@ -196,16 +181,9 @@ int kvmppc_hwrng_present(void)
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
|
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
|
||||||
|
|
||||||
long kvmppc_h_random(struct kvm_vcpu *vcpu)
|
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
int r;
|
if (powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]))
|
||||||
|
|
||||||
/* Only need to do the expensive mfmsr() on radix */
|
|
||||||
if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
|
|
||||||
r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
|
|
||||||
else
|
|
||||||
r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
|
|
||||||
if (r)
|
|
||||||
return H_SUCCESS;
|
return H_SUCCESS;
|
||||||
|
|
||||||
return H_HARDWARE;
|
return H_HARDWARE;
|
||||||
|
@ -541,22 +519,13 @@ static long kvmppc_read_one_intr(bool *again)
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_KVM_XICS
|
#ifdef CONFIG_KVM_XICS
|
||||||
static inline bool is_rm(void)
|
|
||||||
{
|
|
||||||
return !(mfmsr() & MSR_DR);
|
|
||||||
}
|
|
||||||
|
|
||||||
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
|
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
if (!kvmppc_xics_enabled(vcpu))
|
if (!kvmppc_xics_enabled(vcpu))
|
||||||
return H_TOO_HARD;
|
return H_TOO_HARD;
|
||||||
if (xics_on_xive()) {
|
if (xics_on_xive())
|
||||||
if (is_rm())
|
return xive_rm_h_xirr(vcpu);
|
||||||
return xive_rm_h_xirr(vcpu);
|
else
|
||||||
if (unlikely(!__xive_vm_h_xirr))
|
|
||||||
return H_NOT_AVAILABLE;
|
|
||||||
return __xive_vm_h_xirr(vcpu);
|
|
||||||
} else
|
|
||||||
return xics_rm_h_xirr(vcpu);
|
return xics_rm_h_xirr(vcpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -565,13 +534,9 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
|
||||||
if (!kvmppc_xics_enabled(vcpu))
|
if (!kvmppc_xics_enabled(vcpu))
|
||||||
return H_TOO_HARD;
|
return H_TOO_HARD;
|
||||||
vcpu->arch.regs.gpr[5] = get_tb();
|
vcpu->arch.regs.gpr[5] = get_tb();
|
||||||
if (xics_on_xive()) {
|
if (xics_on_xive())
|
||||||
if (is_rm())
|
return xive_rm_h_xirr(vcpu);
|
||||||
return xive_rm_h_xirr(vcpu);
|
else
|
||||||
if (unlikely(!__xive_vm_h_xirr))
|
|
||||||
return H_NOT_AVAILABLE;
|
|
||||||
return __xive_vm_h_xirr(vcpu);
|
|
||||||
} else
|
|
||||||
return xics_rm_h_xirr(vcpu);
|
return xics_rm_h_xirr(vcpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -579,13 +544,9 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
|
||||||
{
|
{
|
||||||
if (!kvmppc_xics_enabled(vcpu))
|
if (!kvmppc_xics_enabled(vcpu))
|
||||||
return H_TOO_HARD;
|
return H_TOO_HARD;
|
||||||
if (xics_on_xive()) {
|
if (xics_on_xive())
|
||||||
if (is_rm())
|
return xive_rm_h_ipoll(vcpu, server);
|
||||||
return xive_rm_h_ipoll(vcpu, server);
|
else
|
||||||
if (unlikely(!__xive_vm_h_ipoll))
|
|
||||||
return H_NOT_AVAILABLE;
|
|
||||||
return __xive_vm_h_ipoll(vcpu, server);
|
|
||||||
} else
|
|
||||||
return H_TOO_HARD;
|
return H_TOO_HARD;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -594,13 +555,9 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
|
||||||
{
|
{
|
||||||
if (!kvmppc_xics_enabled(vcpu))
|
if (!kvmppc_xics_enabled(vcpu))
|
||||||
return H_TOO_HARD;
|
return H_TOO_HARD;
|
||||||
if (xics_on_xive()) {
|
if (xics_on_xive())
|
||||||
if (is_rm())
|
return xive_rm_h_ipi(vcpu, server, mfrr);
|
||||||
return xive_rm_h_ipi(vcpu, server, mfrr);
|
else
|
||||||
if (unlikely(!__xive_vm_h_ipi))
|
|
||||||
return H_NOT_AVAILABLE;
|
|
||||||
return __xive_vm_h_ipi(vcpu, server, mfrr);
|
|
||||||
} else
|
|
||||||
return xics_rm_h_ipi(vcpu, server, mfrr);
|
return xics_rm_h_ipi(vcpu, server, mfrr);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -608,13 +565,9 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
|
||||||
{
|
{
|
||||||
if (!kvmppc_xics_enabled(vcpu))
|
if (!kvmppc_xics_enabled(vcpu))
|
||||||
return H_TOO_HARD;
|
return H_TOO_HARD;
|
||||||
if (xics_on_xive()) {
|
if (xics_on_xive())
|
||||||
if (is_rm())
|
return xive_rm_h_cppr(vcpu, cppr);
|
||||||
return xive_rm_h_cppr(vcpu, cppr);
|
else
|
||||||
if (unlikely(!__xive_vm_h_cppr))
|
|
||||||
return H_NOT_AVAILABLE;
|
|
||||||
return __xive_vm_h_cppr(vcpu, cppr);
|
|
||||||
} else
|
|
||||||
return xics_rm_h_cppr(vcpu, cppr);
|
return xics_rm_h_cppr(vcpu, cppr);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -622,13 +575,9 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
|
||||||
{
|
{
|
||||||
if (!kvmppc_xics_enabled(vcpu))
|
if (!kvmppc_xics_enabled(vcpu))
|
||||||
return H_TOO_HARD;
|
return H_TOO_HARD;
|
||||||
if (xics_on_xive()) {
|
if (xics_on_xive())
|
||||||
if (is_rm())
|
return xive_rm_h_eoi(vcpu, xirr);
|
||||||
return xive_rm_h_eoi(vcpu, xirr);
|
else
|
||||||
if (unlikely(!__xive_vm_h_eoi))
|
|
||||||
return H_NOT_AVAILABLE;
|
|
||||||
return __xive_vm_h_eoi(vcpu, xirr);
|
|
||||||
} else
|
|
||||||
return xics_rm_h_eoi(vcpu, xirr);
|
return xics_rm_h_eoi(vcpu, xirr);
|
||||||
}
|
}
|
||||||
#endif /* CONFIG_KVM_XICS */
|
#endif /* CONFIG_KVM_XICS */
|
||||||
|
|
|
@ -2306,7 +2306,7 @@ hcall_real_table:
|
||||||
#else
|
#else
|
||||||
.long 0 /* 0x2fc - H_XIRR_X*/
|
.long 0 /* 0x2fc - H_XIRR_X*/
|
||||||
#endif
|
#endif
|
||||||
.long DOTSYM(kvmppc_h_random) - hcall_real_table
|
.long DOTSYM(kvmppc_rm_h_random) - hcall_real_table
|
||||||
.globl hcall_real_table_end
|
.globl hcall_real_table_end
|
||||||
hcall_real_table_end:
|
hcall_real_table_end:
|
||||||
|
|
||||||
|
|
|
@ -2352,21 +2352,3 @@ struct kvm_device_ops kvm_xive_ops = {
|
||||||
.get_attr = xive_get_attr,
|
.get_attr = xive_get_attr,
|
||||||
.has_attr = xive_has_attr,
|
.has_attr = xive_has_attr,
|
||||||
};
|
};
|
||||||
|
|
||||||
void kvmppc_xive_init_module(void)
|
|
||||||
{
|
|
||||||
__xive_vm_h_xirr = xive_vm_h_xirr;
|
|
||||||
__xive_vm_h_ipoll = xive_vm_h_ipoll;
|
|
||||||
__xive_vm_h_ipi = xive_vm_h_ipi;
|
|
||||||
__xive_vm_h_cppr = xive_vm_h_cppr;
|
|
||||||
__xive_vm_h_eoi = xive_vm_h_eoi;
|
|
||||||
}
|
|
||||||
|
|
||||||
void kvmppc_xive_exit_module(void)
|
|
||||||
{
|
|
||||||
__xive_vm_h_xirr = NULL;
|
|
||||||
__xive_vm_h_ipoll = NULL;
|
|
||||||
__xive_vm_h_ipi = NULL;
|
|
||||||
__xive_vm_h_cppr = NULL;
|
|
||||||
__xive_vm_h_eoi = NULL;
|
|
||||||
}
|
|
||||||
|
|
|
@ -289,13 +289,6 @@ extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
|
||||||
extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
|
extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
|
||||||
extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
|
extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
|
||||||
|
|
||||||
extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
|
|
||||||
extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
|
|
||||||
extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
|
|
||||||
unsigned long mfrr);
|
|
||||||
extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
|
|
||||||
extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Common Xive routines for XICS-over-XIVE and XIVE native
|
* Common Xive routines for XICS-over-XIVE and XIVE native
|
||||||
*/
|
*/
|
||||||
|
|
|
@ -1281,13 +1281,3 @@ struct kvm_device_ops kvm_xive_native_ops = {
|
||||||
.has_attr = kvmppc_xive_native_has_attr,
|
.has_attr = kvmppc_xive_native_has_attr,
|
||||||
.mmap = kvmppc_xive_native_mmap,
|
.mmap = kvmppc_xive_native_mmap,
|
||||||
};
|
};
|
||||||
|
|
||||||
void kvmppc_xive_native_init_module(void)
|
|
||||||
{
|
|
||||||
;
|
|
||||||
}
|
|
||||||
|
|
||||||
void kvmppc_xive_native_exit_module(void)
|
|
||||||
{
|
|
||||||
;
|
|
||||||
}
|
|
||||||
|
|
Loading…
Reference in New Issue