x86/xen: remove 32-bit pv leftovers

There are some remaining 32-bit pv-guest support leftovers in the Xen
hypercall interface. Remove them.

Signed-off-by: Juergen Gross <jgross@suse.com>
Link: https://lore.kernel.org/r/20211028081221.2475-2-jgross@suse.com
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Commit:     d99bb72a30
Parent:     a67efff288
Author:     Juergen Gross <jgross@suse.com>
AuthorDate: 2021-10-28 10:12:18 +02:00
Committer:  Boris Ostrovsky <boris.ostrovsky@oracle.com>

2 changed files with 18 additions and 47 deletions
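
For context on the hunks that follow: PV guests are only supported in 64-bit mode nowadays, and on x86-64 sizeof(long) == sizeof(u64), so every sizeof()-based check removed below always took its first branch; the _hypercall4() fallbacks that split a 64-bit value into low/high 32-bit halves were dead code. A minimal, hypothetical sketch (not part of the patch) stating that assumption at compile time:

	/*
	 * Hypothetical illustration, not part of the patch: on x86-64 a
	 * machine address or PTE (u64) fits into a single unsigned long
	 * hypercall argument, so no value ever needs to be split into
	 * low/high halves.
	 */
	#include <stdint.h>

	_Static_assert(sizeof(unsigned long) == sizeof(uint64_t),
		       "x86-64: one u64 fits in one hypercall argument");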

diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h

@@ -323,9 +323,7 @@ HYPERVISOR_get_debugreg(int reg)
 static inline int
 HYPERVISOR_update_descriptor(u64 ma, u64 desc)
 {
-	if (sizeof(u64) == sizeof(long))
-		return _hypercall2(int, update_descriptor, ma, desc);
-	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
+	return _hypercall2(int, update_descriptor, ma, desc);
 }
 
 static inline long
@@ -344,12 +342,7 @@ static inline int
 HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
 			     unsigned long flags)
 {
-	if (sizeof(new_val) == sizeof(long))
-		return _hypercall3(int, update_va_mapping, va,
-				   new_val.pte, flags);
-	else
-		return _hypercall4(int, update_va_mapping, va,
-				   new_val.pte, new_val.pte >> 32, flags);
+	return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
 }
 
 static inline int
@@ -461,16 +454,10 @@ MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
 {
 	mcl->op = __HYPERVISOR_update_va_mapping;
 	mcl->args[0] = va;
-	if (sizeof(new_val) == sizeof(long)) {
-		mcl->args[1] = new_val.pte;
-		mcl->args[2] = flags;
-	} else {
-		mcl->args[1] = new_val.pte;
-		mcl->args[2] = new_val.pte >> 32;
-		mcl->args[3] = flags;
-	}
+	mcl->args[1] = new_val.pte;
+	mcl->args[2] = flags;
 
-	trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 3 : 4);
+	trace_xen_mc_entry(mcl, 3);
 }
 
 static inline void
@@ -478,19 +465,10 @@ MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
 			struct desc_struct desc)
 {
 	mcl->op = __HYPERVISOR_update_descriptor;
-	if (sizeof(maddr) == sizeof(long)) {
-		mcl->args[0] = maddr;
-		mcl->args[1] = *(unsigned long *)&desc;
-	} else {
-		u32 *p = (u32 *)&desc;
-
-		mcl->args[0] = maddr;
-		mcl->args[1] = maddr >> 32;
-		mcl->args[2] = *p++;
-		mcl->args[3] = *p;
-	}
+	mcl->args[0] = maddr;
+	mcl->args[1] = *(unsigned long *)&desc;
 
-	trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 2 : 4);
+	trace_xen_mc_entry(mcl, 2);
 }
 
 static inline void

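The MULTI_*() helpers above fill in a multicall entry for batched hypercalls; each args[] slot holds one guest-word-sized argument, which is why the removed 32-bit paths needed two slots per 64-bit value and a larger count in trace_xen_mc_entry(). A rough, hedged sketch of that layout (field types simplified; the real definition lives in the Xen interface headers):

	/* Simplified sketch of a multicall entry, for illustration only. */
	struct example_multicall_entry {
		unsigned long op;      /* __HYPERVISOR_* hypercall number */
		long result;           /* status written back by Xen */
		unsigned long args[6]; /* word-sized hypercall arguments */
	};
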
diff --git a/drivers/xen/mem-reservation.c b/drivers/xen/mem-reservation.c
--- a/drivers/xen/mem-reservation.c
+++ b/drivers/xen/mem-reservation.c

@@ -35,6 +35,7 @@ void __xenmem_reservation_va_mapping_update(unsigned long count,
 	for (i = 0; i < count; i++) {
 		struct page *page = pages[i];
 		unsigned long pfn = page_to_pfn(page);
+		int ret;
 
 		BUG_ON(!page);
 
@@ -46,16 +47,10 @@ void __xenmem_reservation_va_mapping_update(unsigned long count,
 
 		set_phys_to_machine(pfn, frames[i]);
 
-		/* Link back into the page tables if not highmem. */
-		if (!PageHighMem(page)) {
-			int ret;
-
-			ret = HYPERVISOR_update_va_mapping(
-					(unsigned long)__va(pfn << PAGE_SHIFT),
-					mfn_pte(frames[i], PAGE_KERNEL),
-					0);
-			BUG_ON(ret);
-		}
+		ret = HYPERVISOR_update_va_mapping(
+				(unsigned long)__va(pfn << PAGE_SHIFT),
+				mfn_pte(frames[i], PAGE_KERNEL), 0);
+		BUG_ON(ret);
 	}
 }
 EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);
@@ -68,6 +63,7 @@ void __xenmem_reservation_va_mapping_reset(unsigned long count,
 	for (i = 0; i < count; i++) {
 		struct page *page = pages[i];
 		unsigned long pfn = page_to_pfn(page);
+		int ret;
 
 		/*
 		 * We don't support PV MMU when Linux and Xen are using
@@ -75,14 +71,11 @@ void __xenmem_reservation_va_mapping_reset(unsigned long count,
 		 */
 		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
 
-		if (!PageHighMem(page)) {
-			int ret;
-			ret = HYPERVISOR_update_va_mapping(
-					(unsigned long)__va(pfn << PAGE_SHIFT),
-					__pte_ma(0), 0);
-			BUG_ON(ret);
-		}
+		ret = HYPERVISOR_update_va_mapping(
+				(unsigned long)__va(pfn << PAGE_SHIFT),
+				__pte_ma(0), 0);
+		BUG_ON(ret);
 
 		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 	}
 }
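
In the two mem-reservation helpers above, the PageHighMem() test only ever mattered for 32-bit PV guests; x86-64 has no highmem, so the mapping update now runs unconditionally and ret is declared once at the top of the loop. A hedged illustration of why the branch was dead (hypothetical helper name, assuming the usual !CONFIG_HIGHMEM behaviour on 64-bit kernels):

	/*
	 * Hypothetical illustration, not kernel source: without
	 * CONFIG_HIGHMEM (always the case on x86-64) a PageHighMem()-style
	 * test is constantly false, so "if (!PageHighMem(page))" guarded
	 * nothing and its body always executed.
	 */
	static inline bool example_page_is_highmem(const struct page *page)
	{
	#ifdef CONFIG_HIGHMEM
		return PageHighMem(page);  /* real test on 32-bit kernels */
	#else
		return false;              /* x86-64: no highmem zone */
	#endif
	}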