x86/xen: Move pv specific parts of arch/x86/xen/mmu.c to mmu_pv.c

There are some PV specific functions in arch/x86/xen/mmu.c which can be
moved to mmu_pv.c. This in turn enables building multicalls.c dependent on
CONFIG_XEN_PV.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: xen-devel@lists.xenproject.org
Cc: virtualization@lists.linux-foundation.org
Cc: akataria@vmware.com
Cc: rusty@rustcorp.com.au
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/20180828074026.820-3-jgross@suse.com
parent 28c11b0f79
commit f030aade91
arch/arm/xen/enlighten.c
@@ -62,29 +62,6 @@ static __read_mostly unsigned int xen_events_irq;
 uint32_t xen_start_flags;
 EXPORT_SYMBOL(xen_start_flags);
 
-int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
-			       unsigned long addr,
-			       xen_pfn_t *gfn, int nr,
-			       int *err_ptr, pgprot_t prot,
-			       unsigned domid,
-			       struct page **pages)
-{
-	return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
-					 prot, domid, pages);
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
-
-/* Not used by XENFEAT_auto_translated guests. */
-int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
-			       unsigned long addr,
-			       xen_pfn_t gfn, int nr,
-			       pgprot_t prot, unsigned domid,
-			       struct page **pages)
-{
-	return -ENOSYS;
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
-
 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
 			       int nr, struct page **pages)
 {
@@ -92,17 +69,6 @@ int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
 
-/* Not used by XENFEAT_auto_translated guests. */
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
-			       unsigned long addr,
-			       xen_pfn_t *mfn, int nr,
-			       int *err_ptr, pgprot_t prot,
-			       unsigned int domid, struct page **pages)
-{
-	return -ENOSYS;
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
-
 static void xen_read_wallclock(struct timespec64 *ts)
 {
 	u32 version;
arch/x86/xen/Makefile
@@ -15,7 +15,6 @@ CFLAGS_enlighten_pv.o := $(nostackp)
 CFLAGS_mmu_pv.o := $(nostackp)
 
 obj-y += enlighten.o
-obj-y += multicalls.o
 obj-y += mmu.o
 obj-y += time.o
 obj-y += grant-table.o
@@ -34,6 +33,7 @@ obj-$(CONFIG_XEN_PV) += p2m.o
 obj-$(CONFIG_XEN_PV) += enlighten_pv.o
 obj-$(CONFIG_XEN_PV) += mmu_pv.o
 obj-$(CONFIG_XEN_PV) += irq.o
+obj-$(CONFIG_XEN_PV) += multicalls.o
 obj-$(CONFIG_XEN_PV) += xen-asm.o
 obj-$(CONFIG_XEN_PV) += xen-asm_$(BITS).o
 
arch/x86/xen/mmu.c
@@ -6,12 +6,6 @@
 #include "multicalls.h"
 #include "mmu.h"
 
-/*
- * Protects atomic reservation decrease/increase against concurrent increases.
- * Also protects non-atomic updates of current_pages and balloon lists.
- */
-DEFINE_SPINLOCK(xen_reservation_lock);
-
 unsigned long arbitrary_virt_to_mfn(void *vaddr)
 {
 	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
@@ -42,186 +36,6 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
 }
 EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
 
-static noinline void xen_flush_tlb_all(void)
-{
-	struct mmuext_op *op;
-	struct multicall_space mcs;
-
-	preempt_disable();
-
-	mcs = xen_mc_entry(sizeof(*op));
-
-	op = mcs.args;
-	op->cmd = MMUEXT_TLB_FLUSH_ALL;
-	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
-
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
-
-	preempt_enable();
-}
-
-#define REMAP_BATCH_SIZE 16
-
-struct remap_data {
-	xen_pfn_t *pfn;
-	bool contiguous;
-	bool no_translate;
-	pgprot_t prot;
-	struct mmu_update *mmu_update;
-};
-
-static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token,
-				 unsigned long addr, void *data)
-{
-	struct remap_data *rmd = data;
-	pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
-
-	/*
-	 * If we have a contiguous range, just update the pfn itself,
-	 * else update pointer to be "next pfn".
-	 */
-	if (rmd->contiguous)
-		(*rmd->pfn)++;
-	else
-		rmd->pfn++;
-
-	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
-	rmd->mmu_update->ptr |= rmd->no_translate ?
-		MMU_PT_UPDATE_NO_TRANSLATE :
-		MMU_NORMAL_PT_UPDATE;
-	rmd->mmu_update->val = pte_val_ma(pte);
-	rmd->mmu_update++;
-
-	return 0;
-}
-
-static int do_remap_pfn(struct vm_area_struct *vma,
-			unsigned long addr,
-			xen_pfn_t *pfn, int nr,
-			int *err_ptr, pgprot_t prot,
-			unsigned int domid,
-			bool no_translate,
-			struct page **pages)
-{
-	int err = 0;
-	struct remap_data rmd;
-	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
-	unsigned long range;
-	int mapped = 0;
-
-	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
-
-	rmd.pfn = pfn;
-	rmd.prot = prot;
-	/*
-	 * We use the err_ptr to indicate if there we are doing a contiguous
-	 * mapping or a discontigious mapping.
-	 */
-	rmd.contiguous = !err_ptr;
-	rmd.no_translate = no_translate;
-
-	while (nr) {
-		int index = 0;
-		int done = 0;
-		int batch = min(REMAP_BATCH_SIZE, nr);
-		int batch_left = batch;
-		range = (unsigned long)batch << PAGE_SHIFT;
-
-		rmd.mmu_update = mmu_update;
-		err = apply_to_page_range(vma->vm_mm, addr, range,
-					  remap_area_pfn_pte_fn, &rmd);
-		if (err)
-			goto out;
-
-		/* We record the error for each page that gives an error, but
-		 * continue mapping until the whole set is done */
-		do {
-			int i;
-
-			err = HYPERVISOR_mmu_update(&mmu_update[index],
-						    batch_left, &done, domid);
-
-			/*
-			 * @err_ptr may be the same buffer as @gfn, so
-			 * only clear it after each chunk of @gfn is
-			 * used.
-			 */
-			if (err_ptr) {
-				for (i = index; i < index + done; i++)
-					err_ptr[i] = 0;
-			}
-			if (err < 0) {
-				if (!err_ptr)
-					goto out;
-				err_ptr[i] = err;
-				done++; /* Skip failed frame. */
-			} else
-				mapped += done;
-			batch_left -= done;
-			index += done;
-		} while (batch_left);
-
-		nr -= batch;
-		addr += range;
-		if (err_ptr)
-			err_ptr += batch;
-		cond_resched();
-	}
-out:
-
-	xen_flush_tlb_all();
-
-	return err < 0 ? err : mapped;
-}
-
-int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
-			       unsigned long addr,
-			       xen_pfn_t gfn, int nr,
-			       pgprot_t prot, unsigned domid,
-			       struct page **pages)
-{
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return -EOPNOTSUPP;
-
-	return do_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
-			    pages);
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
-
-int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
-			       unsigned long addr,
-			       xen_pfn_t *gfn, int nr,
-			       int *err_ptr, pgprot_t prot,
-			       unsigned domid, struct page **pages)
-{
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
-						 prot, domid, pages);
-
-	/* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
-	 * and the consequences later is quite hard to detect what the actual
-	 * cause of "wrong memory was mapped in".
-	 */
-	BUG_ON(err_ptr == NULL);
-	return do_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
-			    false, pages);
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
-
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
-			       unsigned long addr,
-			       xen_pfn_t *mfn, int nr,
-			       int *err_ptr, pgprot_t prot,
-			       unsigned int domid, struct page **pages)
-{
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return -EOPNOTSUPP;
-
-	return do_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
-			    true, pages);
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
-
 /* Returns: 0 success */
 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
 			       int nr, struct page **pages)
arch/x86/xen/mmu_pv.c
@@ -98,6 +98,12 @@ static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
 #endif /* CONFIG_X86_64 */
 
+/*
+ * Protects atomic reservation decrease/increase against concurrent increases.
+ * Also protects non-atomic updates of current_pages and balloon lists.
+ */
+DEFINE_SPINLOCK(xen_reservation_lock);
+
 /*
  * Note about cr3 (pagetable base) values:
  *
@@ -2662,6 +2668,138 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
 }
 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
 
+static noinline void xen_flush_tlb_all(void)
+{
+	struct mmuext_op *op;
+	struct multicall_space mcs;
+
+	preempt_disable();
+
+	mcs = xen_mc_entry(sizeof(*op));
+
+	op = mcs.args;
+	op->cmd = MMUEXT_TLB_FLUSH_ALL;
+	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+	preempt_enable();
+}
+
+#define REMAP_BATCH_SIZE 16
+
+struct remap_data {
+	xen_pfn_t *pfn;
+	bool contiguous;
+	bool no_translate;
+	pgprot_t prot;
+	struct mmu_update *mmu_update;
+};
+
+static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token,
+				 unsigned long addr, void *data)
+{
+	struct remap_data *rmd = data;
+	pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
+
+	/*
+	 * If we have a contiguous range, just update the pfn itself,
+	 * else update pointer to be "next pfn".
+	 */
+	if (rmd->contiguous)
+		(*rmd->pfn)++;
+	else
+		rmd->pfn++;
+
+	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
+	rmd->mmu_update->ptr |= rmd->no_translate ?
+		MMU_PT_UPDATE_NO_TRANSLATE :
+		MMU_NORMAL_PT_UPDATE;
+	rmd->mmu_update->val = pte_val_ma(pte);
+	rmd->mmu_update++;
+
+	return 0;
+}
+
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+		  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+		  unsigned int domid, bool no_translate, struct page **pages)
+{
+	int err = 0;
+	struct remap_data rmd;
+	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
+	unsigned long range;
+	int mapped = 0;
+
+	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
+
+	rmd.pfn = pfn;
+	rmd.prot = prot;
+	/*
+	 * We use the err_ptr to indicate if there we are doing a contiguous
+	 * mapping or a discontigious mapping.
+	 */
+	rmd.contiguous = !err_ptr;
+	rmd.no_translate = no_translate;
+
+	while (nr) {
+		int index = 0;
+		int done = 0;
+		int batch = min(REMAP_BATCH_SIZE, nr);
+		int batch_left = batch;
+
+		range = (unsigned long)batch << PAGE_SHIFT;
+
+		rmd.mmu_update = mmu_update;
+		err = apply_to_page_range(vma->vm_mm, addr, range,
+					  remap_area_pfn_pte_fn, &rmd);
+		if (err)
+			goto out;
+
+		/*
+		 * We record the error for each page that gives an error, but
+		 * continue mapping until the whole set is done
+		 */
+		do {
+			int i;
+
+			err = HYPERVISOR_mmu_update(&mmu_update[index],
+						    batch_left, &done, domid);
+
+			/*
+			 * @err_ptr may be the same buffer as @gfn, so
+			 * only clear it after each chunk of @gfn is
+			 * used.
+			 */
+			if (err_ptr) {
+				for (i = index; i < index + done; i++)
+					err_ptr[i] = 0;
+			}
+			if (err < 0) {
+				if (!err_ptr)
+					goto out;
+				err_ptr[i] = err;
+				done++; /* Skip failed frame. */
+			} else
+				mapped += done;
+			batch_left -= done;
+			index += done;
+		} while (batch_left);
+
+		nr -= batch;
+		addr += range;
+		if (err_ptr)
+			err_ptr += batch;
+		cond_resched();
+	}
+out:
+
+	xen_flush_tlb_all();
+
+	return err < 0 ? err : mapped;
+}
+EXPORT_SYMBOL_GPL(xen_remap_pfn);
+
 #ifdef CONFIG_KEXEC_CORE
 phys_addr_t paddr_vmcoreinfo_note(void)
 {
include/xen/interface/memory.h
@@ -244,12 +244,6 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map);
 #define XENMEM_machine_memory_map	10
 
 
-/*
- * Prevent the balloon driver from changing the memory reservation
- * during a driver critical region.
- */
-extern spinlock_t xen_reservation_lock;
-
 /*
  * Unmaps the page appearing at a particular GPFN from the specified guest's
  * pseudophysical address space.
include/xen/xen-ops.h
@@ -5,6 +5,7 @@
 #include <linux/percpu.h>
 #include <linux/notifier.h>
 #include <linux/efi.h>
+#include <xen/features.h>
 #include <asm/xen/interface.h>
 #include <xen/interface/vcpu.h>
 
@@ -47,6 +48,10 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
 				 dma_addr_t *dma_handle);
 
 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
+
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+		  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+		  unsigned int domid, bool no_translate, struct page **pages);
 #else
 static inline int xen_create_contiguous_region(phys_addr_t pstart,
 					       unsigned int order,
@@ -58,82 +63,25 @@ static inline int xen_create_contiguous_region(phys_addr_t pstart,
 
 static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
 						 unsigned int order) { }
+
+static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+				xen_pfn_t *pfn, int nr, int *err_ptr,
+				pgprot_t prot, unsigned int domid,
+				bool no_translate, struct page **pages)
+{
+	BUG();
+	return 0;
+}
 #endif
 
 struct vm_area_struct;
 
-/*
- * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
- * @vma:     VMA to map the pages into
- * @addr:    Address at which to map the pages
- * @gfn:     Array of GFNs to map
- * @nr:      Number entries in the GFN array
- * @err_ptr: Returns per-GFN error status.
- * @prot:    page protection mask
- * @domid:   Domain owning the pages
- * @pages:   Array of pages if this domain has an auto-translated physmap
- *
- * @gfn and @err_ptr may point to the same buffer, the GFNs will be
- * overwritten by the error codes after they are mapped.
- *
- * Returns the number of successfully mapped frames, or a -ve error
- * code.
- */
-int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
-			       unsigned long addr,
-			       xen_pfn_t *gfn, int nr,
-			       int *err_ptr, pgprot_t prot,
-			       unsigned domid,
-			       struct page **pages);
-
-/*
- * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn
- * @vma:     VMA to map the pages into
- * @addr:    Address at which to map the pages
- * @mfn:     Array of MFNs to map
- * @nr:      Number entries in the MFN array
- * @err_ptr: Returns per-MFN error status.
- * @prot:    page protection mask
- * @domid:   Domain owning the pages
- * @pages:   Array of pages if this domain has an auto-translated physmap
- *
- * @mfn and @err_ptr may point to the same buffer, the MFNs will be
- * overwritten by the error codes after they are mapped.
- *
- * Returns the number of successfully mapped frames, or a -ve error
- * code.
- */
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
-			       unsigned long addr, xen_pfn_t *mfn, int nr,
-			       int *err_ptr, pgprot_t prot,
-			       unsigned int domid, struct page **pages);
-
-/* xen_remap_domain_gfn_range() - map a range of foreign frames
- * @vma:   VMA to map the pages into
- * @addr:  Address at which to map the pages
- * @gfn:   First GFN to map.
- * @nr:    Number frames to map
- * @prot:  page protection mask
- * @domid: Domain owning the pages
- * @pages: Array of pages if this domain has an auto-translated physmap
- *
- * Returns the number of successfully mapped frames, or a -ve error
- * code.
- */
-int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
-			       unsigned long addr,
-			       xen_pfn_t gfn, int nr,
-			       pgprot_t prot, unsigned domid,
-			       struct page **pages);
-int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
-			       int numpgs, struct page **pages);
-
 #ifdef CONFIG_XEN_AUTO_XLATE
 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
 			      unsigned long addr,
 			      xen_pfn_t *gfn, int nr,
 			      int *err_ptr, pgprot_t prot,
-			      unsigned domid,
+			      unsigned int domid,
 			      struct page **pages);
 int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
 			      int nr, struct page **pages);
@@ -159,6 +107,101 @@ static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
 }
 #endif
 
+/*
+ * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
+ * @vma:     VMA to map the pages into
+ * @addr:    Address at which to map the pages
+ * @gfn:     Array of GFNs to map
+ * @nr:      Number entries in the GFN array
+ * @err_ptr: Returns per-GFN error status.
+ * @prot:    page protection mask
+ * @domid:   Domain owning the pages
+ * @pages:   Array of pages if this domain has an auto-translated physmap
+ *
+ * @gfn and @err_ptr may point to the same buffer, the GFNs will be
+ * overwritten by the error codes after they are mapped.
+ *
+ * Returns the number of successfully mapped frames, or a -ve error
+ * code.
+ */
+static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
+					     unsigned long addr,
+					     xen_pfn_t *gfn, int nr,
+					     int *err_ptr, pgprot_t prot,
+					     unsigned int domid,
+					     struct page **pages)
+{
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
+						 prot, domid, pages);
+
+	/* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
+	 * and the consequences later is quite hard to detect what the actual
+	 * cause of "wrong memory was mapped in".
+	 */
+	BUG_ON(err_ptr == NULL);
+	return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
+			     false, pages);
+}
+
+/*
+ * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn
+ * @vma:     VMA to map the pages into
+ * @addr:    Address at which to map the pages
+ * @mfn:     Array of MFNs to map
+ * @nr:      Number entries in the MFN array
+ * @err_ptr: Returns per-MFN error status.
+ * @prot:    page protection mask
+ * @domid:   Domain owning the pages
+ * @pages:   Array of pages if this domain has an auto-translated physmap
+ *
+ * @mfn and @err_ptr may point to the same buffer, the MFNs will be
+ * overwritten by the error codes after they are mapped.
+ *
+ * Returns the number of successfully mapped frames, or a -ve error
+ * code.
+ */
+static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+					     unsigned long addr, xen_pfn_t *mfn,
+					     int nr, int *err_ptr,
+					     pgprot_t prot, unsigned int domid,
+					     struct page **pages)
+{
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return -EOPNOTSUPP;
+
+	return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
+			     true, pages);
+}
+
+/* xen_remap_domain_gfn_range() - map a range of foreign frames
+ * @vma:   VMA to map the pages into
+ * @addr:  Address at which to map the pages
+ * @gfn:   First GFN to map.
+ * @nr:    Number frames to map
+ * @prot:  page protection mask
+ * @domid: Domain owning the pages
+ * @pages: Array of pages if this domain has an auto-translated physmap
+ *
+ * Returns the number of successfully mapped frames, or a -ve error
+ * code.
+ */
+static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
+					     unsigned long addr,
+					     xen_pfn_t gfn, int nr,
+					     pgprot_t prot, unsigned int domid,
+					     struct page **pages)
+{
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return -EOPNOTSUPP;
+
+	return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
+			     pages);
+}
+
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
+			       int numpgs, struct page **pages);
+
 int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
 				  unsigned long nr_grant_frames);
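
For illustration only (not part of this commit): a minimal sketch of how a caller might use the consolidated xen_remap_pfn() interface declared above. The wrapper name example_map_foreign, the NULL pages argument, and the error-handling policy are assumptions for demonstration, not code from the kernel tree.

#include <linux/mm.h>
#include <xen/xen-ops.h>

/*
 * Hypothetical caller sketch: map nr foreign GFNs owned by domid into
 * vma starting at addr.  Assumes the VMA was set up with
 * VM_PFNMAP | VM_IO, which xen_remap_pfn() asserts via BUG_ON().
 */
static int example_map_foreign(struct vm_area_struct *vma, unsigned long addr,
			       xen_pfn_t *gfns, int *errs, int nr,
			       unsigned int domid)
{
	int mapped;

	/*
	 * no_translate == false treats the frames as GFNs; a non-NULL
	 * errs array selects the discontiguous path, where each entry
	 * receives a per-frame status and failed frames are skipped.
	 */
	mapped = xen_remap_pfn(vma, addr, gfns, nr, errs,
			       vma->vm_page_prot, domid, false, NULL);
	if (mapped < 0)
		return mapped;			/* whole batch failed */

	return mapped == nr ? 0 : -EFAULT;	/* partial success */
}

Note the return convention carried over from do_remap_pfn(): a negative value is a global failure, while a non-negative value counts successfully mapped frames, so a caller must compare it against nr to detect per-frame errors.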