From: Jan Beulich <jbeulich@suse.com>
Subject: x86/EPT: ept_set_middle_entry() related adjustments

ept_split_super_page() wants to further modify the newly allocated
table, so have ept_set_middle_entry() return the mapped pointer rather
than tearing the mapping down only for it to be re-established right
away.

Similarly ept_next_level() wants to hand back a mapped pointer to the
next-level page, so re-use the one established by
ept_set_middle_entry() in case that path was taken.

Pull the setting of suppress_ve ahead of the insertion into the higher
level table, and don't have ept_split_super_page() set the field a
second time.

This is part of XSA-328.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
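---
Purely for illustration (not part of the patch to apply): a minimal,
self-contained C sketch of the new calling convention, using calloc()/free()
and a simplified entry type as stand-ins for Xen's p2m_alloc_ptp(),
map_domain_page()/unmap_domain_page() and ept_entry_t. It models the two
points above: the helper hands back the still-mapped table so the caller can
keep editing it instead of mapping the page a second time, and suppress_ve
is set before the new table is hooked into its parent entry.

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 512                 /* stand-in for EPT_PAGETABLE_ENTRIES */

typedef struct entry {
    struct entry *table;            /* stand-in for ept_entry->mfn */
    int present;
    int suppress_ve;
} entry_t;

/* New shape: return the (still mapped) new table, or NULL on failure. */
static entry_t *set_middle_entry(entry_t *parent)
{
    entry_t *table = calloc(ENTRIES, sizeof(*table));
    size_t i;

    if ( !table )
        return NULL;

    /* suppress_ve is set before the table becomes reachable via parent. */
    for ( i = 0; i < ENTRIES; i++ )
        table[i].suppress_ve = 1;

    parent->table = table;
    parent->present = 1;
    parent->suppress_ve = 1;

    return table;                   /* caller releases the mapping */
}

/* Caller pattern mirroring ept_split_super_page(): keep using the table. */
static int split_super_page(entry_t *parent)
{
    entry_t *table = set_middle_entry(parent);
    size_t i;

    if ( !table )
        return 0;

    for ( i = 0; i < ENTRIES; i++ )
        table[i].present = 1;       /* further edits on the mapped table */

    /*
     * free() stands in for unmap_domain_page(); the real page would stay
     * referenced through the parent entry's MFN, so clear the sketch's
     * pointer to avoid leaving it dangling.
     */
    free(table);
    parent->table = NULL;
    return 1;
}

int main(void)
{
    entry_t parent = { 0 };

    printf("split %s\n", split_super_page(&parent) ? "succeeded" : "failed");
    return 0;
}
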
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -187,8 +187,9 @@ static void ept_p2m_type_to_flags(struct
 #define GUEST_TABLE_SUPER_PAGE  2
 #define GUEST_TABLE_POD_PAGE    3
 
-/* Fill in middle levels of ept table */
-static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry)
+/* Fill in middle level of ept table; return pointer to mapped new table. */
+static ept_entry_t *ept_set_middle_entry(struct p2m_domain *p2m,
+                                         ept_entry_t *ept_entry)
 {
     mfn_t mfn;
     ept_entry_t *table;
@@ -196,7 +197,12 @@ static int ept_set_middle_entry(struct p
 
     mfn = p2m_alloc_ptp(p2m, 0);
     if ( mfn_eq(mfn, INVALID_MFN) )
-        return 0;
+        return NULL;
+
+    table = map_domain_page(mfn);
+
+    for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
+        table[i].suppress_ve = 1;
 
     ept_entry->epte = 0;
     ept_entry->mfn = mfn_x(mfn);
@@ -208,14 +214,7 @@ static int ept_set_middle_entry(struct p
 
     ept_entry->suppress_ve = 1;
 
-    table = map_domain_page(mfn);
-
-    for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
-        table[i].suppress_ve = 1;
-
-    unmap_domain_page(table);
-
-    return 1;
+    return table;
 }
 
 /* free ept sub tree behind an entry */
@@ -253,10 +252,10 @@ static bool_t ept_split_super_page(struc
 
     ASSERT(is_epte_superpage(ept_entry));
 
-    if ( !ept_set_middle_entry(p2m, &new_ept) )
+    table = ept_set_middle_entry(p2m, &new_ept);
+    if ( !table )
         return 0;
 
-    table = map_domain_page(_mfn(new_ept.mfn));
     trunk = 1UL << ((level - 1) * EPT_TABLE_ORDER);
 
     for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
@@ -267,7 +266,6 @@ static bool_t ept_split_super_page(struc
         epte->sp = (level > 1);
         epte->mfn += i * trunk;
         epte->snp = is_iommu_enabled(p2m->domain) && iommu_snoop;
-        epte->suppress_ve = 1;
 
         ept_p2m_type_to_flags(p2m, epte, epte->sa_p2mt, epte->access);
 
@@ -306,8 +304,7 @@ static int ept_next_level(struct p2m_dom
                           ept_entry_t **table, unsigned long *gfn_remainder,
                           int next_level)
 {
-    unsigned long mfn;
-    ept_entry_t *ept_entry, e;
+    ept_entry_t *ept_entry, *next = NULL, e;
     u32 shift, index;
 
     shift = next_level * EPT_TABLE_ORDER;
@@ -332,19 +329,17 @@ static int ept_next_level(struct p2m_dom
         if ( read_only )
             return GUEST_TABLE_MAP_FAILED;
 
-        if ( !ept_set_middle_entry(p2m, ept_entry) )
+        next = ept_set_middle_entry(p2m, ept_entry);
+        if ( !next )
             return GUEST_TABLE_MAP_FAILED;
-        else
-            e = atomic_read_ept_entry(ept_entry); /* Refresh */
+        /* e is now stale and hence may not be used anymore below. */
     }
-
     /* The only time sp would be set here is if we had hit a superpage */
-    if ( is_epte_superpage(&e) )
+    else if ( is_epte_superpage(&e) )
         return GUEST_TABLE_SUPER_PAGE;
 
-    mfn = e.mfn;
     unmap_domain_page(*table);
-    *table = map_domain_page(_mfn(mfn));
+    *table = next ?: map_domain_page(_mfn(e.mfn));
     *gfn_remainder &= (1UL << shift) - 1;
     return GUEST_TABLE_NORMAL_PAGE;
 }