KVM: MMU: Optimize is_last_gpte()
Instead of branchy code that depends on the walk level, gpte.ps, and the mmu configuration, prepare everything in a bitmap when the paging mode changes and look it up at runtime. Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com> Signed-off-by: Avi Kivity <avi@redhat.com>
This commit is contained in:
parent
13d22b6aeb
commit
6fd01b711b
|
@ -298,6 +298,13 @@ struct kvm_mmu {
|
||||||
u64 *lm_root;
|
u64 *lm_root;
|
||||||
u64 rsvd_bits_mask[2][4];
|
u64 rsvd_bits_mask[2][4];
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Bitmap: bit set = last pte in walk
|
||||||
|
* index[0:1]: level (zero-based)
|
||||||
|
* index[2]: pte.ps
|
||||||
|
*/
|
||||||
|
u8 last_pte_bitmap;
|
||||||
|
|
||||||
bool nx;
|
bool nx;
|
||||||
|
|
||||||
u64 pdptrs[4]; /* pae */
|
u64 pdptrs[4]; /* pae */
|
||||||
|
|
|
@ -3447,6 +3447,15 @@ static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
|
||||||
return access;
|
return access;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)
|
||||||
|
{
|
||||||
|
unsigned index;
|
||||||
|
|
||||||
|
index = level - 1;
|
||||||
|
index |= (gpte & PT_PAGE_SIZE_MASK) >> (PT_PAGE_SIZE_SHIFT - 2);
|
||||||
|
return mmu->last_pte_bitmap & (1 << index);
|
||||||
|
}
|
||||||
|
|
||||||
#define PTTYPE 64
|
#define PTTYPE 64
|
||||||
#include "paging_tmpl.h"
|
#include "paging_tmpl.h"
|
||||||
#undef PTTYPE
|
#undef PTTYPE
|
||||||
|
@ -3548,6 +3557,24 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * Precompute mmu->last_pte_bitmap for the current paging mode.
 *
 * The bitmap is consulted by is_last_gpte(): bit @index is set when a
 * pte encountered at that point of the walk maps a page (terminates the
 * walk).  index[0:1] is the zero-based walk level, index[2] is pte.ps.
 */
static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
	u8 map;
	unsigned level, root_level = mmu->root_level;
	/* Index bit that encodes pte.ps; must match is_last_gpte(). */
	const unsigned ps_set_index = 1 << 2;	/* bit 2 of index: ps */

	/*
	 * PAE PDPTEs never map large pages, so the top level of a PT32E
	 * walk can never terminate it; only consider the levels below.
	 */
	if (root_level == PT32E_ROOT_LEVEL)
		--root_level;
	/* PT_PAGE_TABLE_LEVEL always terminates */
	map = 1 | (1 << ps_set_index);
	for (level = PT_DIRECTORY_LEVEL; level <= root_level; ++level) {
		/*
		 * A set PS bit terminates the walk at the directory or
		 * PDPE level; for 32-bit non-PAE guests this additionally
		 * requires CR4.PSE to be enabled.
		 */
		if (level <= PT_PDPE_LEVEL
		    && (mmu->root_level >= PT32E_ROOT_LEVEL || is_pse(vcpu)))
			map |= 1 << (ps_set_index | (level - 1));
	}
	mmu->last_pte_bitmap = map;
}
|
||||||
|
|
||||||
static int paging64_init_context_common(struct kvm_vcpu *vcpu,
|
static int paging64_init_context_common(struct kvm_vcpu *vcpu,
|
||||||
struct kvm_mmu *context,
|
struct kvm_mmu *context,
|
||||||
int level)
|
int level)
|
||||||
|
@ -3557,6 +3584,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
|
||||||
|
|
||||||
reset_rsvds_bits_mask(vcpu, context);
|
reset_rsvds_bits_mask(vcpu, context);
|
||||||
update_permission_bitmask(vcpu, context);
|
update_permission_bitmask(vcpu, context);
|
||||||
|
update_last_pte_bitmap(vcpu, context);
|
||||||
|
|
||||||
ASSERT(is_pae(vcpu));
|
ASSERT(is_pae(vcpu));
|
||||||
context->new_cr3 = paging_new_cr3;
|
context->new_cr3 = paging_new_cr3;
|
||||||
|
@ -3586,6 +3614,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
|
||||||
|
|
||||||
reset_rsvds_bits_mask(vcpu, context);
|
reset_rsvds_bits_mask(vcpu, context);
|
||||||
update_permission_bitmask(vcpu, context);
|
update_permission_bitmask(vcpu, context);
|
||||||
|
update_last_pte_bitmap(vcpu, context);
|
||||||
|
|
||||||
context->new_cr3 = paging_new_cr3;
|
context->new_cr3 = paging_new_cr3;
|
||||||
context->page_fault = paging32_page_fault;
|
context->page_fault = paging32_page_fault;
|
||||||
|
@ -3647,6 +3676,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
|
||||||
}
|
}
|
||||||
|
|
||||||
update_permission_bitmask(vcpu, context);
|
update_permission_bitmask(vcpu, context);
|
||||||
|
update_last_pte_bitmap(vcpu, context);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -3724,6 +3754,7 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
|
||||||
}
|
}
|
||||||
|
|
||||||
update_permission_bitmask(vcpu, g_context);
|
update_permission_bitmask(vcpu, g_context);
|
||||||
|
update_last_pte_bitmap(vcpu, g_context);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -20,7 +20,8 @@
|
||||||
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
|
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
|
||||||
#define PT_DIRTY_SHIFT 6
|
#define PT_DIRTY_SHIFT 6
|
||||||
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
|
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
|
||||||
#define PT_PAGE_SIZE_MASK (1ULL << 7)
|
#define PT_PAGE_SIZE_SHIFT 7
|
||||||
|
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
|
||||||
#define PT_PAT_MASK (1ULL << 7)
|
#define PT_PAT_MASK (1ULL << 7)
|
||||||
#define PT_GLOBAL_MASK (1ULL << 8)
|
#define PT_GLOBAL_MASK (1ULL << 8)
|
||||||
#define PT64_NX_SHIFT 63
|
#define PT64_NX_SHIFT 63
|
||||||
|
|
|
@ -103,24 +103,6 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
|
||||||
return (ret != orig_pte);
|
return (ret != orig_pte);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
 * Branchy per-walk test for whether @gpte terminates the walk.
 * (Removed by this commit in favour of the precomputed
 * mmu->last_pte_bitmap lookup in is_last_gpte().)
 */
static bool FNAME(is_last_gpte)(struct guest_walker *walker,
				struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				pt_element_t gpte)
{
	/* A pte at the lowest level always maps a page. */
	if (walker->level == PT_PAGE_TABLE_LEVEL)
		return true;

	/*
	 * A large pde maps a page; 32-bit non-PAE guests (PTTYPE != 64)
	 * additionally need CR4.PSE for large pages to exist.
	 */
	if ((walker->level == PT_DIRECTORY_LEVEL) && is_large_pte(gpte) &&
	    (PTTYPE == 64 || is_pse(vcpu)))
		return true;

	/* Large pdptes exist only with 4-level (64-bit) paging. */
	if ((walker->level == PT_PDPE_LEVEL) && is_large_pte(gpte) &&
	    (mmu->root_level == PT64_ROOT_LEVEL))
		return true;

	return false;
}
|
|
||||||
|
|
||||||
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
|
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
|
||||||
struct kvm_mmu *mmu,
|
struct kvm_mmu *mmu,
|
||||||
struct guest_walker *walker,
|
struct guest_walker *walker,
|
||||||
|
@ -247,7 +229,7 @@ retry_walk:
|
||||||
pte_access = pt_access & gpte_access(vcpu, pte);
|
pte_access = pt_access & gpte_access(vcpu, pte);
|
||||||
|
|
||||||
walker->ptes[walker->level - 1] = pte;
|
walker->ptes[walker->level - 1] = pte;
|
||||||
} while (!FNAME(is_last_gpte)(walker, vcpu, mmu, pte));
|
} while (!is_last_gpte(mmu, walker->level, pte));
|
||||||
|
|
||||||
eperm |= permission_fault(mmu, pte_access, access);
|
eperm |= permission_fault(mmu, pte_access, access);
|
||||||
if (unlikely(eperm)) {
|
if (unlikely(eperm)) {
|
||||||
|
|
Loading…
Reference in New Issue