KVM: arm64: Walk userspace page tables to compute the THP mapping size
We currently rely on the kvm_is_transparent_hugepage() helper to discover whether a given page has the potential to be mapped as a block mapping.

However, this API doesn't really give us everything we want:

- we don't get the size: this is not crucial today as we only support PMD-sized THPs, but we'd like to have larger sizes in the future
- we're the only user left of the API, and there is a will to remove it altogether

To address the above, implement a simple walker using the existing page table infrastructure, and plumb it into transparent_hugepage_adjust(). No new page sizes are supported in the process.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
Link: https://lore.kernel.org/r/20210726153552.1535838-3-maz@kernel.org
commit 6011cf68c8 (parent 63db506e07)
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -433,6 +433,32 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 	return 0;
 }
 
+static struct kvm_pgtable_mm_ops kvm_user_mm_ops = {
+	/* We shouldn't need any other callback to walk the PT */
+	.phys_to_virt		= kvm_host_va,
+};
+
+static int get_user_mapping_size(struct kvm *kvm, u64 addr)
+{
+	struct kvm_pgtable pgt = {
+		.pgd		= (kvm_pte_t *)kvm->mm->pgd,
+		.ia_bits	= VA_BITS,
+		.start_level	= (KVM_PGTABLE_MAX_LEVELS -
+				   CONFIG_PGTABLE_LEVELS),
+		.mm_ops		= &kvm_user_mm_ops,
+	};
+	kvm_pte_t pte = 0;	/* Keep GCC quiet... */
+	u32 level = ~0;
+	int ret;
+
+	ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
+	VM_BUG_ON(ret);
+	VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS);
+	VM_BUG_ON(!(pte & PTE_VALID));
+
+	return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
+}
+
 static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
 	.zalloc_page		= stage2_memcache_zalloc_page,
 	.zalloc_pages_exact	= kvm_host_zalloc_pages_exact,
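The new helper runs KVM's generic page-table walker over the userspace stage-1 tables: kvm_user_mm_ops only needs phys_to_virt, and start_level accounts for kernels built with fewer than the maximum four levels (KVM_PGTABLE_MAX_LEVELS is 4, so e.g. CONFIG_PGTABLE_LEVELS=3 starts the walk at level 1). The mapping size then falls out of the level at which kvm_pgtable_get_leaf() found the leaf. Below is a minimal standalone sketch of that last conversion, assuming a 4K granule; LEVEL_SHIFT mirrors the form of arm64's ARM64_HW_PGTABLE_LEVEL_SHIFT:

	#include <stdio.h>

	#define PAGE_SHIFT	12	/* example assumes a 4K granule */

	/* Same form as arm64's ARM64_HW_PGTABLE_LEVEL_SHIFT(n): each level
	 * above the last resolves (PAGE_SHIFT - 3) more bits of address. */
	#define LEVEL_SHIFT(n)	(((PAGE_SHIFT - 3) * (4 - (n))) + 3)

	int main(void)
	{
		for (int level = 1; level <= 3; level++)
			printf("leaf at level %d -> mapping size %#lx\n",
			       level, 1UL << LEVEL_SHIFT(level));
		/* level 1 -> 0x40000000 (1GiB)
		 * level 2 -> 0x200000   (2MiB, PMD-sized THP)
		 * level 3 -> 0x1000     (4KiB page) */
		return 0;
	}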
@@ -780,7 +806,7 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
  * Returns the size of the mapping.
  */
 static unsigned long
-transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
+transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			    unsigned long hva, kvm_pfn_t *pfnp,
 			    phys_addr_t *ipap)
 {
@@ -791,8 +817,8 @@ transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
 	 * sure that the HVA and IPA are sufficiently aligned and that the
 	 * block map is contained within the memslot.
 	 */
-	if (kvm_is_transparent_hugepage(pfn) &&
-	    fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
+	if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
+	    get_user_mapping_size(kvm, hva) >= PMD_SIZE) {
 		/*
 		 * The address we faulted on is backed by a transparent huge
 		 * page. However, because we map the compound huge page and
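With the walk in place, the THP test reduces to a size comparison: if the userspace leaf covering hva is at least PMD-sized, the fault can be served by a block mapping, and kvm_is_transparent_hugepage() is no longer needed. Note that the cheap alignment/containment check now runs first, short-circuiting the page-table walk. A rough sketch of what that check requires (a hypothetical rendering of the intent of fault_supports_stage2_huge_mapping(), not its exact code):

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical illustration: hva and ipa must share the same offset
	 * within the block, and the whole block must fit in the memslot. */
	static bool block_fits(unsigned long hva, unsigned long ipa,
			       unsigned long uaddr_start, unsigned long uaddr_end,
			       unsigned long blksz)
	{
		/* same offset within the block on both sides? */
		if ((hva ^ ipa) & (blksz - 1))
			return false;

		/* whole block inside the memslot's VA range? */
		hva &= ~(blksz - 1);
		return hva >= uaddr_start && hva + blksz <= uaddr_end;
	}

	int main(void)
	{
		unsigned long pmd = 1UL << 21;	/* 2MiB */

		/* congruent mod 2MiB and contained -> eligible (prints 1) */
		printf("%d\n", block_fits(0x7f0000234000, 0x40234000,
					  0x7f0000000000, 0x7f0010000000, pmd));
		/* offsets differ -> no block mapping (prints 0) */
		printf("%d\n", block_fits(0x7f0000234000, 0x40235000,
					  0x7f0000000000, 0x7f0010000000, pmd));
		return 0;
	}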
@@ -1051,7 +1077,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * backed by a THP and thus use block mapping if possible.
 	 */
 	if (vma_pagesize == PAGE_SIZE && !(force_pte || device))
-		vma_pagesize = transparent_hugepage_adjust(memslot, hva,
+		vma_pagesize = transparent_hugepage_adjust(kvm, memslot, hva,
 							   &pfn, &fault_ipa);
 
 	if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
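At the call site the only change is threading the kvm pointer through. When the adjusted test passes, the fault is upgraded to a PMD-sized mapping: per the comment in the hunk above, the compound huge page is mapped rather than the individual tail page, so the IPA is rounded down to a block boundary and the pfn moves to the THP's head page. A hedged arithmetic illustration of that rounding, with made-up addresses and constants assuming a 4K granule:

	#include <stdio.h>
	#include <stdint.h>

	#define PMD_SHIFT	21			/* 2MiB blocks with 4K pages */
	#define PMD_SIZE	(1UL << PMD_SHIFT)
	#define PMD_MASK	(~(PMD_SIZE - 1))
	#define PTRS_PER_PMD	512			/* pages per 2MiB block */

	int main(void)
	{
		uint64_t ipa = 0x40234000;	/* made-up faulting IPA inside a THP */
		uint64_t pfn = 0x88234;		/* made-up pfn of the faulting page */

		ipa &= PMD_MASK;			/* block-aligned IPA: 0x40200000 */
		pfn &= ~(uint64_t)(PTRS_PER_PMD - 1);	/* THP head page:   0x88200 */

		printf("ipa=%#lx pfn=%#lx size=%#lx\n",
		       (unsigned long)ipa, (unsigned long)pfn, PMD_SIZE);
		return 0;
	}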