Merge branch 'kvm-arm64/pt-new' into kvmarm-master/next

Signed-off-by: Marc Zyngier <maz@kernel.org>

# Conflicts:
#	arch/arm64/kvm/mmu.c
Marc Zyngier 2020-09-18 16:22:18 +01:00
commit 8910f08960
1 changed file with 18 additions and 8 deletions

arch/arm64/kvm/mmu.c

@@ -114,9 +114,10 @@ static bool kvm_is_device_pfn(unsigned long pfn)
  */
 /**
  * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
- * @kvm:   The VM pointer
+ * @mmu:   The KVM stage-2 MMU pointer
  * @start: The intermediate physical base address of the range to unmap
  * @size:  The size of the area to unmap
+ * @may_block: Whether or not we are permitted to block
  *
  * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
  * be called while holding mmu_lock (unless for freeing the stage2 pgd before
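
The hunk above tracks the stage-2 rework: unmap operations are now keyed on the stage-2 MMU (struct kvm_s2_mmu) rather than on the VM itself, with an explicit hint on whether blocking is permitted. A minimal sketch of the shape this documents, assuming the usual __-prefixed worker/wrapper split (the worker name and body are illustrative, not taken from this diff):

/* Sketch only: the worker/wrapper split is assumed from the documented
 * parameters; the walking logic is elided. */
static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
				 u64 size, bool may_block)
{
	/* Clear the stage-2 entries covering [start, start + size),
	 * dropping ref-counts; only reschedule if may_block is true. */
}

static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
			       u64 size)
{
	__unmap_stage2_range(mmu, start, size, true);
}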
@@ -493,6 +494,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
  * @guest_ipa:	The IPA at which to insert the mapping
  * @pa:		The physical address of the device
  * @size:	The size of the mapping
+ * @writable:	Whether or not to create a writable mapping
  */
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 			  phys_addr_t pa, unsigned long size, bool writable)
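
For callers, the new @writable parameter makes the mapping permission explicit at the call site. A hypothetical use, mapping a device MMIO region read-only into the guest (the addresses and size here are made up for illustration):

/* Hypothetical caller: addresses and size are illustrative only. */
int ret = kvm_phys_addr_ioremap(kvm, 0x0a000000 /* guest IPA */,
				0x09000000 /* host PA */,
				0x10000 /* 64KiB */,
				false /* read-only for the guest */);
if (ret)
	return ret;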
@@ -530,7 +532,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 
 /**
  * stage2_wp_range() - write protect stage2 memory region range
- * @kvm:	The KVM pointer
+ * @mmu:	The KVM stage-2 MMU pointer
  * @addr:	Start address of range
  * @end:	End address of range
  */
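
stage2_wp_range() likewise now names the stage-2 MMU it operates on. A sketch of the classic caller pattern, write-protecting a whole memslot when dirty logging starts; this mirrors kvm_mmu_wp_memory_region() but should be treated as illustrative rather than the exact code in this tree:

/* Illustrative: write-protect an entire memslot under mmu_lock, then
 * invalidate stale TLB entries so vCPUs fault and get marked dirty. */
phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
phys_addr_t end   = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

spin_lock(&kvm->mmu_lock);
stage2_wp_range(&kvm->arch.mmu, start, end);
spin_unlock(&kvm->mmu_lock);
kvm_flush_remote_tlbs(kvm);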
@@ -779,17 +781,25 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	else
 		vma_shift = PAGE_SHIFT;
 
-	vma_pagesize = 1ULL << vma_shift;
 	if (logging_active ||
-	    (vma->vm_flags & VM_PFNMAP) ||
-	    !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
+	    (vma->vm_flags & VM_PFNMAP)) {
 		force_pte = true;
-		vma_pagesize = PAGE_SIZE;
 		vma_shift = PAGE_SHIFT;
 	}
 
+	if (vma_shift == PUD_SHIFT &&
+	    !fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
+		vma_shift = PMD_SHIFT;
+
+	if (vma_shift == PMD_SHIFT &&
+	    !fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
+		force_pte = true;
+		vma_shift = PAGE_SHIFT;
+	}
+
+	vma_pagesize = 1UL << vma_shift;
 	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
-		fault_ipa &= huge_page_mask(hstate_vma(vma));
+		fault_ipa &= ~(vma_pagesize - 1);
 
 	gfn = fault_ipa >> PAGE_SHIFT;
 	mmap_read_unlock(current->mm);
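
This hunk replaces the all-or-nothing check, where any memslot that could not back the largest candidate mapping forced 4K pages, with a downgrade ladder: a PUD-sized mapping that does not fit is retried at PMD size before falling back to PTEs, and the faulting IPA is aligned with a plain mask derived from the chosen size rather than from hugetlbfs state. A standalone, compilable sketch of the ladder, with fits() standing in for fault_supports_stage2_huge_mapping(), force_pte omitted, and shift values assuming 4K pages:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30

/* Stub for fault_supports_stage2_huge_mapping(): pretend the memslot
 * alignment allows PMD blocks but not PUD blocks. */
static bool fits(unsigned int shift)
{
	return shift <= PMD_SHIFT;
}

int main(void)
{
	unsigned int vma_shift = PUD_SHIFT;
	unsigned long long fault_ipa = 0x40345678ULL;
	unsigned long long vma_pagesize;

	if (vma_shift == PUD_SHIFT && !fits(PUD_SHIFT))
		vma_shift = PMD_SHIFT;		/* try one level down */
	if (vma_shift == PMD_SHIFT && !fits(PMD_SHIFT))
		vma_shift = PAGE_SHIFT;		/* last resort: single pages */

	vma_pagesize = 1ULL << vma_shift;
	fault_ipa &= ~(vma_pagesize - 1);	/* align the IPA to the block */

	printf("shift=%u, aligned ipa=%#llx\n", vma_shift, fault_ipa);
	return 0;
}

Here the fault would be served with a 2M PMD block at IPA 0x40200000 instead of dropping all the way to 4K pages.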
@@ -1336,7 +1346,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	spin_lock(&kvm->mmu_lock);
 	if (ret)
 		unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
-	else
+	else if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
 		stage2_flush_memslot(kvm, memslot);
 	spin_unlock(&kvm->mmu_lock);
 out:
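
The final hunk skips the cache flush of the memslot when FEAT_S2FWB is available: with stage-2 forced write-back, the combined memory attributes keep guest memory cacheable regardless of the guest's stage-1 attributes, so there is no stale data to clean. The guard pattern, shown standalone (the helper name is illustrative, not from this diff):

/* Illustrative helper: only flush when the CPU lacks stage-2 FWB. */
static void stage2_flush_memslot_if_needed(struct kvm *kvm,
					   struct kvm_memory_slot *memslot)
{
	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		stage2_flush_memslot(kvm, memslot);
}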