arm64/mm: wire up CONFIG_ARCH_HAS_SET_DIRECT_MAP

Wire up the special helper functions to manipulate aliases of vmalloc
regions in the linear map.

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
commit 4739d53fcd (parent 7dfac3c5f4)
Author: Ard Biesheuvel, 2019-05-23 11:22:54 +01:00 (committed by Catalin Marinas)
4 files changed, 44 insertions(+), 19 deletions(-)
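The helpers are deliberately _noflush variants: callers are expected to batch changes and perform the TLB invalidation themselves. A minimal usage sketch, assuming flush_tlb_kernel_range() as the flush primitive (illustrative only, not part of this commit):

#include <linux/mm.h>
#include <linux/set_memory.h>
#include <asm/tlbflush.h>

/*
 * Hypothetical caller (not from this commit): temporarily hide a page's
 * linear-map alias, then restore the default (valid, writable) mapping.
 * The set_direct_map_*_noflush() helpers leave TLB maintenance to us.
 */
static void hide_and_restore_linear_alias(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);

	/* Clear PTE_VALID on the linear-map alias; no TLB flush yet. */
	set_direct_map_invalid_noflush(page);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	/* ... the alias is now unusable; do work via another mapping ... */

	/* Put back default permissions and flush again. */
	set_direct_map_default_noflush(page);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}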

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig

@@ -26,6 +26,7 @@ config ARM64
 	select ARCH_HAS_MEMBARRIER_SYNC_CORE
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_SETUP_DMA_OPS
+	select ARCH_HAS_SET_DIRECT_MAP
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_STRICT_KERNEL_RWX
 	select ARCH_HAS_STRICT_MODULE_RWX
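Selecting ARCH_HAS_SET_DIRECT_MAP tells the generic code that this architecture provides real set_direct_map_*() implementations; architectures that do not select it get no-op stubs instead. Roughly, the generic fallback in include/linux/set_memory.h looks like this (paraphrased for context, not part of this diff):

#ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
/* No-op fallbacks: the direct map is left untouched on such arches. */
static inline int set_direct_map_invalid_noflush(struct page *page)
{
	return 0;
}
static inline int set_direct_map_default_noflush(struct page *page)
{
	return 0;
}
#endif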

diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h

@@ -187,4 +187,7 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 
 int set_memory_valid(unsigned long addr, int numpages, int enable);
 
+int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+
 #endif

diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c

@@ -159,17 +159,48 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
 					__pgprot(PTE_VALID));
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+int set_direct_map_invalid_noflush(struct page *page)
+{
+	struct page_change_data data = {
+		.set_mask = __pgprot(0),
+		.clear_mask = __pgprot(PTE_VALID),
+	};
+
+	if (!rodata_full)
+		return 0;
+
+	return apply_to_page_range(&init_mm,
+				   (unsigned long)page_address(page),
+				   PAGE_SIZE, change_page_range, &data);
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+	struct page_change_data data = {
+		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
+		.clear_mask = __pgprot(PTE_RDONLY),
+	};
+
+	if (!rodata_full)
+		return 0;
+
+	return apply_to_page_range(&init_mm,
+				   (unsigned long)page_address(page),
+				   PAGE_SIZE, change_page_range, &data);
+}
+
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
+	if (!debug_pagealloc_enabled() && !rodata_full)
+		return;
+
 	set_memory_valid((unsigned long)page_address(page), numpages, enable);
 }
-#ifdef CONFIG_HIBERNATION
+
 /*
- * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
- * is used to determine if a linear map page has been marked as not-valid by
- * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
- * This is based on kern_addr_valid(), which almost does what we need.
+ * This function is used to determine if a linear map page has been marked as
+ * not-valid. Walk the page table and check the PTE_VALID bit. This is based
+ * on kern_addr_valid(), which almost does what we need.
  *
  * Because this is only called on the kernel linear map, p?d_sect() implies
  * p?d_present(). When debug_pagealloc is enabled, sections mappings are
@@ -183,6 +214,9 @@ bool kernel_page_present(struct page *page)
 	pte_t *ptep;
 	unsigned long addr = (unsigned long)page_address(page);
 
+	if (!debug_pagealloc_enabled() && !rodata_full)
+		return true;
+
 	pgdp = pgd_offset_k(addr);
 	if (pgd_none(READ_ONCE(*pgdp)))
 		return false;
@@ -204,5 +238,3 @@ bool kernel_page_present(struct page *page)
 	ptep = pte_offset_kernel(pmdp, addr);
 	return pte_valid(READ_ONCE(*ptep));
 }
-#endif /* CONFIG_HIBERNATION */
-#endif /* CONFIG_DEBUG_PAGEALLOC */
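Both new functions lean on the file's existing apply_to_page_range() plumbing: change_page_range() is the per-PTE callback that applies clear_mask and set_mask to each entry. For reference, that existing helper looks roughly like this (paraphrased from the same file, not part of this hunk):

struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

/* Per-PTE callback for apply_to_page_range(): clear, then set, then write back. */
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			     void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

Note also the rodata_full checks: when rodata=full is not in effect, linear-map aliases are not remapped at page granularity in the first place, so the helpers succeed without touching anything, and kernel_page_present() likewise reports the page as present.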

diff --git a/mm/vmalloc.c b/mm/vmalloc.c

@@ -2128,17 +2128,6 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
 	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
 	int i;
 
-	/*
-	 * The below block can be removed when all architectures that have
-	 * direct map permissions also have set_direct_map_() implementations.
-	 * This is concerned with resetting the direct map any an vm alias with
-	 * execute permissions, without leaving a RW+X window.
-	 */
-	if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
-		set_memory_nx(addr, area->nr_pages);
-		set_memory_rw(addr, area->nr_pages);
-	}
-
 	remove_vm_area(area->addr);
 
 	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
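With CONFIG_ARCH_HAS_SET_DIRECT_MAP available, the generic VM_FLUSH_RESET_PERMS teardown can handle the direct map itself: invalidate the aliases, flush once, then restore defaults, so no RW+X window ever opens. A sketch of that sequence, paraphrased from vm_remove_mappings() later in this function (set_area_direct_map() and _vm_unmap_aliases() are its existing local helpers, so this fragment only compiles inside mm/vmalloc.c):

static void reset_direct_map_sketch(struct vm_struct *area,
				    unsigned long start, unsigned long end)
{
	/* 1. Invalidate direct-map aliases so stale entries can't be cached. */
	set_area_direct_map(area, set_direct_map_invalid_noflush);

	/* 2. One TLB flush covers both the vmalloc alias and the direct map. */
	_vm_unmap_aliases(start, end, 1);

	/* 3. Restore default (valid, writable, non-exec) direct-map permissions. */
	set_area_direct_map(area, set_direct_map_default_noflush);
}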