s390/mm: enable ARCH_HAS_SET_DIRECT_MAP
Implement the set_direct_map_*() API, which allows invalidating pages
within the direct mapping and resetting them to their default
permissions.

Note that kernel_page_present(), which is also supposed to be part of
this API, is intentionally not implemented: it is only used (and
currently only makes sense) for suspend/resume, which isn't supported
on s390.

Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
commit 0490d6d7ba
parent 17c51b1ba9
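For reference, the intended caller-side pattern: take a page out of the
kernel direct mapping, flush the TLB yourself (the _noflush suffix means
neither helper does it), and later restore the default mapping. The two
functions below are a hypothetical sketch for illustration only and are
not part of this patch.

#include <linux/mm.h>
#include <linux/set_memory.h>
#include <asm/tlbflush.h>

/* Hypothetical example, not part of the patch: temporarily remove one
 * page from the kernel direct mapping and restore it later.
 */
static int example_hide_page(struct page *page)
{
	unsigned long addr = (unsigned long)page_to_virt(page);
	int rc;

	/* Invalidate the direct-map entry; no TLB flush is performed here. */
	rc = set_direct_map_invalid_noflush(page);
	if (rc)
		return rc;
	/* The caller is responsible for flushing the TLB for the range. */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	return 0;
}

static void example_unhide_page(struct page *page)
{
	/* Re-establish the default (PAGE_KERNEL) direct mapping. */
	set_direct_map_default_noflush(page);
}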
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -77,6 +77,7 @@ config S390
 	select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_SCALED_CPUTIME
+	select ARCH_HAS_SET_DIRECT_MAP
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_STRICT_KERNEL_RWX
 	select ARCH_HAS_STRICT_MODULE_RWX
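Selecting ARCH_HAS_SET_DIRECT_MAP advertises to generic code that the
architecture implements the set_direct_map_*() helpers. A minimal sketch
of how code outside arch/s390 can key off the symbol; the function name
below is made up for illustration and does not exist in the tree.

#include <linux/kconfig.h>
#include <linux/types.h>

/* Illustrative only: compile-time check whether the architecture can
 * manipulate the kernel direct mapping.
 */
static inline bool can_touch_direct_map(void)
{
	return IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP);
}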
--- a/arch/s390/include/asm/set_memory.h
+++ b/arch/s390/include/asm/set_memory.h
@@ -12,6 +12,8 @@ enum {
 	_SET_MEMORY_NX_BIT,
 	_SET_MEMORY_X_BIT,
 	_SET_MEMORY_4K_BIT,
+	_SET_MEMORY_INV_BIT,
+	_SET_MEMORY_DEF_BIT,
 };
 
 #define SET_MEMORY_RO	BIT(_SET_MEMORY_RO_BIT)
@@ -19,6 +21,8 @@ enum {
 #define SET_MEMORY_NX	BIT(_SET_MEMORY_NX_BIT)
 #define SET_MEMORY_X	BIT(_SET_MEMORY_X_BIT)
 #define SET_MEMORY_4K	BIT(_SET_MEMORY_4K_BIT)
+#define SET_MEMORY_INV	BIT(_SET_MEMORY_INV_BIT)
+#define SET_MEMORY_DEF	BIT(_SET_MEMORY_DEF_BIT)
 
 int __set_memory(unsigned long addr, int numpages, unsigned long flags);
 
@@ -58,4 +62,7 @@ static inline int set_memory_4k(unsigned long addr, int numpages)
 	return __set_memory(addr, numpages, SET_MEMORY_4K);
 }
 
+int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+
 #endif
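As with the existing RO/RW/NX/X/4K bits, the new INV and DEF enum values
become BIT() flags that are passed to __set_memory(), which takes a
kernel virtual address and a number of pages. A rough sketch of what the
two new declarations boil down to; this is illustrative only, the real
implementation is in the pageattr.c hunks below.

#include <asm/set_memory.h>

/* Illustrative only: the new flags plug into the existing s390-internal
 * __set_memory(addr, numpages, flags) interface.
 */
static int example_invalidate_one_page(unsigned long addr)
{
	return __set_memory(addr, 1, SET_MEMORY_INV);
}

static int example_restore_one_page(unsigned long addr)
{
	return __set_memory(addr, 1, SET_MEMORY_DEF);
}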
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -4,6 +4,7 @@
  * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
  */
 #include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <asm/cacheflush.h>
 #include <asm/facility.h>
@@ -101,6 +102,14 @@ static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
 			new = set_pte_bit(new, __pgprot(_PAGE_NOEXEC));
 		else if (flags & SET_MEMORY_X)
 			new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
+		if (flags & SET_MEMORY_INV) {
+			new = set_pte_bit(new, __pgprot(_PAGE_INVALID));
+		} else if (flags & SET_MEMORY_DEF) {
+			new = __pte(pte_val(new) & PAGE_MASK);
+			new = set_pte_bit(new, PAGE_KERNEL);
+			if (!MACHINE_HAS_NX)
+				new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
+		}
 		pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
 		ptep++;
 		addr += PAGE_SIZE;
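The SET_MEMORY_DEF branch first masks the entry down to its page frame
address and then applies the default kernel protection (PAGE_KERNEL);
when the NX facility is not installed, the NOEXEC bit is cleared again
since it cannot be used on such machines. The same pattern is repeated
below at segment (pmd) and region-3 (pud) level for large mappings.
Shown standalone for clarity; this is an illustrative rearrangement of
the lines above, not additional code in the patch.

#include <linux/pgtable.h>
#include <asm/setup.h>

/* Illustrative only: reset a pte to the default kernel mapping,
 * mirroring the SET_MEMORY_DEF branch in walk_pte_level().
 */
static pte_t pte_reset_to_default(pte_t pte)
{
	pte_t new = __pte(pte_val(pte) & PAGE_MASK);	/* keep the page frame address */

	new = set_pte_bit(new, PAGE_KERNEL);		/* default kernel protection */
	if (!MACHINE_HAS_NX)				/* NX facility absent: drop NOEXEC */
		new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
	return new;
}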
@@ -151,6 +160,14 @@ static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
 		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
 	else if (flags & SET_MEMORY_X)
 		new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
+	if (flags & SET_MEMORY_INV) {
+		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
+	} else if (flags & SET_MEMORY_DEF) {
+		new = __pmd(pmd_val(new) & PMD_MASK);
+		new = set_pmd_bit(new, SEGMENT_KERNEL);
+		if (!MACHINE_HAS_NX)
+			new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
+	}
 	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
 }
 
@@ -232,6 +249,14 @@ static void modify_pud_page(pud_t *pudp, unsigned long addr,
 		new = set_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
 	else if (flags & SET_MEMORY_X)
 		new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
+	if (flags & SET_MEMORY_INV) {
+		new = set_pud_bit(new, __pgprot(_REGION_ENTRY_INVALID));
+	} else if (flags & SET_MEMORY_DEF) {
+		new = __pud(pud_val(new) & PUD_MASK);
+		new = set_pud_bit(new, REGION3_KERNEL);
+		if (!MACHINE_HAS_NX)
+			new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
+	}
 	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
 }
 
@@ -325,6 +350,16 @@ int __set_memory(unsigned long addr, int numpages, unsigned long flags)
 	return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
 }
 
+int set_direct_map_invalid_noflush(struct page *page)
+{
+	return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_INV);
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+	return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_DEF);
+}
+
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 
 static void ipte_range(pte_t *pte, unsigned long address, int nr)
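Both new functions operate on a single page per call (__set_memory() is
invoked with numpages == 1). A caller that needs to pull several pages
out of the direct map loops over them; below is a hypothetical sketch
with error rollback, assuming a physically contiguous allocation. It is
not part of the patch.

#include <linux/mm.h>
#include <linux/set_memory.h>

/* Hypothetical helper: invalidate nr consecutive pages in the direct
 * mapping, undoing the work if any page fails.
 */
static int example_hide_pages(struct page *page, int nr)
{
	int i, rc;

	for (i = 0; i < nr; i++) {
		rc = set_direct_map_invalid_noflush(page + i);
		if (rc)
			goto undo;
	}
	return 0;
undo:
	while (--i >= 0)
		set_direct_map_default_noflush(page + i);
	return rc;
}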