s390/mm: fix pmd_huge() usage for kernel mapping

pmd_huge() will always return 0 on !HUGETLBFS, however we use that helper
function when walking the kernel page tables to decide if we have a
1MB page frame or not.
Since we create 1MB frames for the kernel 1:1 mapping independently of
HUGETLBFS this can lead to incorrect storage accesses since the code
can assume that we have a pointer to a page table instead of a pointer
to a 1MB frame.

Fix this by adding a pmd_large() primitive like other architectures have
it already and remove all references to HUGETLBFS/HUGETLBPAGE from the
code that walks kernel page tables.

Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
This commit is contained in:
Heiko Carstens 2012-10-01 12:58:34 +02:00 committed by Martin Schwidefsky
parent 521b3d790c
commit 378b1e7a80
3 changed files with 25 additions and 15 deletions

View File

@@ -507,6 +507,15 @@ static inline int pmd_none(pmd_t pmd)
 	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
 }
 
+static inline int pmd_large(pmd_t pmd)
+{
+#ifdef CONFIG_64BIT
+	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
+#else
+	return 0;
+#endif
+}
+
 static inline int pmd_bad(pmd_t pmd)
 {
 	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;

View File

@@ -21,7 +21,7 @@ static void change_page_attr(unsigned long addr, int numpages,
 		pgdp = pgd_offset(&init_mm, addr);
 		pudp = pud_offset(pgdp, addr);
 		pmdp = pmd_offset(pudp, addr);
-		if (pmd_huge(*pmdp)) {
+		if (pmd_large(*pmdp)) {
 			WARN_ON_ONCE(1);
 			continue;
 		}

View File

@@ -79,7 +79,8 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
  */
 static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 {
-	unsigned long address;
+	unsigned long end = start + size;
+	unsigned long address = start;
 	pgd_t *pg_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
@@ -87,7 +88,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 	pte_t pte;
 	int ret = -ENOMEM;
 
-	for (address = start; address < start + size; address += PAGE_SIZE) {
+	while (address < end) {
 		pg_dir = pgd_offset_k(address);
 		if (pgd_none(*pg_dir)) {
 			pu_dir = vmem_pud_alloc();
@@ -108,12 +109,11 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		pm_dir = pmd_offset(pu_dir, address);
 #if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
-		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
-		    (address + HPAGE_SIZE <= start + size) &&
-		    (address >= HPAGE_SIZE)) {
+		if (MACHINE_HAS_EDAT1 && address && !(address & ~PMD_MASK) &&
+		    (address + PMD_SIZE <= end)) {
 			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
 			pmd_val(*pm_dir) = pte_val(pte);
-			address += HPAGE_SIZE - PAGE_SIZE;
+			address += PMD_SIZE;
 			continue;
 		}
 #endif
@@ -126,10 +126,11 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		pt_dir = pte_offset_kernel(pm_dir, address);
 		*pt_dir = pte;
+		address += PAGE_SIZE;
 	}
 	ret = 0;
 out:
-	flush_tlb_kernel_range(start, start + size);
+	flush_tlb_kernel_range(start, end);
 	return ret;
 }
@@ -139,7 +140,8 @@ out:
  */
 static void vmem_remove_range(unsigned long start, unsigned long size)
 {
-	unsigned long address;
+	unsigned long end = start + size;
+	unsigned long address = start;
 	pgd_t *pg_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
@@ -147,7 +149,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 	pte_t pte;
 
 	pte_val(pte) = _PAGE_TYPE_EMPTY;
-	for (address = start; address < start + size; address += PAGE_SIZE) {
+	while (address < end) {
 		pg_dir = pgd_offset_k(address);
 		pu_dir = pud_offset(pg_dir, address);
 		if (pud_none(*pu_dir))
@@ -155,17 +157,16 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 		pm_dir = pmd_offset(pu_dir, address);
 		if (pmd_none(*pm_dir))
 			continue;
-		if (pmd_huge(*pm_dir)) {
+		if (pmd_large(*pm_dir)) {
 			pmd_clear(pm_dir);
-			address += HPAGE_SIZE - PAGE_SIZE;
+			address += PMD_SIZE;
 			continue;
 		}
 		pt_dir = pte_offset_kernel(pm_dir, address);
 		*pt_dir = pte;
+		address += PAGE_SIZE;
 	}
-	flush_tlb_kernel_range(start, start + size);
+	flush_tlb_kernel_range(start, end);
 }
/* /*