s390/mm: fix phys vs virt confusion in pgtable allocation routines
The physical address of page tables is passed around and used as virtual
address in various locations.

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>

commit 2a444fdc24
parent 0f3bf303fb
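The pattern removed below treats a page's physical address (page_to_phys())
as if it were a dereferenceable pointer, which only works while physical and
kernel virtual addresses happen to coincide. A minimal userspace sketch of
the same bug class, using made-up toy_pa()/toy_va() helpers with a fixed
offset (assumptions for illustration only, not the kernel API):

/*
 * Toy model: "physical" addresses differ from "virtual" ones by a
 * constant offset, standing in for the kernel's __pa()/__va() pair.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_SIZE   4096UL
#define TOY_PHYS_OFFSET 0x1000000UL    /* pretend phys = virt - offset */

static uintptr_t toy_pa(void *virt)  { return (uintptr_t)virt - TOY_PHYS_OFFSET; }
static void *toy_va(uintptr_t phys)  { return (void *)(phys + TOY_PHYS_OFFSET); }

int main(void)
{
        /* Stand-in for an allocated page-table page. */
        void *page = aligned_alloc(TOY_PAGE_SIZE, TOY_PAGE_SIZE);
        uintptr_t phys = toy_pa(page);

        /*
         * Buggy pattern: casting the physical address to a pointer and
         * dereferencing it; it only "works" when phys and virt coincide.
         *
         *     uint64_t *table = (uint64_t *)phys;
         *
         * Fixed pattern: convert back to a virtual address first.
         */
        uint64_t *table = toy_va(phys);
        table[0] = 0;

        printf("virt=%p phys=%#lx\n", page, (unsigned long)phys);
        free(page);
        return 0;
}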
@@ -135,7 +135,7 @@ static inline void pmd_populate(struct mm_struct *mm,
 #define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
 
 #define pmd_pgtable(pmd) \
-        (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
+        ((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
 
 /*
  * page table entry allocation/free routines.
@@ -58,7 +58,7 @@ unsigned long *crst_table_alloc(struct mm_struct *mm)
         if (!page)
                 return NULL;
         arch_set_page_dat(page, 2);
-        return (unsigned long *) page_to_phys(page);
+        return (unsigned long *) page_to_virt(page);
 }
 
 void crst_table_free(struct mm_struct *mm, unsigned long *table)
@@ -161,7 +161,7 @@ struct page *page_table_alloc_pgste(struct mm_struct *mm)
 
         page = alloc_page(GFP_KERNEL);
         if (page) {
-                table = (u64 *)page_to_phys(page);
+                table = (u64 *)page_to_virt(page);
                 memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
                 memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
         }
@@ -194,7 +194,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
                         mask = atomic_read(&page->_refcount) >> 24;
                         mask = (mask | (mask >> 4)) & 3;
                         if (mask != 3) {
-                                table = (unsigned long *) page_to_phys(page);
+                                table = (unsigned long *) page_to_virt(page);
                                 bit = mask & 1;         /* =1 -> second 2K */
                                 if (bit)
                                         table += PTRS_PER_PTE;
@@ -217,7 +217,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
         }
         arch_set_page_dat(page, 0);
         /* Initialize page table */
-        table = (unsigned long *) page_to_phys(page);
+        table = (unsigned long *) page_to_virt(page);
         if (mm_alloc_pgste(mm)) {
                 /* Return 4K page table with PGSTEs */
                 atomic_xor_bits(&page->_refcount, 3 << 24);
@@ -239,10 +239,10 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
         struct page *page;
         unsigned int bit, mask;
 
-        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+        page = virt_to_page(table);
         if (!mm_alloc_pgste(mm)) {
                 /* Free 2K page table fragment of a 4K page */
-                bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
+                bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
                 spin_lock_bh(&mm->context.lock);
                 mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
                 mask >>= 24;
@@ -269,14 +269,14 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
         unsigned int bit, mask;
 
         mm = tlb->mm;
-        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+        page = virt_to_page(table);
         if (mm_alloc_pgste(mm)) {
                 gmap_unlink(mm, table, vmaddr);
-                table = (unsigned long *) (__pa(table) | 3);
+                table = (unsigned long *) ((unsigned long)table | 3);
                 tlb_remove_table(tlb, table);
                 return;
         }
-        bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
+        bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
         spin_lock_bh(&mm->context.lock);
         mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
         mask >>= 24;
@@ -285,7 +285,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
         else
                 list_del(&page->lru);
         spin_unlock_bh(&mm->context.lock);
-        table = (unsigned long *) (__pa(table) | (1U << bit));
+        table = (unsigned long *) ((unsigned long) table | (1U << bit));
         tlb_remove_table(tlb, table);
 }
 
@@ -293,7 +293,7 @@ void __tlb_remove_table(void *_table)
 {
         unsigned int mask = (unsigned long) _table & 3;
         void *table = (void *)((unsigned long) _table ^ mask);
-        struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+        struct page *page = virt_to_page(table);
 
         switch (mask) {
         case 0:         /* pmd, pud, or p4d */