[PATCH] mm: i386 sh sh64 ready for split ptlock
Use pte_offset_map_lock, instead of pte_offset_map (or inappropriate pte_offset_kernel) and mm-wide page_table_lock, in sundry arch places.

The i386 vm86 mark_screen_rdonly: yes, there was and is an assumption that the screen fits inside the one page table, as indeed it does.

The sh __do_page_fault: which handles both kernel faults (without lock) and user mm faults (locked - though it set_pte without locking before).

The sh64 flush_cache_range and helpers: which wrongly thought callers held page_table_lock before (only its tlb_start_vma did, and no longer does so); moved the flush loop down, and adjusted the large versus small range decision to consider a range which spans page tables as large.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 60ec558549
parent deceb6cd17
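The recurring change throughout is to drop the mm-wide page_table_lock around a page table walk and instead take the lock returned by pte_offset_map_lock, which with split ptlock covers just the one page table page. A minimal sketch of the idiom, using an illustrative helper name that is not part of this patch:

	static void wrprotect_one(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
	{
		spinlock_t *ptl;
		pte_t *pte;

		/* Maps the pte and takes the per-page-table lock in one call. */
		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		/* Drops that same lock and unmaps the pte. */
		pte_unmap_unlock(pte, ptl);
	}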
arch/i386/kernel/vm86.c

@@ -134,17 +134,16 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
 	return ret;
 }
 
-static void mark_screen_rdonly(struct task_struct * tsk)
+static void mark_screen_rdonly(struct mm_struct *mm)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte, *mapped;
+	pte_t *pte;
+	spinlock_t *ptl;
 	int i;
 
-	preempt_disable();
-	spin_lock(&tsk->mm->page_table_lock);
-	pgd = pgd_offset(tsk->mm, 0xA0000);
+	pgd = pgd_offset(mm, 0xA0000);
 	if (pgd_none_or_clear_bad(pgd))
 		goto out;
 	pud = pud_offset(pgd, 0xA0000);
@@ -153,16 +152,14 @@ static void mark_screen_rdonly(struct task_struct * tsk)
 	pmd = pmd_offset(pud, 0xA0000);
 	if (pmd_none_or_clear_bad(pmd))
 		goto out;
-	pte = mapped = pte_offset_map(pmd, 0xA0000);
+	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
 	for (i = 0; i < 32; i++) {
 		if (pte_present(*pte))
 			set_pte(pte, pte_wrprotect(*pte));
 		pte++;
 	}
-	pte_unmap(mapped);
+	pte_unmap_unlock(pte, ptl);
 out:
-	spin_unlock(&tsk->mm->page_table_lock);
-	preempt_enable();
 	flush_tlb();
 }
 
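The single lock/unlock pair above relies on the assumption called out in the commit message: the loop write-protects 32 pages from 0xA0000, so the last byte touched is below 0xA0000 + 32 * 4096 = 0xC0000, and on i386 one page table maps at least 2MB (4MB without PAE), so the whole screen range lives inside a single page table and one pte lock covers the entire loop.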
@@ -306,7 +303,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 
 	tsk->thread.screen_bitmap = info->screen_bitmap;
 	if (info->flags & VM86_SCREEN_BITMAP)
-		mark_screen_rdonly(tsk);
+		mark_screen_rdonly(tsk->mm);
 	__asm__ __volatile__(
 		"xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t"
 		"movl %0,%%esp\n\t"
arch/sh/mm/fault.c

@@ -194,10 +194,13 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 			       unsigned long address)
 {
 	unsigned long addrmax = P4SEG;
-	pgd_t *dir;
+	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
+	struct mm_struct *mm;
+	spinlock_t *ptl;
+	int ret = 1;
 
 #ifdef CONFIG_SH_KGDB
 	if (kgdb_nofault && kgdb_bus_err_hook)
@@ -208,28 +211,28 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	addrmax = P4SEG_STORE_QUE + 0x04000000;
 #endif
 
-	if (address >= P3SEG && address < addrmax)
-		dir = pgd_offset_k(address);
-	else if (address >= TASK_SIZE)
+	if (address >= P3SEG && address < addrmax) {
+		pgd = pgd_offset_k(address);
+		mm = NULL;
+	} else if (address >= TASK_SIZE)
 		return 1;
-	else if (!current->mm)
+	else if (!(mm = current->mm))
 		return 1;
 	else
-		dir = pgd_offset(current->mm, address);
+		pgd = pgd_offset(mm, address);
 
-	pmd = pmd_offset(dir, address);
-	if (pmd_none(*pmd))
+	pmd = pmd_offset(pgd, address);
+	if (pmd_none_or_clear_bad(pmd))
 		return 1;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
-		return 1;
-	}
-	pte = pte_offset_kernel(pmd, address);
+	if (mm)
+		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+	else
+		pte = pte_offset_kernel(pmd, address);
+
 	entry = *pte;
 	if (pte_none(entry) || pte_not_present(entry)
 			|| (writeaccess && !pte_write(entry)))
-		return 1;
+		goto unlock;
 
 	if (writeaccess)
 		entry = pte_mkdirty(entry);
@@ -251,8 +254,11 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 
 	set_pte(pte, entry);
 	update_mmu_cache(NULL, address, entry);
-
-	return 0;
+	ret = 0;
+unlock:
+	if (mm)
+		pte_unmap_unlock(pte, ptl);
+	return ret;
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
arch/sh64/mm/cache.c

@@ -584,32 +584,36 @@ static void sh64_dcache_purge_phy_page(unsigned long paddr)
 		}
 	}
 }
 
-static void sh64_dcache_purge_user_page(struct mm_struct *mm, unsigned long eaddr)
+static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
+				unsigned long addr, unsigned long end)
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
+	spinlock_t *ptl;
 	unsigned long paddr;
 
-	/* NOTE : all the callers of this have mm->page_table_lock held, so the
-	   following page table traversal is safe even on SMP/pre-emptible. */
-
-	if (!mm) return; /* No way to find physical address of page */
-	pgd = pgd_offset(mm, eaddr);
-	if (pgd_bad(*pgd)) return;
+	if (!mm)
+		return; /* No way to find physical address of page */
+
+	pgd = pgd_offset(mm, addr);
+	if (pgd_bad(*pgd))
+		return;
 
-	pmd = pmd_offset(pgd, eaddr);
-	if (pmd_none(*pmd) || pmd_bad(*pmd)) return;
-
-	pte = pte_offset_kernel(pmd, eaddr);
-	entry = *pte;
-	if (pte_none(entry) || !pte_present(entry)) return;
-
-	paddr = pte_val(entry) & PAGE_MASK;
-
-	sh64_dcache_purge_coloured_phy_page(paddr, eaddr);
-
+	pmd = pmd_offset(pgd, addr);
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
+		return;
+
+	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	do {
+		entry = *pte;
+		if (pte_none(entry) || !pte_present(entry))
+			continue;
+		paddr = pte_val(entry) & PAGE_MASK;
+		sh64_dcache_purge_coloured_phy_page(paddr, addr);
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	pte_unmap_unlock(pte - 1, ptl);
 }
 /****************************************************************************/
 
@@ -668,7 +672,7 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
 	int n_pages;
 
 	n_pages = ((end - start) >> PAGE_SHIFT);
-	if (n_pages >= 64) {
+	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
 #if 1
 		sh64_dcache_purge_all();
 #else
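The added (start ^ (end - 1)) & PMD_MASK term is nonzero exactly when start and end - 1 fall in different PMD_SIZE regions, i.e. when the range is covered by more than one page table page; such a range is now treated as "large", so the small-range path below only ever takes a single pte lock. The same test written out as a hypothetical helper, for illustration only (not part of the patch):

	/* Nonzero iff [start, end) spans more than one page table page. */
	static inline unsigned long spans_page_tables(unsigned long start,
						      unsigned long end)
	{
		return (start ^ (end - 1)) & PMD_MASK;
	}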
|
@ -707,20 +711,10 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
} else {
|
} else {
|
||||||
/* 'Small' range */
|
/* Small range, covered by a single page table page */
|
||||||
unsigned long aligned_start;
|
start &= PAGE_MASK; /* should already be so */
|
||||||
unsigned long eaddr;
|
end = PAGE_ALIGN(end); /* should already be so */
|
||||||
unsigned long last_page_start;
|
sh64_dcache_purge_user_pages(mm, start, end);
|
||||||
|
|
||||||
aligned_start = start & PAGE_MASK;
|
|
||||||
/* 'end' is 1 byte beyond the end of the range */
|
|
||||||
last_page_start = (end - 1) & PAGE_MASK;
|
|
||||||
|
|
||||||
eaddr = aligned_start;
|
|
||||||
while (eaddr <= last_page_start) {
|
|
||||||
sh64_dcache_purge_user_page(mm, eaddr);
|
|
||||||
eaddr += PAGE_SIZE;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@@ -880,9 +874,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 	   addresses from the user address space specified by mm, after writing
 	   back any dirty data.
 
-	   Note(1), 'end' is 1 byte beyond the end of the range to flush.
-
-	   Note(2), this is called with mm->page_table_lock held.*/
+	   Note, 'end' is 1 byte beyond the end of the range to flush. */
 
 	sh64_dcache_purge_user_range(mm, start, end);
 	sh64_icache_inv_user_page_range(mm, start, end);
@@ -898,7 +890,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned
 	   the I-cache must be searched too in case the page in question is
 	   both writable and being executed from (e.g. stack trampolines.)
 
-	   Note(1), this is called with mm->page_table_lock held.
+	   Note, this is called with pte lock held.
 	*/
 
 	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);