[PATCH] x86_64: When checking vmalloc mappings don't use pte_page
The PTEs can point to ioremap mappings too, and these are often outside mem_map. The NUMA hash page lookup functions cannot handle out-of-bounds accesses properly.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
f6b8d4778c
commit
3b9ba4d5e2
|
@@ -234,6 +234,8 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
 /*
  * Handle a fault on the vmalloc or module mapping area
  *
  * This assumes no large pages in there.
  */
 static int vmalloc_fault(unsigned long address)
 {
@@ -272,7 +274,10 @@ static int vmalloc_fault(unsigned long address)
 	if (!pte_present(*pte_ref))
 		return -1;
 	pte = pte_offset_kernel(pmd, address);
-	if (!pte_present(*pte) || pte_page(*pte) != pte_page(*pte_ref))
+	/* Don't use pte_page here, because the mappings can point
+	   outside mem_map, and the NUMA hash lookup cannot handle
+	   that. */
+	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
 		BUG();
 	__flush_tlb_all();
 	return 0;
@@ -346,7 +351,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 * protection error (error_code & 1) == 0.
 	 */
 	if (unlikely(address >= TASK_SIZE)) {
-		if (!(error_code & 5)) {
+		if (!(error_code & 5) &&
+		    ((address >= VMALLOC_START && address < VMALLOC_END) ||
+		      (address >= MODULES_VADDR && address < MODULES_END))) {
 			if (vmalloc_fault(address) < 0)
 				goto bad_area_nosemaphore;
 			return;
Loading…
Reference in New Issue