Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull x86 mm update from Ingo Molnar:

 - speed up 256 GB PCI BAR ioremap()s

 - speed up PTE swapout page reclaim case

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, ioremap: Speed up check for RAM pages
  x86/mm: In the PTE swapout page reclaim case clear the accessed bit instead of flushing the TLB
commit e0d23cdc62
arch/x86/mm/ioremap.c

@@ -50,6 +50,21 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
 	return err;
 }
 
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+			       void *arg)
+{
+	unsigned long i;
+
+	for (i = 0; i < nr_pages; ++i)
+		if (pfn_valid(start_pfn + i) &&
+		    !PageReserved(pfn_to_page(start_pfn + i)))
+			return 1;
+
+	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
+
+	return 0;
+}
+
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
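For context: __ioremap_check_ram() is written as a callback for walk_system_ram_range(), which invokes it only on the pfn ranges that the kernel's resource tree already marks as "System RAM", and stops walking as soon as a callback returns non-zero. A minimal userspace sketch of that contract follows; the ram[] table and walk_system_ram_model() are illustrative stand-ins for this note, not the kernel/resource.c implementation:

#include <stdio.h>

struct ram_range { unsigned long start_pfn, nr_pages; };

/* Stand-in "System RAM" map: two ranges, as an e820-like table might list. */
static const struct ram_range ram[] = {
	{ 0x10,  0x20 },	/* pfns 0x10..0x2f */
	{ 0x100, 0x80 },	/* pfns 0x100..0x17f */
};

/*
 * Model of the walk_system_ram_range() contract: call func once per RAM
 * range intersecting [start_pfn, start_pfn + nr_pages), stop on non-zero.
 */
static int walk_system_ram_model(unsigned long start_pfn, unsigned long nr_pages,
				 void *arg,
				 int (*func)(unsigned long, unsigned long, void *))
{
	unsigned long end = start_pfn + nr_pages;
	int ret = 0;

	for (size_t i = 0; i < sizeof(ram) / sizeof(ram[0]) && !ret; i++) {
		unsigned long lo = ram[i].start_pfn;
		unsigned long hi = lo + ram[i].nr_pages;

		if (lo < end && start_pfn < hi) {
			unsigned long s = start_pfn > lo ? start_pfn : lo;
			unsigned long e = end < hi ? end : hi;

			ret = func(s, e - s, arg);
		}
	}
	return ret;
}

/* Callback in the spirit of __ioremap_check_ram(): any RAM pfn => refuse. */
static int check_ram(unsigned long start_pfn, unsigned long nr_pages, void *arg)
{
	(void)arg;
	printf("RAM overlap at pfn 0x%lx (%lu pages)\n", start_pfn, nr_pages);
	return 1;
}

int main(void)
{
	/* One range query, however large the region being remapped is. */
	if (walk_system_ram_model(0x40, 0x200, NULL, check_ram) == 1)
		printf("refusing ioremap of normal RAM\n");
	return 0;
}

The design point is that the expensive question "is this RAM?" gets answered once per resource range instead of once per page.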
@@ -93,14 +108,11 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
-	last_pfn = last_addr >> PAGE_SHIFT;
-	for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
-		int is_ram = page_is_ram(pfn);
-
-		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
-			return NULL;
-		WARN_ON_ONCE(is_ram);
-	}
+	pfn      = phys_addr >> PAGE_SHIFT;
+	last_pfn = last_addr >> PAGE_SHIFT;
+	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+				  __ioremap_check_ram) == 1)
+		return NULL;
 
 	/*
 	 * Mappings have to be page-aligned
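To see why this matters for a 256 GB PCI BAR: the old loop called page_is_ram() once per pfn, and 256 GB / 4 KB per page = 2^38 / 2^12 = 2^26 = 67,108,864 iterations, each one scanning the e820/resource map from the start. The replacement makes a single walk_system_ram_range() call, whose cost scales with the handful of "System RAM" ranges rather than with the size of the mapping. (The 67-million figure assumes 4 KB pages; it is arithmetic from the commit summary, not a number taken from the patch.)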
arch/x86/mm/pgtable.c

@@ -399,13 +399,20 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma,
 int ptep_clear_flush_young(struct vm_area_struct *vma,
 			   unsigned long address, pte_t *ptep)
 {
-	int young;
-
-	young = ptep_test_and_clear_young(vma, address, ptep);
-	if (young)
-		flush_tlb_page(vma, address);
-
-	return young;
+	/*
+	 * On x86 CPUs, clearing the accessed bit without a TLB flush
+	 * doesn't cause data corruption. [ It could cause incorrect
+	 * page aging and the (mistaken) reclaim of hot pages, but the
+	 * chance of that should be relatively low. ]
+	 *
+	 * So as a performance optimization don't flush the TLB when
+	 * clearing the accessed bit, it will eventually be flushed by
+	 * a context switch or a VM operation anyway. [ In the rare
+	 * event of it not getting flushed for a long time the delay
+	 * shouldn't really matter because there's no real memory
+	 * pressure for swapout to react to. ]
+	 */
+	return ptep_test_and_clear_young(vma, address, ptep);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
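For reference, the work that remains after this patch is just the atomic test-and-clear of the PTE's accessed bit (bit 5 on x86, _PAGE_BIT_ACCESSED). A standalone C11 model of that operation follows; pte_model, PTE_ACCESSED and test_and_clear_young_model() are illustrative names for this sketch, not the kernel's:

#include <stdio.h>
#include <stdatomic.h>

#define PTE_ACCESSED (1UL << 5)	/* x86 accessed bit: bit 5 of the PTE */

typedef _Atomic unsigned long pte_model;

/* Model of ptep_test_and_clear_young(): clear the bit, report old value. */
static int test_and_clear_young_model(pte_model *pte)
{
	unsigned long old = atomic_fetch_and(pte, ~PTE_ACCESSED);

	return (old & PTE_ACCESSED) != 0;
}

int main(void)
{
	pte_model pte = PTE_ACCESSED | 0x1;	/* accessed bit set, low bit as filler */

	printf("young: %d\n", test_and_clear_young_model(&pte));	/* prints 1 */
	printf("young: %d\n", test_and_clear_young_model(&pte));	/* prints 0 */
	return 0;
}

The only behavioral difference the patch introduces is that a stale TLB entry can keep a CPU from re-setting the accessed bit until the next flush, which skews page aging but never corrupts data; that is exactly the trade-off the new comment describes.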