mm: page_alloc: generalize order handling in __free_pages_bootmem()
__free_pages_bootmem() used to special-case higher-order frees to save
individual page checking with free_pages_bulk().

Nowadays, both zero order and non-zero order frees use free_pages(), which
checks each individual page anyway, and so there is little point in making
the distinction anymore.  The higher-order loop will work just fine for
zero order pages.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 43d2b11324
commit c3993076f8
@@ -730,32 +730,23 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	local_irq_restore(flags);
 }
 
-/*
- * permit the bootmem allocator to evade page validation on high-order frees
- */
 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 {
-	if (order == 0) {
-		__ClearPageReserved(page);
-		set_page_count(page, 0);
-		set_page_refcounted(page);
-		__free_page(page);
-	} else {
-		int loop;
-
-		prefetchw(page);
-		for (loop = 0; loop < (1 << order); loop++) {
-			struct page *p = &page[loop];
-
-			if (loop + 1 < (1 << order))
-				prefetchw(p + 1);
-			__ClearPageReserved(p);
-			set_page_count(p, 0);
-		}
-
-		set_page_refcounted(page);
-		__free_pages(page, order);
-	}
+	unsigned int nr_pages = 1 << order;
+	unsigned int loop;
+
+	prefetchw(page);
+	for (loop = 0; loop < nr_pages; loop++) {
+		struct page *p = &page[loop];
+
+		if (loop + 1 < nr_pages)
+			prefetchw(p + 1);
+		__ClearPageReserved(p);
+		set_page_count(p, 0);
+	}
+
+	set_page_refcounted(page);
+	__free_pages(page, order);
 }
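The commit message's argument that the generalized loop subsumes the old order-0 special case comes down to nr_pages being 1 << 0 == 1 for order 0: the loop body then runs exactly once and performs the same __ClearPageReserved()/set_page_count() work the removed branch did. Below is a minimal userspace sketch of that reasoning; struct fake_page, the stub helpers and free_pages_sim() are illustrative stand-ins invented for this note, not the kernel API or the actual implementation.

/* Userspace sketch: the generalized loop degenerates to the old
 * order-0 special case when order == 0.  All names are stand-ins. */
#include <stdio.h>

struct fake_page {
	int reserved;
	int refcount;
};

/* Stand-ins for __ClearPageReserved() and set_page_count(). */
static void clear_reserved(struct fake_page *p)   { p->reserved = 0; }
static void set_count(struct fake_page *p, int n) { p->refcount = n; }

/* Mirrors the structure of the unified loop in __free_pages_bootmem(). */
static void free_pages_sim(struct fake_page *page, unsigned int order)
{
	unsigned int nr_pages = 1U << order;
	unsigned int loop;

	for (loop = 0; loop < nr_pages; loop++) {
		struct fake_page *p = &page[loop];

		clear_reserved(p);
		set_count(p, 0);
	}
	set_count(page, 1);	/* head page gets the single reference */
	printf("order %u: prepared %u page(s)\n", order, nr_pages);
}

int main(void)
{
	struct fake_page one = { 1, 0 };
	struct fake_page four[4] = { { 1, 0 }, { 1, 0 }, { 1, 0 }, { 1, 0 } };

	free_pages_sim(&one, 0);	/* order 0: loop runs exactly once */
	free_pages_sim(four, 2);	/* order 2: loop covers all 1 << 2 pages */
	return 0;
}

Running the sketch prints one prepared page for order 0 and four for order 2, mirroring how the single loop scales with 1 << order instead of branching on order == 0.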