mm: compaction: Fix compiler warning

compact_capture_page() is only used if compaction is enabled, so it should
be moved into the corresponding #ifdef.
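
The warning in question is GCC's -Wunused-function: a static function whose
only caller sits behind an #ifdef becomes unreferenced when that option is
disabled. Below is a minimal sketch of the pattern, using invented names
(CONFIG_FOO, capture_page(), run_foo()) rather than the kernel code itself:

  /* sketch.c -- illustration only; CONFIG_FOO, capture_page() and
   * run_foo() are made-up names, not the kernel's.
   *
   *   gcc -Wall -c sketch.c
   *     -> warning: 'capture_page' defined but not used [-Wunused-function]
   *   gcc -Wall -DCONFIG_FOO -c sketch.c
   *     -> no warning: run_foo() now references capture_page()
   */
  static void capture_page(void)
  {
  	/* stand-in for the real work */
  }

  #ifdef CONFIG_FOO
  void run_foo(void)
  {
  	capture_page();	/* the only caller */
  }
  #endif

Moving the definition inside the #ifdef CONFIG_FOO block, as this commit does
with compact_capture_page() and its guard, means the function is only compiled
when a caller exists, which silences the warning.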

Signed-off-by: Thierry Reding <thierry.reding@avionic-design.de>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit c8bf2d8ba4 (parent 3ea41e6210)
Author:    Thierry Reding <thierry.reding@avionic-design.de>
Date:      2012-12-12 13:51:17 -08:00
Committer: Linus Torvalds

1 file changed, 54 insertions(+), 54 deletions(-)

@@ -215,60 +215,6 @@ static bool suitable_migration_target(struct page *page)
 	return false;
 }
 
-static void compact_capture_page(struct compact_control *cc)
-{
-	unsigned long flags;
-	int mtype, mtype_low, mtype_high;
-
-	if (!cc->page || *cc->page)
-		return;
-
-	/*
-	 * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
-	 * regardless of the migratetype of the freelist it is captured from.
-	 * This is fine because the order for a high-order MIGRATE_MOVABLE
-	 * allocation is typically at least a pageblock size and overall
-	 * fragmentation is not impaired. Other allocation types must
-	 * capture pages from their own migratelist because otherwise they
-	 * could pollute other pageblocks like MIGRATE_MOVABLE with
-	 * difficult-to-move pages, making fragmentation worse overall.
-	 */
-	if (cc->migratetype == MIGRATE_MOVABLE) {
-		mtype_low = 0;
-		mtype_high = MIGRATE_PCPTYPES;
-	} else {
-		mtype_low = cc->migratetype;
-		mtype_high = cc->migratetype + 1;
-	}
-
-	/* Speculatively examine the free lists without zone lock */
-	for (mtype = mtype_low; mtype < mtype_high; mtype++) {
-		int order;
-
-		for (order = cc->order; order < MAX_ORDER; order++) {
-			struct page *page;
-			struct free_area *area;
-
-			area = &(cc->zone->free_area[order]);
-			if (list_empty(&area->free_list[mtype]))
-				continue;
-
-			/* Take the lock and attempt capture of the page */
-			if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
-				return;
-
-			if (!list_empty(&area->free_list[mtype])) {
-				page = list_entry(area->free_list[mtype].next,
-						  struct page, lru);
-				if (capture_free_page(page, cc->order, mtype)) {
-					spin_unlock_irqrestore(&cc->zone->lock,
-							       flags);
-					*cc->page = page;
-					return;
-				}
-			}
-
-			spin_unlock_irqrestore(&cc->zone->lock, flags);
-		}
-	}
-}
-
 /*
  * Isolate free pages onto a private freelist. Caller must hold zone->lock.
  * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
@@ -953,6 +899,60 @@ unsigned long compaction_suitable(struct zone *zone, int order)
 	return COMPACT_CONTINUE;
 }
 
+static void compact_capture_page(struct compact_control *cc)
+{
+	unsigned long flags;
+	int mtype, mtype_low, mtype_high;
+
+	if (!cc->page || *cc->page)
+		return;
+
+	/*
+	 * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
+	 * regardless of the migratetype of the freelist it is captured from.
+	 * This is fine because the order for a high-order MIGRATE_MOVABLE
+	 * allocation is typically at least a pageblock size and overall
+	 * fragmentation is not impaired. Other allocation types must
+	 * capture pages from their own migratelist because otherwise they
+	 * could pollute other pageblocks like MIGRATE_MOVABLE with
+	 * difficult-to-move pages, making fragmentation worse overall.
+	 */
+	if (cc->migratetype == MIGRATE_MOVABLE) {
+		mtype_low = 0;
+		mtype_high = MIGRATE_PCPTYPES;
+	} else {
+		mtype_low = cc->migratetype;
+		mtype_high = cc->migratetype + 1;
+	}
+
+	/* Speculatively examine the free lists without zone lock */
+	for (mtype = mtype_low; mtype < mtype_high; mtype++) {
+		int order;
+
+		for (order = cc->order; order < MAX_ORDER; order++) {
+			struct page *page;
+			struct free_area *area;
+
+			area = &(cc->zone->free_area[order]);
+			if (list_empty(&area->free_list[mtype]))
+				continue;
+
+			/* Take the lock and attempt capture of the page */
+			if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
+				return;
+
+			if (!list_empty(&area->free_list[mtype])) {
+				page = list_entry(area->free_list[mtype].next,
+						  struct page, lru);
+				if (capture_free_page(page, cc->order, mtype)) {
+					spin_unlock_irqrestore(&cc->zone->lock,
+							       flags);
+					*cc->page = page;
+					return;
+				}
+			}
+
+			spin_unlock_irqrestore(&cc->zone->lock, flags);
+		}
+	}
+}
+
 static int compact_zone(struct zone *zone, struct compact_control *cc)
 {
 	int ret;
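
With the definition moved inside the guard, building mm/compaction.c in a
configuration where compaction itself is disabled (presumably CONFIG_CMA=y
with CONFIG_COMPACTION=n, though the commit message does not spell out the
trigger) no longer emits a diagnostic of the form:

  mm/compaction.c: warning: 'compact_capture_page' defined but not used [-Wunused-function]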