vmscan: cleanup the scan batching code
The scan batching logic in vmscan is convoluted. Move it into a standalone function, nr_scan_try_batch(), and document it. No behavior change.

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 6e08a369ee
parent 56e49d2188
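To illustrate the batching with made-up numbers (they are not part of the patch): with swap_cluster_max = 32, four successive requests to scan 10 pages each return 0, 0, 0 and then 40. The first three requests are merely deposited in nr_saved_scan; the fourth pushes the accumulated total past the threshold, so the whole batch is released at once and the counter resets. See nr_scan_try_batch() in the mm/vmscan.c hunk below.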
include/linux/mmzone.h

@@ -334,9 +334,9 @@ struct zone {
 
 	/* Fields commonly accessed by the page reclaim scanner */
 	spinlock_t		lru_lock;
-	struct {
+	struct zone_lru {
 		struct list_head list;
-		unsigned long nr_scan;
+		unsigned long nr_saved_scan;	/* accumulated for batching */
 	} lru[NR_LRU_LISTS];
 
 	struct zone_reclaim_stat reclaim_stat;
mm/page_alloc.c

@@ -3657,7 +3657,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 	zone_pcp_init(zone);
 	for_each_lru(l) {
 		INIT_LIST_HEAD(&zone->lru[l].list);
-		zone->lru[l].nr_scan = 0;
+		zone->lru[l].nr_saved_scan = 0;
 	}
 	zone->reclaim_stat.recent_rotated[0] = 0;
 	zone->reclaim_stat.recent_rotated[1] = 0;
mm/vmscan.c
@@ -1492,6 +1492,26 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	percent[1] = 100 - percent[0];
 }
 
+/*
+ * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
+ * until we collected @swap_cluster_max pages to scan.
+ */
+static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
+				       unsigned long *nr_saved_scan,
+				       unsigned long swap_cluster_max)
+{
+	unsigned long nr;
+
+	*nr_saved_scan += nr_to_scan;
+	nr = *nr_saved_scan;
+
+	if (nr >= swap_cluster_max)
+		*nr_saved_scan = 0;
+	else
+		nr = 0;
+
+	return nr;
+}
+
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
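Since the new helper has no kernel dependencies, its behavior can be checked with a small userspace sketch. The program below copies nr_scan_try_batch() from the hunk above; the harness itself and the chosen numbers (requests of 10 pages, a cluster size of 32) are illustrative assumptions, not part of the commit.

#include <stdio.h>

/*
 * Copied from the mm/vmscan.c hunk above: small scan requests are
 * accumulated in *nr_saved_scan until at least swap_cluster_max pages
 * are pending; then the whole batch is returned and the counter resets.
 */
static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
				       unsigned long *nr_saved_scan,
				       unsigned long swap_cluster_max)
{
	unsigned long nr;

	*nr_saved_scan += nr_to_scan;
	nr = *nr_saved_scan;

	if (nr >= swap_cluster_max)
		*nr_saved_scan = 0;
	else
		nr = 0;

	return nr;
}

int main(void)
{
	unsigned long saved = 0;	/* stands in for zone->lru[l].nr_saved_scan */
	unsigned long cluster = 32;	/* stands in for sc->swap_cluster_max */
	int i;

	for (i = 1; i <= 4; i++) {
		unsigned long nr = nr_scan_try_batch(10, &saved, cluster);

		printf("request %d: scan %lu pages now, %lu pages saved\n",
		       i, nr, saved);
	}
	return 0;
}

The first three requests print "scan 0 pages now" while the saved count grows to 30; the fourth crosses the 32-page threshold and returns the full accumulated batch of 40.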
@@ -1517,14 +1537,11 @@ static void shrink_zone(int priority, struct zone *zone,
 			scan >>= priority;
 			scan = (scan * percent[file]) / 100;
 		}
-		if (scanning_global_lru(sc)) {
-			zone->lru[l].nr_scan += scan;
-			nr[l] = zone->lru[l].nr_scan;
-			if (nr[l] >= swap_cluster_max)
-				zone->lru[l].nr_scan = 0;
-			else
-				nr[l] = 0;
-		} else
+		if (scanning_global_lru(sc))
+			nr[l] = nr_scan_try_batch(scan,
+						  &zone->lru[l].nr_saved_scan,
+						  swap_cluster_max);
+		else
 			nr[l] = scan;
 	}
 
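As the shrink_zone() hunk shows, batching still applies only to global reclaim: when scanning_global_lru(sc) is false (memory-cgroup reclaim), nr[l] = scan is used directly, exactly as before the patch, presumably because the nr_saved_scan accumulator lives in the per-zone LRU structure rather than in per-cgroup state.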
@@ -2124,11 +2141,11 @@ static void shrink_all_zones(unsigned long nr_pages, int prio,
 						l == LRU_ACTIVE_FILE))
 				continue;
 
-			zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
-			if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+			zone->lru[l].nr_saved_scan += (lru_pages >> prio) + 1;
+			if (zone->lru[l].nr_saved_scan >= nr_pages || pass > 3) {
 				unsigned long nr_to_scan;
 
-				zone->lru[l].nr_scan = 0;
+				zone->lru[l].nr_saved_scan = 0;
 				nr_to_scan = min(nr_pages, lru_pages);
 				nr_reclaimed += shrink_list(l, nr_to_scan, zone,
 								sc, prio);
mm/vmstat.c

@@ -718,10 +718,10 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   low_wmark_pages(zone),
 		   high_wmark_pages(zone),
 		   zone->pages_scanned,
-		   zone->lru[LRU_ACTIVE_ANON].nr_scan,
-		   zone->lru[LRU_INACTIVE_ANON].nr_scan,
-		   zone->lru[LRU_ACTIVE_FILE].nr_scan,
-		   zone->lru[LRU_INACTIVE_FILE].nr_scan,
+		   zone->lru[LRU_ACTIVE_ANON].nr_saved_scan,
+		   zone->lru[LRU_INACTIVE_ANON].nr_saved_scan,
+		   zone->lru[LRU_ACTIVE_FILE].nr_saved_scan,
+		   zone->lru[LRU_INACTIVE_FILE].nr_saved_scan,
 		   zone->spanned_pages,
 		   zone->present_pages);