diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index e4b948080d20..a67b38415768 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -142,8 +142,6 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
 	return x;
 }
 
-extern unsigned long global_reclaimable_pages(void);
-
 #ifdef CONFIG_NUMA
 /*
  * Determine the per node value of a stat item. This function
diff --git a/mm/internal.h b/mm/internal.h
index 612c14f5e0f5..29e1e761f9eb 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -83,7 +83,6 @@ extern unsigned long highest_memmap_pfn;
  */
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
-extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern bool zone_reclaimable(struct zone *zone);
 
 /*
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 61119b8a11e6..2d30e2cfe804 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -205,7 +205,8 @@ static unsigned long zone_dirtyable_memory(struct zone *zone)
 	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
 	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
 
-	nr_pages += zone_reclaimable_pages(zone);
+	nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
+	nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
 
 	return nr_pages;
 }
@@ -258,7 +259,8 @@ static unsigned long global_dirtyable_memory(void)
 	x = global_page_state(NR_FREE_PAGES);
 	x -= min(x, dirty_balance_reserve);
 
-	x += global_reclaimable_pages();
+	x += global_page_state(NR_INACTIVE_FILE);
+	x += global_page_state(NR_ACTIVE_FILE);
 
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 90c4075d8d75..a9c74b409681 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -147,7 +147,7 @@ static bool global_reclaim(struct scan_control *sc)
 }
 #endif
 
-unsigned long zone_reclaimable_pages(struct zone *zone)
+static unsigned long zone_reclaimable_pages(struct zone *zone)
 {
 	int nr;
 
@@ -3315,27 +3315,6 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 	wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
-/*
- * The reclaimable count would be mostly accurate.
- * The less reclaimable pages may be
- * - mlocked pages, which will be moved to unevictable list when encountered
- * - mapped pages, which may require several travels to be reclaimed
- * - dirty pages, which is not "instantly" reclaimable
- */
-unsigned long global_reclaimable_pages(void)
-{
-	int nr;
-
-	nr = global_page_state(NR_ACTIVE_FILE) +
-		global_page_state(NR_INACTIVE_FILE);
-
-	if (get_nr_swap_pages() > 0)
-		nr += global_page_state(NR_ACTIVE_ANON) +
-			global_page_state(NR_INACTIVE_ANON);
-
-	return nr;
-}
-
 #ifdef CONFIG_HIBERNATION
 /*
  * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of