mm: rename and change semantics of nr_indirectly_reclaimable_bytes
The vmstat counter NR_INDIRECTLY_RECLAIMABLE_BYTES was introduced by
commit eb59254608 ("mm: introduce NR_INDIRECTLY_RECLAIMABLE_BYTES") with
the goal of accounting objects that can be reclaimed, but cannot be
allocated via a SLAB_RECLAIM_ACCOUNT cache. This is now possible via
kmalloc() with the __GFP_RECLAIMABLE flag, and the dcache external names
user is converted.
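(As an aside, a minimal sketch of that allocation pattern, under stated
assumptions: the struct and helper names below are made up for
illustration; kmalloc() and the GFP flags are the real kernel API.)

    #include <linux/slab.h>
    #include <linux/list.h>

    /*
     * Hypothetical example: a small, reclaimable, shrinker-backed object
     * that previously needed its own SLAB_RECLAIM_ACCOUNT cache can now
     * be allocated from the kmalloc reclaimable caches by passing
     * __GFP_RECLAIMABLE, and is then accounted as reclaimable slab memory.
     */
    struct foo_name {
    	struct list_head lru;
    	size_t len;
    	char data[];
    };

    static struct foo_name *foo_name_alloc(size_t len)
    {
    	struct foo_name *name;

    	name = kmalloc(sizeof(*name) + len, GFP_KERNEL | __GFP_RECLAIMABLE);
    	if (!name)
    		return NULL;
    	name->len = len;
    	return name;
    }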
The counter is, however, still useful for accounting direct page
allocations (i.e. not slab) with a shrinker, such as the ION page pool.
So keep it, and:
- change granularity to pages to be more like other counters; sub-page
allocations should be able to use kmalloc (see the sketch after this list)
- rename the counter to NR_KERNEL_MISC_RECLAIMABLE
- expose the counter again in vmstat as "nr_kernel_misc_reclaimable"; the
check for not printing "hidden" counters can then be removed again
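A hedged sketch of what page-granularity accounting against the renamed
counter looks like for a hypothetical non-slab page pool with a shrinker
(the pool structure and helpers below are made up; mod_node_page_state(),
page_pgdat() and NR_KERNEL_MISC_RECLAIMABLE are the real interfaces,
mirroring the ION change in the diff below):

    #include <linux/list.h>
    #include <linux/mm.h>
    #include <linux/mmzone.h>
    #include <linux/vmstat.h>

    /* Hypothetical pool of order-N pages that a shrinker can drain. */
    struct foo_page_pool {
    	struct list_head items;
    	unsigned int order;
    };

    static void foo_pool_add(struct foo_page_pool *pool, struct page *page)
    {
    	list_add_tail(&page->lru, &pool->items);
    	/* Account in pages (1 << order), not bytes as the old counter did. */
    	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
    			    1 << pool->order);
    }

    static struct page *foo_pool_remove(struct foo_page_pool *pool)
    {
    	struct page *page;

    	if (list_empty(&pool->items))
    		return NULL;
    	page = list_first_entry(&pool->items, struct page, lru);
    	list_del(&page->lru);
    	/* Undo the accounting when the page leaves the pool. */
    	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
    			    -(1 << pool->order));
    	return page;
    }

A real pool would of course need locking and a registered shrinker; both
are omitted to keep the sketch focused on the counter update.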
Link: http://lkml.kernel.org/r/20180731090649.16028-5-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Roman Gushchin <guro@fb.com>
Cc: Vijayanand Jitta <vjitta@codeaurora.org>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit b29940c1ab (parent 2e03b4bc4a)
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -33,8 +33,8 @@ static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
 		pool->low_count++;
 	}
 
-	mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
-							(1 << (PAGE_SHIFT + pool->order)));
+	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
+							1 << pool->order);
 	mutex_unlock(&pool->mutex);
 }
 
@@ -53,8 +53,8 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
 	}
 
 	list_del(&page->lru);
-	mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
-							-(1 << (PAGE_SHIFT + pool->order)));
+	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
+							-(1 << pool->order));
 	return page;
 }
 
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -180,7 +180,7 @@ enum node_stat_item {
 	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
 	NR_DIRTIED,		/* page dirtyings since bootup */
 	NR_WRITTEN,		/* page writings since bootup */
-	NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */
+	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
 	NR_VM_NODE_STAT_ITEMS
 };
 
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4701,6 +4701,7 @@ long si_mem_available(void)
 	unsigned long pagecache;
 	unsigned long wmark_low = 0;
 	unsigned long pages[NR_LRU_LISTS];
+	unsigned long reclaimable;
 	struct zone *zone;
 	int lru;
 
@@ -4726,19 +4727,13 @@ long si_mem_available(void)
 	available += pagecache;
 
 	/*
-	 * Part of the reclaimable slab consists of items that are in use,
-	 * and cannot be freed. Cap this estimate at the low watermark.
+	 * Part of the reclaimable slab and other kernel memory consists of
+	 * items that are in use, and cannot be freed. Cap this estimate at the
+	 * low watermark.
 	 */
-	available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
-		     min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
-			 wmark_low);
-
-	/*
-	 * Part of the kernel memory, which can be released under memory
-	 * pressure.
-	 */
-	available += global_node_page_state(NR_INDIRECTLY_RECLAIMABLE_BYTES) >>
-		PAGE_SHIFT;
+	reclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE) +
+		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
+	available += reclaimable - min(reclaimable / 2, wmark_low);
 
 	if (available < 0)
 		available = 0;
 
--- a/mm/util.c
+++ b/mm/util.c
@@ -678,8 +678,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 		 * Part of the kernel memory, which can be released
 		 * under memory pressure.
 		 */
-		free += global_node_page_state(
-			NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT;
+		free += global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
 
 		/*
 		 * Leave reserved pages. The pages are not for anonymous pages.
 
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1161,7 +1161,7 @@ const char * const vmstat_text[] = {
 	"nr_vmscan_immediate_reclaim",
 	"nr_dirtied",
 	"nr_written",
-	"", /* nr_indirectly_reclaimable */
+	"nr_kernel_misc_reclaimable",
 
 	/* enum writeback_stat_item counters */
 	"nr_dirty_threshold",
@@ -1706,10 +1706,6 @@ static int vmstat_show(struct seq_file *m, void *arg)
 	unsigned long *l = arg;
 	unsigned long off = l - (unsigned long *)m->private;
 
-	/* Skip hidden vmstat items. */
-	if (*vmstat_text[off] == '\0')
-		return 0;
-
 	seq_puts(m, vmstat_text[off]);
 	seq_put_decimal_ull(m, " ", *l);
 	seq_putc(m, '\n');