Merge branch 'akpm' (patches from Andrew)
Merge more fixes from Andrew Morton:
 "17 fixes"

Mostly mm fixes and one ocfs2 locking fix.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: memcontrol: fix network errors from failing __GFP_ATOMIC charges
  mm/memory_hotplug: fix updating the node span
  scripts/gdb: fix debugging modules compiled with hot/cold partitioning
  mm: slab: make page_cgroup_ino() to recognize non-compound slab pages properly
  MAINTAINERS: update information for "MEMORY MANAGEMENT"
  dump_stack: avoid the livelock of the dump_lock
  zswap: add Vitaly to the maintainers list
  mm/page_alloc.c: ratelimit allocation failure warnings more aggressively
  mm/khugepaged: fix might_sleep() warn with CONFIG_HIGHPTE=y
  mm, vmstat: reduce zone->lock holding time by /proc/pagetypeinfo
  mm, vmstat: hide /proc/pagetypeinfo from normal users
  mm/mmu_notifiers: use the right return code for WARN_ON
  ocfs2: protect extent tree in ocfs2_prepare_inode_for_write()
  mm: thp: handle page cache THP correctly in PageTransCompoundMap
  mm, meminit: recalculate pcpu batch and high limits after init completes
  mm/gup_benchmark: fix MAP_HUGETLB case
  mm: memcontrol: fix NULL-ptr deref in percpu stats flush
commit 4dd5815825
MAINTAINERS

@@ -10519,8 +10519,12 @@ F: mm/memblock.c
 F: Documentation/core-api/boot-time-mm.rst
 
 MEMORY MANAGEMENT
+M: Andrew Morton <akpm@linux-foundation.org>
 L: linux-mm@kvack.org
 W: http://www.linux-mm.org
+T: quilt https://ozlabs.org/~akpm/mmotm/
+T: quilt https://ozlabs.org/~akpm/mmots/
+T: git git://github.com/hnaz/linux-mm.git
 S: Maintained
 F: include/linux/mm.h
 F: include/linux/gfp.h

@@ -18034,6 +18038,7 @@ F: Documentation/vm/zsmalloc.rst
 ZSWAP COMPRESSED SWAP CACHING
 M: Seth Jennings <sjenning@redhat.com>
 M: Dan Streetman <ddstreet@ieee.org>
+M: Vitaly Wool <vitaly.wool@konsulko.com>
 L: linux-mm@kvack.org
 S: Maintained
 F: mm/zswap.c
fs/ocfs2/file.c (136 lines changed)

@@ -2098,53 +2098,89 @@ static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
 	return 0;
 }
 
-static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
-					    struct file *file,
-					    loff_t pos, size_t count,
-					    int *meta_level)
+static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
+					    struct buffer_head **di_bh,
+					    int meta_level,
+					    int overwrite_io,
+					    int write_sem,
+					    int wait)
 {
-	int ret;
-	struct buffer_head *di_bh = NULL;
-	u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
-	u32 clusters =
-		ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
+	int ret = 0;
 
-	ret = ocfs2_inode_lock(inode, &di_bh, 1);
-	if (ret) {
-		mlog_errno(ret);
+	if (wait)
+		ret = ocfs2_inode_lock(inode, NULL, meta_level);
+	else
+		ret = ocfs2_try_inode_lock(inode,
+			overwrite_io ? NULL : di_bh, meta_level);
+	if (ret < 0)
 		goto out;
+
+	if (wait) {
+		if (write_sem)
+			down_write(&OCFS2_I(inode)->ip_alloc_sem);
+		else
+			down_read(&OCFS2_I(inode)->ip_alloc_sem);
+	} else {
+		if (write_sem)
+			ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
+		else
+			ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);
+
+		if (!ret) {
+			ret = -EAGAIN;
+			goto out_unlock;
+		}
 	}
 
-	*meta_level = 1;
-
-	ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
-	if (ret)
-		mlog_errno(ret);
-out:
-	brelse(di_bh);
 	return ret;
+
+out_unlock:
+	brelse(*di_bh);
+	ocfs2_inode_unlock(inode, meta_level);
+out:
+	return ret;
+}
+
+static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
+					       struct buffer_head **di_bh,
+					       int meta_level,
+					       int write_sem)
+{
+	if (write_sem)
+		up_write(&OCFS2_I(inode)->ip_alloc_sem);
+	else
+		up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+	brelse(*di_bh);
+	*di_bh = NULL;
+
+	if (meta_level >= 0)
+		ocfs2_inode_unlock(inode, meta_level);
 }
 
 static int ocfs2_prepare_inode_for_write(struct file *file,
 					 loff_t pos, size_t count, int wait)
 {
 	int ret = 0, meta_level = 0, overwrite_io = 0;
+	int write_sem = 0;
 	struct dentry *dentry = file->f_path.dentry;
 	struct inode *inode = d_inode(dentry);
 	struct buffer_head *di_bh = NULL;
+	u32 cpos;
+	u32 clusters;
 
 	/*
 	 * We start with a read level meta lock and only jump to an ex
 	 * if we need to make modifications here.
 	 */
 	for(;;) {
-		if (wait)
-			ret = ocfs2_inode_lock(inode, NULL, meta_level);
-		else
-			ret = ocfs2_try_inode_lock(inode,
-				overwrite_io ? NULL : &di_bh, meta_level);
+		ret = ocfs2_inode_lock_for_extent_tree(inode,
+						       &di_bh,
+						       meta_level,
+						       overwrite_io,
+						       write_sem,
+						       wait);
 		if (ret < 0) {
-			meta_level = -1;
 			if (ret != -EAGAIN)
 				mlog_errno(ret);
 			goto out;

@@ -2156,15 +2192,8 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
 		 */
 		if (!wait && !overwrite_io) {
 			overwrite_io = 1;
-			if (!down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem)) {
-				ret = -EAGAIN;
-				goto out_unlock;
-			}
 
 			ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
-			brelse(di_bh);
-			di_bh = NULL;
-			up_read(&OCFS2_I(inode)->ip_alloc_sem);
 			if (ret < 0) {
 				if (ret != -EAGAIN)
 					mlog_errno(ret);

@@ -2183,7 +2212,10 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
 		 * set inode->i_size at the end of a write. */
 		if (should_remove_suid(dentry)) {
 			if (meta_level == 0) {
-				ocfs2_inode_unlock(inode, meta_level);
+				ocfs2_inode_unlock_for_extent_tree(inode,
+								   &di_bh,
+								   meta_level,
+								   write_sem);
 				meta_level = 1;
 				continue;
 			}

@@ -2197,18 +2229,32 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
 
 		ret = ocfs2_check_range_for_refcount(inode, pos, count);
 		if (ret == 1) {
-			ocfs2_inode_unlock(inode, meta_level);
-			meta_level = -1;
-
-			ret = ocfs2_prepare_inode_for_refcount(inode,
-							       file,
-							       pos,
-							       count,
-							       &meta_level);
+			ocfs2_inode_unlock_for_extent_tree(inode,
+							   &di_bh,
+							   meta_level,
+							   write_sem);
+			ret = ocfs2_inode_lock_for_extent_tree(inode,
+							       &di_bh,
+							       meta_level,
+							       overwrite_io,
+							       1,
+							       wait);
+			write_sem = 1;
+			if (ret < 0) {
+				if (ret != -EAGAIN)
+					mlog_errno(ret);
+				goto out;
+			}
+
+			cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+			clusters =
+				ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
+			ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
 		}
 
 		if (ret < 0) {
-			mlog_errno(ret);
+			if (ret != -EAGAIN)
+				mlog_errno(ret);
 			goto out_unlock;
 		}
 

@@ -2219,10 +2265,10 @@ out_unlock:
 	trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
 					    pos, count, wait);
 
-	brelse(di_bh);
-
-	if (meta_level >= 0)
-		ocfs2_inode_unlock(inode, meta_level);
+	ocfs2_inode_unlock_for_extent_tree(inode,
+					   &di_bh,
+					   meta_level,
+					   write_sem);
 
 out:
 	return ret;
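The new helpers pair every lock acquisition with a matching unlock and let the non-blocking (nowait) path report -EAGAIN so the caller can back off and retry. For illustration only, here is the same try-lock-or-EAGAIN pattern in user space as a minimal sketch using POSIX rwlocks; the function and variable names are invented for this example and are not part of ocfs2.

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Take tree_lock for read or write; in non-blocking mode report -EAGAIN. */
    static int lock_for_extent_tree(int write_sem, int wait)
    {
        int err;

        if (wait)
            err = write_sem ? pthread_rwlock_wrlock(&tree_lock)
                            : pthread_rwlock_rdlock(&tree_lock);
        else
            err = write_sem ? pthread_rwlock_trywrlock(&tree_lock)
                            : pthread_rwlock_tryrdlock(&tree_lock);

        return (err == EBUSY) ? -EAGAIN : -err;
    }

    static void unlock_for_extent_tree(void)
    {
        pthread_rwlock_unlock(&tree_lock);
    }

    int main(void)
    {
        /* Blocking caller: sleeps until the lock is free. */
        if (lock_for_extent_tree(1, 1) == 0)
            unlock_for_extent_tree();

        /* Non-blocking caller: may see -EAGAIN and must retry later. */
        if (lock_for_extent_tree(1, 0) == -EAGAIN)
            fprintf(stderr, "lock busy, retrying later\n");
        else
            unlock_for_extent_tree();

        return 0;
    }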
include/linux/mm.h

@@ -695,11 +695,6 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
 
 extern void kvfree(const void *addr);
 
-static inline atomic_t *compound_mapcount_ptr(struct page *page)
-{
-	return &page[1].compound_mapcount;
-}
-
 static inline int compound_mapcount(struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
include/linux/mm_types.h

@@ -221,6 +221,11 @@ struct page {
 #endif
 } _struct_page_alignment;
 
+static inline atomic_t *compound_mapcount_ptr(struct page *page)
+{
+	return &page[1].compound_mapcount;
+}
+
 /*
  * Used for sizing the vmemmap region on some architectures
  */
include/linux/page-flags.h

@@ -622,12 +622,28 @@ static inline int PageTransCompound(struct page *page)
  *
  * Unlike PageTransCompound, this is safe to be called only while
  * split_huge_pmd() cannot run from under us, like if protected by the
- * MMU notifier, otherwise it may result in page->_mapcount < 0 false
+ * MMU notifier, otherwise it may result in page->_mapcount check false
  * positives.
+ *
+ * We have to treat page cache THP differently since every subpage of it
+ * would get _mapcount inc'ed once it is PMD mapped.  But, it may be PTE
+ * mapped in the current process so comparing subpage's _mapcount to
+ * compound_mapcount to filter out PTE mapped case.
  */
 static inline int PageTransCompoundMap(struct page *page)
 {
-	return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
+	struct page *head;
+
+	if (!PageTransCompound(page))
+		return 0;
+
+	if (PageAnon(page))
+		return atomic_read(&page->_mapcount) < 0;
+
+	head = compound_head(page);
+	/* File THP is PMD mapped and not PTE mapped */
+	return atomic_read(&page->_mapcount) ==
+	       atomic_read(compound_mapcount_ptr(head));
 }
 
 /*
lib/dump_stack.c

@@ -106,7 +106,12 @@ retry:
 		was_locked = 1;
 	} else {
 		local_irq_restore(flags);
-		cpu_relax();
+		/*
+		 * Wait for the lock to release before jumping to
+		 * atomic_cmpxchg() in order to mitigate the thundering herd
+		 * problem.
+		 */
+		do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
 		goto retry;
 	}
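The fix spins until dump_lock is actually released before retrying the compare-and-exchange, instead of retrying immediately and having every waiter hammer the same cache line. A rough user-space sketch of the same idea with C11 atomics follows; it is simplified (it omits the reentrant same-CPU case the kernel handles) and all names are illustrative.

    #include <stdatomic.h>
    #include <sched.h>

    static atomic_int dump_lock = -1;    /* -1 means unlocked, else owner id */

    static void dump_lock_acquire(int cpu)
    {
        int old;

    retry:
        old = -1;
        if (!atomic_compare_exchange_strong(&dump_lock, &old, cpu)) {
            /*
             * Spin until the current owner drops the lock before retrying
             * the compare-and-swap, so waiters do not all retry at once.
             */
            while (atomic_load(&dump_lock) != -1)
                sched_yield();    /* stand-in for cpu_relax() */
            goto retry;
        }
    }

    static void dump_lock_release(void)
    {
        atomic_store(&dump_lock, -1);
    }

    int main(void)
    {
        dump_lock_acquire(0);
        dump_lock_release();
        return 0;
    }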
mm/khugepaged.c

@@ -1028,12 +1028,13 @@ static void collapse_huge_page(struct mm_struct *mm,
 
 	anon_vma_lock_write(vma->anon_vma);
 
-	pte = pte_offset_map(pmd, address);
-	pte_ptl = pte_lockptr(mm, pmd);
-
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
 				address, address + HPAGE_PMD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
+
+	pte = pte_offset_map(pmd, address);
+	pte_ptl = pte_lockptr(mm, pmd);
+
 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
 	/*
 	 * After this gup_fast can't run anymore. This also removes
mm/memcontrol.c

@@ -484,7 +484,7 @@ ino_t page_cgroup_ino(struct page *page)
 	unsigned long ino = 0;
 
 	rcu_read_lock();
-	if (PageHead(page) && PageSlab(page))
+	if (PageSlab(page) && !PageTail(page))
 		memcg = memcg_from_slab_page(page);
 	else
 		memcg = READ_ONCE(page->mem_cgroup);

@@ -2534,6 +2534,15 @@ retry:
 		goto retry;
 	}
 
+	/*
+	 * Memcg doesn't have a dedicated reserve for atomic
+	 * allocations. But like the global atomic pool, we need to
+	 * put the burden of reclaim on regular allocation requests
+	 * and let these go through as privileged allocations.
+	 */
+	if (gfp_mask & __GFP_ATOMIC)
+		goto force;
+
 	/*
 	 * Unlike in global OOM situations, memcg is not in a physical
 	 * memory shortage. Allow dying and OOM-killed tasks to

@@ -5014,12 +5023,6 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 {
 	int node;
 
-	/*
-	 * Flush percpu vmstats and vmevents to guarantee the value correctness
-	 * on parent's and all ancestor levels.
-	 */
-	memcg_flush_percpu_vmstats(memcg, false);
-	memcg_flush_percpu_vmevents(memcg);
 	for_each_node(node)
 		free_mem_cgroup_per_node_info(memcg, node);
 	free_percpu(memcg->vmstats_percpu);

@@ -5030,6 +5033,12 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 static void mem_cgroup_free(struct mem_cgroup *memcg)
 {
 	memcg_wb_domain_exit(memcg);
+	/*
+	 * Flush percpu vmstats and vmevents to guarantee the value correctness
+	 * on parent's and all ancestor levels.
+	 */
+	memcg_flush_percpu_vmstats(memcg, false);
+	memcg_flush_percpu_vmevents(memcg);
 	__mem_cgroup_free(memcg);
 }
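The __GFP_ATOMIC change lets atomic charges overrun the memcg limit (much like the global atomic reserves) rather than fail, while regular allocations still carry the reclaim burden. A toy model of that policy on a made-up counter, purely for illustration; none of these names exist in the kernel.

    #include <stdbool.h>
    #include <stdio.h>

    struct counter {
        long usage;
        long limit;
    };

    /* Try to charge; atomic requests may overrun the limit instead of failing. */
    static bool try_charge(struct counter *c, long pages, bool atomic)
    {
        if (c->usage + pages <= c->limit) {
            c->usage += pages;
            return true;
        }
        /* Over limit: normally the caller would reclaim and retry ... */
        if (atomic) {
            /* ... but atomic contexts cannot reclaim, so charge anyway. */
            c->usage += pages;
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct counter memcg = { .usage = 95, .limit = 100 };

        printf("regular charge of 10: %s\n",
               try_charge(&memcg, 10, false) ? "ok" : "fails");
        printf("atomic charge of 10:  %s\n",
               try_charge(&memcg, 10, true) ? "ok (overruns limit)" : "fails");
        return 0;
    }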
mm/memory_hotplug.c

@@ -447,6 +447,14 @@ static void update_pgdat_span(struct pglist_data *pgdat)
 					     zone->spanned_pages;
 
 		/* No need to lock the zones, they can't change. */
+		if (!zone->spanned_pages)
+			continue;
+		if (!node_end_pfn) {
+			node_start_pfn = zone->zone_start_pfn;
+			node_end_pfn = zone_end_pfn;
+			continue;
+		}
+
 		if (zone_end_pfn > node_end_pfn)
 			node_end_pfn = zone_end_pfn;
 		if (zone->zone_start_pfn < node_start_pfn)
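The fix makes update_pgdat_span() ignore empty zones and seed the node span from the first non-empty zone instead of widening stale start/end values. A small stand-alone sketch of that recomputation; the struct and values here are invented for illustration and do not mirror the kernel types.

    #include <stdio.h>

    struct zone_span {
        unsigned long start_pfn;
        unsigned long spanned_pages;
    };

    /* Recompute a node's pfn span from its zones, skipping empty zones. */
    static void node_span(const struct zone_span *zones, int nr,
                          unsigned long *node_start, unsigned long *node_end)
    {
        unsigned long start = 0, end = 0;
        int i;

        for (i = 0; i < nr; i++) {
            unsigned long zend = zones[i].start_pfn + zones[i].spanned_pages;

            if (!zones[i].spanned_pages)    /* empty zone contributes nothing */
                continue;
            if (!end) {                     /* first non-empty zone seeds the span */
                start = zones[i].start_pfn;
                end = zend;
                continue;
            }
            if (zend > end)
                end = zend;
            if (zones[i].start_pfn < start)
                start = zones[i].start_pfn;
        }
        *node_start = start;
        *node_end = end;
    }

    int main(void)
    {
        struct zone_span zones[] = { {0, 0}, {4096, 1024}, {8192, 512} };
        unsigned long s, e;

        node_span(zones, 3, &s, &e);
        printf("node spans pfn %lu-%lu\n", s, e);    /* prints 4096-8704 */
        return 0;
    }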
mm/mmu_notifier.c

@@ -180,7 +180,7 @@ int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
 				mn->ops->invalidate_range_start, _ret,
 				!mmu_notifier_range_blockable(range) ? "non-" : "");
 			WARN_ON(mmu_notifier_range_blockable(range) ||
-				ret != -EAGAIN);
+				_ret != -EAGAIN);
 			ret = _ret;
 		}
 	}
mm/page_alloc.c

@@ -1947,6 +1947,14 @@ void __init page_alloc_init_late(void)
 	/* Block until all are initialised */
 	wait_for_completion(&pgdat_init_all_done_comp);
 
+	/*
+	 * The number of managed pages has changed due to the initialisation
+	 * so the pcpu batch and high limits needs to be updated or the limits
+	 * will be artificially small.
+	 */
+	for_each_populated_zone(zone)
+		zone_pcp_update(zone);
+
 	/*
 	 * We initialized the rest of the deferred pages.  Permanently disable
 	 * on-demand struct page initialization.

@@ -3720,10 +3728,6 @@ try_this_zone:
 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
 {
 	unsigned int filter = SHOW_MEM_FILTER_NODES;
-	static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
-
-	if (!__ratelimit(&show_mem_rs))
-		return;
 
 	/*
 	 * This documents exceptions given to allocations in certain

@@ -3744,8 +3748,7 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
 {
 	struct va_format vaf;
 	va_list args;
-	static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
-				      DEFAULT_RATELIMIT_BURST);
+	static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
 
 	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
 		return;

@@ -8514,7 +8517,6 @@ void free_contig_range(unsigned long pfn, unsigned int nr_pages)
 	WARN(count != 0, "%d pages are still in use!\n", count);
 }
 
-#ifdef CONFIG_MEMORY_HOTPLUG
 /*
  * The zone indicated has a new number of managed_pages; batch sizes and percpu
  * page high values need to be recalulated.

@@ -8528,7 +8530,6 @@ void __meminit zone_pcp_update(struct zone *zone)
 					per_cpu_ptr(zone->pageset, cpu));
 	mutex_unlock(&pcp_batch_high_lock);
 }
-#endif
 
 void zone_pcp_reset(struct zone *zone)
 {
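After this change, allocation-failure warnings come from a single ratelimit state that allows one message per 10*HZ (roughly one every 10 seconds) instead of the default 10-message burst per 5 seconds plus a second limiter in warn_alloc_show_mem(). A rough user-space analogue of an interval/burst ratelimiter follows, as a sketch only; the struct and function names are made up for this example.

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    struct ratelimit {
        time_t interval;    /* window length in seconds */
        int burst;          /* messages allowed per window */
        time_t begin;
        int printed;
    };

    /* Return true if a message may be emitted now, mirroring __ratelimit(). */
    static bool ratelimit_ok(struct ratelimit *rs)
    {
        time_t now = time(NULL);

        if (!rs->begin || now - rs->begin >= rs->interval) {
            rs->begin = now;    /* start a new window */
            rs->printed = 0;
        }
        if (rs->printed < rs->burst) {
            rs->printed++;
            return true;
        }
        return false;
    }

    int main(void)
    {
        /* Analogous to DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1). */
        struct ratelimit nopage_rs = { .interval = 10, .burst = 1 };
        int i;

        for (i = 0; i < 5; i++)
            if (ratelimit_ok(&nopage_rs))
                printf("allocation failure warning %d\n", i);
        return 0;
    }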
mm/slab.h

@@ -323,8 +323,8 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
  * Expects a pointer to a slab page. Please note, that PageSlab() check
  * isn't sufficient, as it returns true also for tail compound slab pages,
  * which do not have slab_cache pointer set.
- * So this function assumes that the page can pass PageHead() and PageSlab()
- * checks.
+ * So this function assumes that the page can pass PageSlab() && !PageTail()
+ * check.
  *
  * The kmem_cache can be reparented asynchronously. The caller must ensure
  * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
mm/vmstat.c (25 lines changed)

@@ -1383,12 +1383,29 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
 			unsigned long freecount = 0;
 			struct free_area *area;
 			struct list_head *curr;
+			bool overflow = false;
 
 			area = &(zone->free_area[order]);
 
-			list_for_each(curr, &area->free_list[mtype])
-				freecount++;
-			seq_printf(m, "%6lu ", freecount);
+			list_for_each(curr, &area->free_list[mtype]) {
+				/*
+				 * Cap the free_list iteration because it might
+				 * be really large and we are under a spinlock
+				 * so a long time spent here could trigger a
+				 * hard lockup detector. Anyway this is a
+				 * debugging tool so knowing there is a handful
+				 * of pages of this order should be more than
+				 * sufficient.
+				 */
+				if (++freecount >= 100000) {
+					overflow = true;
+					break;
+				}
+			}
+			seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
+			spin_unlock_irq(&zone->lock);
+			cond_resched();
+			spin_lock_irq(&zone->lock);
 		}
 		seq_putc(m, '\n');
 	}

@@ -1972,7 +1989,7 @@ void __init init_mm_internals(void)
 #endif
 #ifdef CONFIG_PROC_FS
 	proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
-	proc_create_seq("pagetypeinfo", 0444, NULL, &pagetypeinfo_op);
+	proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
 	proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
 	proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
 #endif
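With mode 0400, /proc/pagetypeinfo is now readable only by root, so unprivileged readers get a permission error at open time. A minimal reader that handles that case is sketched below; it is a hypothetical consumer, not part of the patch.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        FILE *f = fopen("/proc/pagetypeinfo", "r");
        char line[512];

        if (!f) {
            /* Since the 0400 change, non-root readers typically see EACCES here. */
            fprintf(stderr, "pagetypeinfo unavailable: %s\n", strerror(errno));
            return 1;
        }
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }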
scripts/gdb/linux/symbols.py

@@ -99,7 +99,8 @@ lx-symbols command."""
             attrs[n]['name'].string(): attrs[n]['address']
             for n in range(int(sect_attrs['nsections']))}
         args = []
-        for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]:
+        for section_name in [".data", ".data..read_mostly", ".rodata", ".bss",
+                             ".text", ".text.hot", ".text.unlikely"]:
             address = section_name_to_address.get(section_name)
             if address:
                 args.append(" -s {name} {addr}".format(
tools/testing/selftests/vm/gup_benchmark.c

@@ -71,7 +71,7 @@ int main(int argc, char **argv)
 		flags |= MAP_SHARED;
 		break;
 	case 'H':
-		flags |= MAP_HUGETLB;
+		flags |= (MAP_HUGETLB | MAP_ANONYMOUS);
 		break;
 	default:
 		return -1;
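MAP_HUGETLB needs an anonymous mapping (or a hugetlbfs-backed file); combined with the benchmark's regular file descriptor it made mmap() fail, hence the added MAP_ANONYMOUS. A stand-alone illustration of the working combination is sketched below; it assumes 2 MiB hugepages have been reserved on the system, and the size chosen is only an example.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 2UL * 1024 * 1024;    /* one 2 MiB hugepage */
        void *p;

        /* Anonymous hugepage mapping: fd is -1, works if hugepages are reserved. */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if (p == MAP_FAILED)
            fprintf(stderr, "anon hugetlb mmap failed: %s\n", strerror(errno));
        else
            munmap(p, len);

        return 0;
    }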