Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "9 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  fs/proc/proc_sysctl.c: Fix a NULL pointer dereference
  mm/page_alloc.c: fix never set ALLOC_NOFRAGMENT flag
  mm/page_alloc.c: avoid potential NULL pointer dereference
  mm, page_alloc: always use a captured page regardless of compaction result
  mm: do not boost watermarks to avoid fragmentation for the DISCONTIG memory model
  lib/test_vmalloc.c: do not create cpumask_t variable on stack
  lib/Kconfig.debug: fix build error without CONFIG_BLOCK
  zram: pass down the bvec we need to read into in the work struct
  mm/memory_hotplug.c: drop memory device reference after find_memory_block()

commit ce944935ee
Linus Torvalds, 2019-04-26 18:15:33 -07:00
7 changed files with 39 additions and 23 deletions

Documentation/sysctl/vm.txt

@@ -866,14 +866,14 @@ The intent is that compaction has less work to do in the future and to
 increase the success rate of future high-order allocations such as SLUB
 allocations, THP and hugetlbfs pages.
 
-To make it sensible with respect to the watermark_scale_factor parameter,
-the unit is in fractions of 10,000. The default value of 15,000 means
-that up to 150% of the high watermark will be reclaimed in the event of
-a pageblock being mixed due to fragmentation. The level of reclaim is
-determined by the number of fragmentation events that occurred in the
-recent past. If this value is smaller than a pageblock then a pageblocks
-worth of pages will be reclaimed (e.g. 2MB on 64-bit x86). A boost factor
-of 0 will disable the feature.
+To make it sensible with respect to the watermark_scale_factor
+parameter, the unit is in fractions of 10,000. The default value of
+15,000 on !DISCONTIGMEM configurations means that up to 150% of the high
+watermark will be reclaimed in the event of a pageblock being mixed due
+to fragmentation. The level of reclaim is determined by the number of
+fragmentation events that occurred in the recent past. If this value is
+smaller than a pageblock then a pageblocks worth of pages will be reclaimed
+(e.g. 2MB on 64-bit x86). A boost factor of 0 will disable the feature.
 
 =============================================================
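
To make the unit arithmetic above concrete, here is a small userspace sketch. This is a sketch only: the constants are illustrative, and the real calculation lives in the kernel's boost_watermark().

#include <stdio.h>

int main(void)
{
        long high_wmark = 1000;      /* zone high watermark, in pages (illustrative) */
        long boost_factor = 15000;   /* the !DISCONTIGMEM default */
        long pageblock_pages = 512;  /* 2MB pageblock / 4KB pages on 64-bit x86 */

        /* The unit is fractions of 10,000: 15,000 means up to 150% of the
         * high watermark may be reclaimed. */
        long max_boost = high_wmark * boost_factor / 10000;

        /* A boost smaller than one pageblock is rounded up to a pageblock's
         * worth of pages. */
        if (max_boost < pageblock_pages)
                max_boost = pageblock_pages;

        printf("maximum boost: %ld pages\n", max_boost); /* prints 1500 */
        return 0;
}

With a 1000-page high watermark the default factor caps the boost at 1500 pages, and a factor small enough to yield less than one pageblock is rounded up to a full pageblock.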

drivers/block/zram/zram_drv.c

@@ -774,18 +774,18 @@ struct zram_work {
 	struct zram *zram;
 	unsigned long entry;
 	struct bio *bio;
+	struct bio_vec bvec;
 };
 
 #if PAGE_SIZE != 4096
 static void zram_sync_read(struct work_struct *work)
 {
-	struct bio_vec bvec;
 	struct zram_work *zw = container_of(work, struct zram_work, work);
 	struct zram *zram = zw->zram;
 	unsigned long entry = zw->entry;
 	struct bio *bio = zw->bio;
 
-	read_from_bdev_async(zram, &bvec, entry, bio);
+	read_from_bdev_async(zram, &zw->bvec, entry, bio);
 }
 
 /*
@@ -798,6 +798,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
 {
 	struct zram_work work;
 
+	work.bvec = *bvec;
 	work.zram = zram;
 	work.entry = entry;
 	work.bio = bio;
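
The zram fix is a buffer-identity problem: zram_sync_read() declared its own local bio_vec, so the worker read into that unrelated local instead of the page the caller handed to read_from_bdev_sync(). Carrying the bvec inside the work struct makes the worker operate on the caller's buffer. A hedged userspace analogy, with pthreads standing in for the kernel workqueue and all names illustrative:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct io_vec {                 /* stand-in for struct bio_vec */
        char *buf;
        size_t len;
};

struct work_item {              /* stand-in for struct zram_work */
        struct io_vec vec;      /* copied in, like work.bvec = *bvec */
        const char *backing;    /* stand-in for the backing device */
};

static void *worker(void *arg)  /* stand-in for zram_sync_read() */
{
        struct work_item *w = arg;

        /* Reads into w->vec.buf, the caller's buffer, instead of a freshly
         * declared worker-local vec that never pointed at the caller's page. */
        memcpy(w->vec.buf, w->backing, w->vec.len);
        return NULL;
}

int main(void)
{
        char page[8] = "";
        struct work_item w = { .vec = { page, 5 }, .backing = "hello" };
        pthread_t t;

        pthread_create(&t, NULL, worker, &w); /* queue_work() analogue */
        pthread_join(&t, NULL);               /* wait_for_completion() analogue */
        printf("%s\n", page);                 /* prints "hello" */
        return 0;
}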

fs/proc/proc_sysctl.c

@@ -1626,9 +1626,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
 	if (--header->nreg)
 		return;
 
-	if (parent)
+	if (parent) {
 		put_links(header);
-	start_unregistering(header);
+		start_unregistering(header);
+	}
+
 	if (!--header->count)
 		kfree_rcu(header, rcu);
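
The sysctl fix is a brace-scoping bug: without braces only put_links() was guarded by the check, so start_unregistering() also ran for headers whose parent is NULL. Reduced to a standalone sketch with illustrative names, not kernel code:

#include <stdio.h>

struct header { struct header *parent; };

static void drop_buggy(struct header *h)
{
        if (h->parent)
                printf("put_links\n");           /* guarded */
        printf("start_unregistering\n");         /* NOT guarded: also runs
                                                    when parent == NULL */
}

static void drop_fixed(struct header *h)
{
        if (h->parent) {
                printf("put_links\n");
                printf("start_unregistering\n"); /* now guarded as well */
        }
}

int main(void)
{
        struct header root = { .parent = NULL };

        drop_buggy(&root); /* prints start_unregistering despite NULL parent */
        drop_fixed(&root); /* prints nothing */
        return 0;
}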

lib/Kconfig.debug

@@ -1929,6 +1929,7 @@ config TEST_KMOD
 	depends on m
-	depends on BLOCK && (64BIT || LBDAF)	  # for XFS, BTRFS
 	depends on NETDEVICES && NET_CORE && INET # for TUN
+	depends on BLOCK
 	select TEST_LKM
 	select XFS_FS
 	select TUN

lib/test_vmalloc.c

@@ -383,14 +383,14 @@ static void shuffle_array(int *arr, int n)
 static int test_func(void *private)
 {
 	struct test_driver *t = private;
-	cpumask_t newmask = CPU_MASK_NONE;
 	int random_array[ARRAY_SIZE(test_case_array)];
 	int index, i, j, ret;
 	ktime_t kt;
 	u64 delta;
 
-	cpumask_set_cpu(t->cpu, &newmask);
-	set_cpus_allowed_ptr(current, &newmask);
+	ret = set_cpus_allowed_ptr(current, cpumask_of(t->cpu));
+	if (ret < 0)
+		pr_err("Failed to set affinity to %d CPU\n", t->cpu);
 
 	for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
 		random_array[i] = i;
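
The test_vmalloc change avoids placing a cpumask_t on the stack: the type embeds NR_CPUS bits, which can be a kilobyte or more on large configurations, while cpumask_of(cpu) returns a pointer to an existing single-CPU mask. The patch also starts checking the return value of set_cpus_allowed_ptr(). The stack-cost half of that, as a hedged userspace sketch with an illustrative NR_CPUS:

#include <stdio.h>
#include <string.h>

#define NR_CPUS 8192 /* illustrative large-distro configuration */

struct cpumask {
        unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
};

/* The pattern the patch removes: a full CPU mask built on the stack. */
static void affine_on_stack(int cpu)
{
        struct cpumask mask;

        memset(&mask, 0, sizeof(mask)); /* CPU_MASK_NONE analogue */
        mask.bits[cpu / (8 * sizeof(unsigned long))] |=
                1UL << (cpu % (8 * sizeof(unsigned long)));
        printf("stack cost: %zu bytes\n", sizeof(mask)); /* 1024 here */
}

int main(void)
{
        affine_on_stack(3);
        /* cpumask_of(cpu) sidesteps this entirely: it yields a pointer to an
         * already-materialized single-CPU mask, so nothing proportional to
         * NR_CPUS ever lands on the stack. */
        return 0;
}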

mm/memory_hotplug.c

@@ -874,6 +874,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
 	 */
 	mem = find_memory_block(__pfn_to_section(pfn));
 	nid = mem->nid;
+	put_device(&mem->dev);
 
 	/* associate pfn range with the zone */
 	zone = move_pfn_range(online_type, nid, pfn, nr_pages);
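
find_memory_block() returns its result with an elevated device reference, so the caller must drop it once the node id has been read; the added put_device() restores the balance. The rule in miniature (illustrative names, not kernel code):

#include <stdio.h>

struct device { int refcount; };
struct memory_block { struct device dev; int nid; };

/* Like find_memory_block(): the lookup hands back a referenced object. */
static struct memory_block *find_block(struct memory_block *mem)
{
        mem->dev.refcount++;
        return mem;
}

static void put_dev(struct device *dev) /* put_device() analogue */
{
        dev->refcount--;
}

int main(void)
{
        struct memory_block blk = { { 1 }, 7 };
        struct memory_block *mem = find_block(&blk);
        int nid = mem->nid;

        put_dev(&mem->dev); /* the line the patch adds, in analogue form */
        printf("nid=%d refcount=%d\n", nid, blk.dev.refcount); /* back to 1 */
        return 0;
}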

mm/page_alloc.c

@@ -266,7 +266,20 @@ compound_page_dtor * const compound_page_dtors[] = {
 
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
+#ifdef CONFIG_DISCONTIGMEM
+/*
+ * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
+ * are not on separate NUMA nodes. Functionally this works but with
+ * watermark_boost_factor, it can reclaim prematurely as the ranges can be
+ * quite small. By default, do not boost watermarks on discontigmem as in
+ * many cases very high-order allocations like THP are likely to be
+ * unsupported and the premature reclaim offsets the advantage of long-term
+ * fragmentation avoidance.
+ */
+int watermark_boost_factor __read_mostly;
+#else
 int watermark_boost_factor __read_mostly = 15000;
+#endif
 int watermark_scale_factor = 10;
 
 static unsigned long nr_kernel_pages __initdata;
@@ -3419,8 +3432,11 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
 		alloc_flags |= ALLOC_KSWAPD;
 
 #ifdef CONFIG_ZONE_DMA32
+	if (!zone)
+		return alloc_flags;
+
 	if (zone_idx(zone) != ZONE_NORMAL)
-		goto out;
+		return alloc_flags;
 
 	/*
 	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
@@ -3429,9 +3445,9 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
 	 */
 	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
 	if (nr_online_nodes > 1 && !populated_zone(--zone))
-		goto out;
+		return alloc_flags;
 
-out:
+	alloc_flags |= ALLOC_NOFRAGMENT;
 #endif /* CONFIG_ZONE_DMA32 */
 	return alloc_flags;
 }
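
The ALLOC_NOFRAGMENT bug is visible in the hunk above: in the old code both disqualifying checks jumped to an out: label that sat directly before the #endif, and no path ever executed a statement setting the flag. Converting the gotos to early returns leaves flag-setting as the single fall-through path. A standalone sketch of the two shapes, with illustrative conditions:

#include <stdio.h>

#define ALLOC_NOFRAGMENT 0x1

/* Old shape: every path converged on 'out' without setting the flag. */
static unsigned int flags_buggy(int zone_ok, int dma32_populated)
{
        unsigned int flags = 0;

        if (!zone_ok)
                goto out;
        if (!dma32_populated)
                goto out;
        /* the 'flags |= ALLOC_NOFRAGMENT;' line was lost in a refactor */
out:
        return flags;
}

/* Fixed shape: disqualifying cases return early; the fall-through sets it. */
static unsigned int flags_fixed(int zone_ok, int dma32_populated)
{
        unsigned int flags = 0;

        if (!zone_ok)
                return flags;
        if (!dma32_populated)
                return flags;

        flags |= ALLOC_NOFRAGMENT;
        return flags;
}

int main(void)
{
        printf("buggy: %u fixed: %u\n", flags_buggy(1, 1), flags_fixed(1, 1));
        return 0; /* prints "buggy: 0 fixed: 1" */
}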
@@ -3773,11 +3789,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	memalloc_noreclaim_restore(noreclaim_flag);
 	psi_memstall_leave(&pflags);
 
-	if (*compact_result <= COMPACT_INACTIVE) {
-		WARN_ON_ONCE(page);
-		return NULL;
-	}
-
 	/*
 	 * At least in one zone compaction wasn't deferred or skipped, so let's
 	 * count a compaction stall
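
The final hunk removes the early bail-out so that a page captured during compaction is used even when the overall compaction result is COMPACT_INACTIVE or below; previously the function returned NULL in that case and warned if a page had nonetheless been captured, discarding it. A hedged sketch of the new flow, with illustrative names and values:

#include <stdio.h>

enum compact_result { COMPACT_INACTIVE, COMPACT_SUCCESS };

struct page { int id; };

static struct page captured = { 42 };

/* Illustrative: compaction may capture a page yet still report a result
 * of COMPACT_INACTIVE or below. */
static struct page *try_compact(enum compact_result *result)
{
        *result = COMPACT_INACTIVE;
        return &captured;
}

int main(void)
{
        enum compact_result result;
        struct page *page = try_compact(&result);

        /* Old flow: if (result <= COMPACT_INACTIVE) return NULL; which
         * warned and threw the captured page away. New flow: */
        if (page)
                printf("using captured page %d despite result %d\n",
                       page->id, result);
        return 0;
}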