mm/page_alloc: don't reserve ZONE_HIGHMEM for ZONE_MOVABLE request
Freepage on ZONE_HIGHMEM doesn't work for kernel memory so it's not that important to reserve. When ZONE_MOVABLE is used, this problem would theoretically decrease usable memory for GFP_HIGHUSER_MOVABLE allocation requests, which are mainly used for page cache and anon page allocation. So, fix it by setting 0 to sysctl_lowmem_reserve_ratio[ZONE_HIGHMEM]. And, defining the sysctl_lowmem_reserve_ratio array with size MAX_NR_ZONES - 1 makes the code complex. For example, on a highmem system, the following reserve ratio is activated for the *NORMAL ZONE*, which could easily mislead people. #ifdef CONFIG_HIGHMEM 32 #endif This patch also fixes this situation by defining the sysctl_lowmem_reserve_ratio array with size MAX_NR_ZONES and placing the "#ifdef" in the right place. Link: http://lkml.kernel.org/r/1504672525-17915-1-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Tested-by: Tony Lindgren <tony@atomide.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.vnet.ibm.com> Cc: Minchan Kim <minchan@kernel.org> Cc: Rik van Riel <riel@redhat.com> Cc: Laura Abbott <lauraa@codeaurora.org> Cc: Marek Szyprowski <m.szyprowski@samsung.com> Cc: Michal Nazarewicz <mina86@mina86.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Will Deacon <will.deacon@arm.com> Cc: <linux-api@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
94723aafb9
commit
d3cda2337b
|
@ -312,8 +312,6 @@ The lowmem_reserve_ratio is an array. You can see them by reading this file.
|
|||
% cat /proc/sys/vm/lowmem_reserve_ratio
|
||||
256 256 32
|
||||
-
|
||||
Note: # of these elements is one fewer than the number of zones. Because the highest
|
||||
zone's value is not necessary for the following calculation.
|
||||
|
||||
But, these values are not used directly. The kernel calculates # of protection
|
||||
pages for each zones from them. These are shown as array of protection pages
|
||||
|
@ -364,7 +362,8 @@ As above expression, they are reciprocal number of ratio.
|
|||
pages of higher zones on the node.
|
||||
|
||||
If you would like to protect more pages, smaller values are effective.
|
||||
The minimum value is 1 (1/1 -> 100%).
|
||||
The minimum value is 1 (1/1 -> 100%). A value less than 1 completely
|
||||
disables protection of the pages.
|
||||
|
||||
==============================================================
|
||||
|
||||
|
|
|
@ -885,7 +885,7 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
|
|||
void __user *, size_t *, loff_t *);
|
||||
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
|
||||
void __user *, size_t *, loff_t *);
|
||||
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
|
||||
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
|
||||
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
|
||||
void __user *, size_t *, loff_t *);
|
||||
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
|
||||
|
|
|
@ -205,17 +205,18 @@ static void __free_pages_ok(struct page *page, unsigned int order);
|
|||
* TBD: should special case ZONE_DMA32 machines here - in those we normally
|
||||
* don't need any ZONE_NORMAL reservation
|
||||
*/
|
||||
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
|
||||
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
|
||||
#ifdef CONFIG_ZONE_DMA
|
||||
256,
|
||||
[ZONE_DMA] = 256,
|
||||
#endif
|
||||
#ifdef CONFIG_ZONE_DMA32
|
||||
256,
|
||||
[ZONE_DMA32] = 256,
|
||||
#endif
|
||||
[ZONE_NORMAL] = 32,
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
32,
|
||||
[ZONE_HIGHMEM] = 0,
|
||||
#endif
|
||||
32,
|
||||
[ZONE_MOVABLE] = 0,
|
||||
};
|
||||
|
||||
EXPORT_SYMBOL(totalram_pages);
|
||||
|
@ -7132,13 +7133,15 @@ static void setup_per_zone_lowmem_reserve(void)
|
|||
struct zone *lower_zone;
|
||||
|
||||
idx--;
|
||||
|
||||
if (sysctl_lowmem_reserve_ratio[idx] < 1)
|
||||
sysctl_lowmem_reserve_ratio[idx] = 1;
|
||||
|
||||
lower_zone = pgdat->node_zones + idx;
|
||||
lower_zone->lowmem_reserve[j] = managed_pages /
|
||||
sysctl_lowmem_reserve_ratio[idx];
|
||||
|
||||
if (sysctl_lowmem_reserve_ratio[idx] < 1) {
|
||||
sysctl_lowmem_reserve_ratio[idx] = 0;
|
||||
lower_zone->lowmem_reserve[j] = 0;
|
||||
} else {
|
||||
lower_zone->lowmem_reserve[j] =
|
||||
managed_pages / sysctl_lowmem_reserve_ratio[idx];
|
||||
}
|
||||
managed_pages += lower_zone->managed_pages;
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue