mm/percpu.c: fix potential memory leakage for pcpu_embed_first_chunk()

In order to ensure the percpu group areas within a chunk aren't
distributed too sparsely, pcpu_embed_first_chunk() goes to the error
handling path when a chunk spans more than 3/4 of the VMALLOC area.
However, during error handling it forgets to free the memory allocated
for all percpu groups, because it jumps to label @out_free rather than
@out_free_areas.

This will cause a memory leak if that rare scenario actually happens. To
fix the issue, we check the chunk's spanned area immediately after
completing memory allocation for all percpu groups; if the check fails,
we jump to label @out_free_areas to free the memory, then return.

To verify the approach, we dumped all memory allocated, forced the jump,
and then dumped all memory freed; the result is correct after checking
that every allocation made in this function is freed.

BTW, the approach was chosen after considering the scenarios below:
 - We don't jump to label @out_free directly to fix this issue, since
   that could free several allocated memory blocks twice.
 - The purpose of the jump after pcpu_setup_first_chunk() is to free
   unused memory rather than to handle an error; moreover, the function
   does not return an error code in that case — it either panics due to
   BUG_ON() or returns 0.

Signed-off-by: zijun_hu <zijun_hu@htc.com>
Tested-by: zijun_hu <zijun_hu@htc.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
zijun_hu 2016-10-05 21:30:24 +08:00 committed by Tejun Heo
parent 93c76b6b2f
commit 9b7396624a
1 changed file with 18 additions and 18 deletions

View File

@ -1963,7 +1963,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
struct pcpu_alloc_info *ai; struct pcpu_alloc_info *ai;
size_t size_sum, areas_size; size_t size_sum, areas_size;
unsigned long max_distance; unsigned long max_distance;
int group, i, rc; int group, i, highest_group, rc;
ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
cpu_distance_fn); cpu_distance_fn);
@ -1979,7 +1979,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
goto out_free; goto out_free;
} }
/* allocate, copy and determine base address */ /* allocate, copy and determine base address & max_distance */
highest_group = 0;
for (group = 0; group < ai->nr_groups; group++) { for (group = 0; group < ai->nr_groups; group++) {
struct pcpu_group_info *gi = &ai->groups[group]; struct pcpu_group_info *gi = &ai->groups[group];
unsigned int cpu = NR_CPUS; unsigned int cpu = NR_CPUS;
@ -2000,6 +2001,21 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
areas[group] = ptr; areas[group] = ptr;
base = min(ptr, base); base = min(ptr, base);
if (ptr > areas[highest_group])
highest_group = group;
}
max_distance = areas[highest_group] - base;
max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
/* warn if maximum distance is further than 75% of vmalloc space */
if (max_distance > VMALLOC_TOTAL * 3 / 4) {
pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
max_distance, VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
/* and fail if we have fallback */
rc = -EINVAL;
goto out_free_areas;
#endif
} }
/* /*
@ -2024,24 +2040,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
} }
/* base address is now known, determine group base offsets */ /* base address is now known, determine group base offsets */
i = 0;
for (group = 0; group < ai->nr_groups; group++) { for (group = 0; group < ai->nr_groups; group++) {
ai->groups[group].base_offset = areas[group] - base; ai->groups[group].base_offset = areas[group] - base;
if (areas[group] > areas[i])
i = group;
}
max_distance = ai->groups[i].base_offset +
ai->unit_size * ai->groups[i].nr_units;
/* warn if maximum distance is further than 75% of vmalloc space */
if (max_distance > VMALLOC_TOTAL * 3 / 4) {
pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
max_distance, VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
/* and fail if we have fallback */
rc = -EINVAL;
goto out_free;
#endif
} }
pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",