coredump: refactor page range dumping into common helper
Both fs/binfmt_elf.c and fs/binfmt_elf_fdpic.c need to dump ranges of pages
into the coredump file. Extract that logic into a common helper.

Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Link: http://lkml.kernel.org/r/20200827114932.3572699-4-jannh@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent df0c09c011
commit afc63a97b7
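In effect, each caller's open-coded per-page loop collapses into a single call to the new helper. The sketch below is adapted from the fs/binfmt_elf.c hunk that follows and is shown only for illustration, not as part of the patch; dump_user_range() returns nonzero on success and 0 when the dump should be aborted:

	/*
	 * Illustrative caller (adapted from the binfmt_elf hunk below):
	 * dump each VMA's on-disk-sized portion with one helper call
	 * instead of walking the range page by page here.
	 */
	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		if (!dump_user_range(cprm, vma->vm_start, vma_filesz[i++]))
			goto end_coredump;
	}

Pages that get_dump_page() cannot return are skipped with dump_skip() inside the helper, so never-touched or zero-page-backed ranges leave holes in the core file rather than forcing page-table allocation.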
fs/binfmt_elf.c
@@ -2444,26 +2444,8 @@ static int elf_core_dump(struct coredump_params *cprm)
 	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
 			vma = next_vma(vma, gate_vma)) {
-		unsigned long addr;
-		unsigned long end;
-
-		end = vma->vm_start + vma_filesz[i++];
-
-		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
-			struct page *page;
-			int stop;
-
-			page = get_dump_page(addr);
-			if (page) {
-				void *kaddr = kmap(page);
-				stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
-				kunmap(page);
-				put_page(page);
-			} else
-				stop = !dump_skip(cprm, PAGE_SIZE);
-			if (stop)
-				goto end_coredump;
-		}
+		if (!dump_user_range(cprm, vma->vm_start, vma_filesz[i++]))
+			goto end_coredump;
 	}
 	dump_truncate(cprm);
fs/binfmt_elf_fdpic.c
@@ -1534,21 +1534,9 @@ static bool elf_fdpic_dump_segments(struct coredump_params *cprm)
 		if (!maydump(vma, cprm->mm_flags))
 			continue;
 
-		for (addr = vma->vm_start; addr < vma->vm_end;
-							addr += PAGE_SIZE) {
-			bool res;
-			struct page *page = get_dump_page(addr);
-			if (page) {
-				void *kaddr = kmap(page);
-				res = dump_emit(cprm, kaddr, PAGE_SIZE);
-				kunmap(page);
-				put_page(page);
-			} else {
-				res = dump_skip(cprm, PAGE_SIZE);
-			}
-			if (!res)
-				return false;
-		}
+		if (!dump_user_range(cprm, vma->vm_start,
+				     vma->vm_end - vma->vm_start))
+			return false;
 	}
 	return true;
 }
fs/coredump.c
@@ -876,6 +876,40 @@ int dump_skip(struct coredump_params *cprm, size_t nr)
 }
 EXPORT_SYMBOL(dump_skip);
 
+#ifdef CONFIG_ELF_CORE
+int dump_user_range(struct coredump_params *cprm, unsigned long start,
+		    unsigned long len)
+{
+	unsigned long addr;
+
+	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
+		struct page *page;
+		int stop;
+
+		/*
+		 * To avoid having to allocate page tables for virtual address
+		 * ranges that have never been used yet, and also to make it
+		 * easy to generate sparse core files, use a helper that returns
+		 * NULL when encountering an empty page table entry that would
+		 * otherwise have been filled with the zero page.
+		 */
+		page = get_dump_page(addr);
+		if (page) {
+			void *kaddr = kmap(page);
+
+			stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
+			kunmap(page);
+			put_page(page);
+		} else {
+			stop = !dump_skip(cprm, PAGE_SIZE);
+		}
+		if (stop)
+			return 0;
+	}
+	return 1;
+}
+#endif
+
 int dump_align(struct coredump_params *cprm, int align)
 {
 	unsigned mod = cprm->pos & (align - 1);
include/linux/coredump.h
@@ -16,6 +16,8 @@ extern int dump_skip(struct coredump_params *cprm, size_t nr);
 extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
 extern int dump_align(struct coredump_params *cprm, int align);
 extern void dump_truncate(struct coredump_params *cprm);
+int dump_user_range(struct coredump_params *cprm, unsigned long start,
+		    unsigned long len);
 #ifdef CONFIG_COREDUMP
 extern void do_coredump(const kernel_siginfo_t *siginfo);
 #else