[PATCH] kdump: Routines for copying dump pages
This patch provides the interfaces necessary to read the dump contents, treating it as a high-memory device. Signed-off-by: Hariprasad Nellitheertha <hari@in.ibm.com> Signed-off-by: Eric Biederman <ebiederm@xmission.com> Signed-off-by: Vivek Goyal <vgoyal@in.ibm.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
5f016456c9
commit
60e64d46a5
|
@ -75,6 +75,24 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
|
||||||
preempt_check_resched();
|
preempt_check_resched();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/*
	 * Disable preemption: the fixmap slot chosen below is indexed by
	 * smp_processor_id(), so the task must not migrate to another CPU
	 * while the mapping is in use.
	 */
	inc_preempt_count();

	/* Pick this CPU's per-type atomic-kmap slot in the fixmap area. */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	/* Install a pte for the bare pfn (no struct page needed). */
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	/* Flush only the single stale TLB entry for this fixmap address. */
	__flush_tlb_one(vaddr);

	return (void*) vaddr;
}
||||||
struct page *kmap_atomic_to_page(void *ptr)
|
struct page *kmap_atomic_to_page(void *ptr)
|
||||||
{
|
{
|
||||||
unsigned long idx, vaddr = (unsigned long)ptr;
|
unsigned long idx, vaddr = (unsigned long)ptr;
|
||||||
|
|
|
@ -70,6 +70,7 @@ void *kmap(struct page *page);
|
||||||
void kunmap(struct page *page);
void *kmap_atomic(struct page *page, enum km_type type);
void kunmap_atomic(void *kvaddr, enum km_type type);
/* Like kmap_atomic(), but maps a bare pfn with no struct page backing. */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
struct page *kmap_atomic_to_page(void *ptr);

#define flush_cache_kmaps() do { } while (0)
|
@ -0,0 +1,13 @@
|
||||||
|
#ifndef LINUX_CRASH_DUMP_H
#define LINUX_CRASH_DUMP_H

#ifdef CONFIG_CRASH_DUMP
#include <linux/kexec.h>
#include <linux/smp_lock.h>
#include <linux/device.h>
#include <linux/proc_fs.h>

/*
 * Copy csize bytes, starting at offset, out of old-memory page pfn into
 * buf; userbuf non-zero means buf is a userspace pointer.  Returns the
 * number of bytes copied or a negative errno.
 */
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
						unsigned long, int);
#endif /* CONFIG_CRASH_DUMP */
#endif /* LINUX_CRASH_DUMP_H */
|
|
@ -28,6 +28,7 @@ static inline void *kmap(struct page *page)
|
||||||
|
|
||||||
/* !CONFIG_HIGHMEM: every page is permanently mapped, so the atomic-kmap
 * calls collapse to simple address arithmetic. */
#define kmap_atomic(page, idx) page_address(page)
#define kunmap_atomic(addr, idx) do { } while (0)
/* pfn variant: resolve the pfn to its struct page, then use its address. */
#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)

#endif /* CONFIG_HIGHMEM */
|
@ -28,6 +28,7 @@ obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
|
||||||
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_SYSFS) += ksysfs.o
obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
# Memory-preserving-reboot dump access (copy_oldmem_page) for kdump.
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_SECCOMP) += seccomp.o

ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
|
@ -0,0 +1,49 @@
|
||||||
|
/*
 * kernel/crash_dump.c - Memory preserving reboot related code.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 */

#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>

#include <asm/io.h>
#include <asm/uaccess.h>

/*
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 *
 * @pfn:     page frame number of the old-memory page to read
 * @buf:     destination buffer (kernel or user pointer, see @userbuf)
 * @csize:   number of bytes to copy (0 is a no-op returning 0)
 * @offset:  byte offset into the page at which to start copying
 *           (caller must keep offset + csize within PAGE_SIZE)
 * @userbuf: non-zero if @buf is a userspace pointer
 *
 * Returns the number of bytes copied, -ENOMEM if the bounce buffer
 * cannot be allocated, or -EFAULT if the user copy fails.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
				size_t csize, unsigned long offset, int userbuf)
{
	void *page, *vaddr;
	ssize_t ret = csize;

	if (!csize)
		return 0;

	/*
	 * Bounce through a kernel buffer: the old page is only reachable
	 * through an atomic kmap, and copy_to_user() may fault, so the
	 * user copy must happen after the atomic mapping is dropped.
	 */
	page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
	copy_page(page, vaddr);
	kunmap_atomic(vaddr, KM_PTE0);

	if (userbuf) {
		if (copy_to_user(buf, page + offset, csize))
			ret = -EFAULT;
	} else {
		memcpy(buf, page + offset, csize);
	}

	/* Single cleanup path: the buffer is freed on every exit. */
	kfree(page);
	return ret;
}
|
Loading…
Reference in New Issue