[PATCH] kexec: x86_64: add CONFIG_PHYSICAL_START

For one kernel to report a crash that another kernel has created, we need
to have two kernels loaded simultaneously in memory.  To accomplish this
the two kernels need to be built to run at different physical addresses.

This patch adds the CONFIG_PHYSICAL_START option to the x86_64 kernel
so we can do just that.  You need to know what you are doing, and what
the ramifications are, before changing this value; most users won't care,
so I have made it depend on CONFIG_EMBEDDED.

bzImage kernels compiled with this option will still work, and will run
at the different physical address, but they will still be loaded at 1MB.
If you need the kernel to be loaded at a different address as well, you
need to boot a vmlinux.
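
To make the arithmetic concrete: with the new __PHYSICAL_START/__START_KERNEL
definitions at the end of this patch (pulled in via the added <asm/page.h>
includes), the kernel's virtual link address is simply __START_KERNEL_map plus
the configured physical start.  A minimal user-space sketch of that
calculation, assuming the default CONFIG_PHYSICAL_START of 0x100000 and a
purely hypothetical crash kernel built at 0x1000000 (16MB):

/*
 * Illustrative sketch only, not part of the patch: mirrors the new
 * __PHYSICAL_START/__START_KERNEL definitions to show where a kernel
 * ends up linked for a given CONFIG_PHYSICAL_START.  The 16MB crash
 * kernel value is a made-up example.
 */
#include <stdio.h>

#define START_KERNEL_MAP        0xffffffff80000000UL    /* __START_KERNEL_map */

static unsigned long start_kernel(unsigned long physical_start)
{
        /* __START_KERNEL = __START_KERNEL_map + __PHYSICAL_START */
        return START_KERNEL_MAP + physical_start;
}

int main(void)
{
        printf("default  (0x100000):  %#lx\n", start_kernel(0x100000UL));   /* 0xffffffff80100000 */
        printf("example (0x1000000):  %#lx\n", start_kernel(0x1000000UL));  /* 0xffffffff81000000 */
        return 0;
}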

Signed-off-by: Eric Biederman <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Author:     Eric W. Biederman
Date:       2005-06-25 14:57:52 -07:00
Committer:  Linus Torvalds
Commit:     d0537508a9  (parent 8a9190853c)

5 changed files with 32 additions and 17 deletions


@@ -369,6 +369,17 @@ config X86_MCE_INTEL
           Additional support for intel specific MCE features such as
           the thermal monitor.
 
+config PHYSICAL_START
+        hex "Physical address where the kernel is loaded" if EMBEDDED
+        default "0x100000"
+        help
+          This gives the physical address where the kernel is loaded.
+          Primarily used in the case of kexec on panic where the
+          fail safe kernel needs to run at a different address than
+          the panic-ed kernel.
+
+          Don't change this unless you know what you are doing.
+
 config SECCOMP
         bool "Enable seccomp to safely compute untrusted bytecode"
         depends on PROC_FS


@@ -28,6 +28,7 @@
 #include <linux/linkage.h>
 #include <asm/segment.h>
+#include <asm/page.h>
 
         .code32
         .globl startup_32
@@ -77,7 +78,7 @@ startup_32:
         jnz 3f
         addl $8,%esp
         xorl %ebx,%ebx
-        ljmp $(__KERNEL_CS), $0x100000
+        ljmp $(__KERNEL_CS), $__PHYSICAL_START
 
 /*
  * We come here, if we were loaded high.
@@ -103,7 +104,7 @@ startup_32:
         popl %ecx       # lcount
         popl %edx       # high_buffer_start
         popl %eax       # hcount
-        movl $0x100000,%edi
+        movl $__PHYSICAL_START,%edi
         cli             # make sure we don't get interrupted
         ljmp $(__KERNEL_CS), $0x1000 # and jump to the move routine
@@ -128,7 +129,7 @@ move_routine_start:
         movsl
         movl %ebx,%esi  # Restore setup pointer
         xorl %ebx,%ebx
-        ljmp $(__KERNEL_CS), $0x100000
+        ljmp $(__KERNEL_CS), $__PHYSICAL_START
 move_routine_end:


@@ -11,6 +11,7 @@
 #include "miscsetup.h"
 #include <asm/io.h>
+#include <asm/page.h>
 
 /*
  * gzip declarations
@@ -284,7 +285,7 @@ void setup_normal_output_buffer(void)
 #else
         if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < 1024) error("Less than 2MB of memory");
 #endif
-        output_data = (char *)0x100000; /* Points to 1M */
+        output_data = (char *)__PHYSICAL_START; /* Normally Points to 1M */
         free_mem_end_ptr = (long)real_mode;
 }
@@ -307,8 +308,8 @@ void setup_output_buffer_if_we_run_high(struct moveparams *mv)
         low_buffer_size = low_buffer_end - LOW_BUFFER_START;
         high_loaded = 1;
         free_mem_end_ptr = (long)high_buffer_start;
-        if ( (0x100000 + low_buffer_size) > ((ulg)high_buffer_start)) {
-                high_buffer_start = (uch *)(0x100000 + low_buffer_size);
+        if ( (__PHYSICAL_START + low_buffer_size) > ((ulg)high_buffer_start)) {
+                high_buffer_start = (uch *)(__PHYSICAL_START + low_buffer_size);
                 mv->hcount = 0; /* say: we need not to move high_buffer */
         }
         else mv->hcount = -1;


@@ -248,23 +248,23 @@ ENTRY(_stext)
  */
 .org 0x1000
 ENTRY(init_level4_pgt)
-        .quad   0x0000000000102007              /* -> level3_ident_pgt */
+        .quad   0x0000000000002007 + __PHYSICAL_START   /* -> level3_ident_pgt */
         .fill   255,8,0
-        .quad   0x000000000010a007
+        .quad   0x000000000000a007 + __PHYSICAL_START
         .fill   254,8,0
         /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
-        .quad   0x0000000000103007              /* -> level3_kernel_pgt */
+        .quad   0x0000000000003007 + __PHYSICAL_START   /* -> level3_kernel_pgt */
 
 .org 0x2000
 ENTRY(level3_ident_pgt)
-        .quad   0x0000000000104007
+        .quad   0x0000000000004007 + __PHYSICAL_START
         .fill   511,8,0
 
 .org 0x3000
 ENTRY(level3_kernel_pgt)
         .fill   510,8,0
         /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
-        .quad   0x0000000000105007              /* -> level2_kernel_pgt */
+        .quad   0x0000000000005007 + __PHYSICAL_START   /* -> level2_kernel_pgt */
         .fill   1,8,0
 
 .org 0x4000
@@ -337,17 +337,17 @@ ENTRY(empty_bad_pmd_table)
 
 .org 0xa000
 ENTRY(level3_physmem_pgt)
-        .quad   0x0000000000105007              /* -> level2_kernel_pgt (so that __va works even before pagetable_init) */
+        .quad   0x0000000000005007 + __PHYSICAL_START   /* -> level2_kernel_pgt (so that __va works even before pagetable_init) */
 
         .org 0xb000
 #ifdef CONFIG_ACPI_SLEEP
 ENTRY(wakeup_level4_pgt)
-        .quad   0x0000000000102007              /* -> level3_ident_pgt */
+        .quad   0x0000000000002007 + __PHYSICAL_START   /* -> level3_ident_pgt */
         .fill   255,8,0
-        .quad   0x000000000010a007
+        .quad   0x000000000000a007 + __PHYSICAL_START
         .fill   254,8,0
         /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
-        .quad   0x0000000000103007              /* -> level3_kernel_pgt */
+        .quad   0x0000000000003007 + __PHYSICAL_START   /* -> level3_kernel_pgt */
 #endif
 
         .data


@@ -64,12 +64,14 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #define __pgd(x) ((pgd_t) { (x) } )
 #define __pgprot(x)     ((pgprot_t) { (x) } )
-#define __START_KERNEL          0xffffffff80100000UL
+#define __PHYSICAL_START        ((unsigned long)CONFIG_PHYSICAL_START)
+#define __START_KERNEL          (__START_KERNEL_map + __PHYSICAL_START)
 #define __START_KERNEL_map      0xffffffff80000000UL
 #define __PAGE_OFFSET           0xffff810000000000UL
 
 #else
 
-#define __START_KERNEL          0xffffffff80100000
+#define __PHYSICAL_START        CONFIG_PHYSICAL_START
+#define __START_KERNEL          (__START_KERNEL_map + __PHYSICAL_START)
 #define __START_KERNEL_map      0xffffffff80000000
 #define __PAGE_OFFSET           0xffff810000000000
 #endif /* !__ASSEMBLY__ */
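
The early page-table rewrites earlier in the patch are mechanical: each .quad
entry is the physical address of the next-level table plus the low flag bits
(0x007), so expressing the old hard-coded constants as an offset plus
__PHYSICAL_START evaluates to the same values as before on a default build,
while letting the tables follow the configured load address.  A quick
stand-alone check of that, assuming the default CONFIG_PHYSICAL_START of
0x100000 (illustrative only, not part of the patch):

/*
 * Stand-alone check, not part of the patch: with the default
 * CONFIG_PHYSICAL_START of 0x100000, the rewritten early page-table
 * entries evaluate to the same constants as the old hard-coded ones.
 * The low 0x007 bits are page-table flag bits; the rest is the physical
 * address of the next-level table, which now moves with __PHYSICAL_START.
 */
#include <assert.h>
#include <stdio.h>

#define PHYSICAL_START  0x100000UL      /* default CONFIG_PHYSICAL_START */

int main(void)
{
        assert(0x0000000000002007UL + PHYSICAL_START == 0x0000000000102007UL); /* -> level3_ident_pgt  */
        assert(0x0000000000003007UL + PHYSICAL_START == 0x0000000000103007UL); /* -> level3_kernel_pgt */
        assert(0x0000000000005007UL + PHYSICAL_START == 0x0000000000105007UL); /* -> level2_kernel_pgt */
        printf("early page-table entries unchanged for the default build\n");
        return 0;
}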