[PARISC] Beautify parisc vmlinux.lds.S

Introduce a consistent layout of vmlinux.
The same layout has been introduced for most
architectures.

At the same time, move a few label definitions inside
the curly brackets so they are assigned the correct
starting address. Before, alignment inserted by ld
could cause the label to point before the actual
start of the section.

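To illustrate the label move, here is the exception table section before
and after the change, both forms taken from the patch (a minimal sketch;
the other moved labels follow the same pattern):

	/* Before: the labels sit outside the output section, so any
	 * alignment padding ld inserts for __ex_table lands after
	 * __start___ex_table, and the label points below the real
	 * start of the section.
	 */
	__start___ex_table = .;
	__ex_table : { *(__ex_table) }
	__stop___ex_table = .;

	/* After: the labels are assigned inside the braces, i.e. after
	 * the alignment has been applied, so they match the actual
	 * start and end addresses of the section.
	 */
	__ex_table : {
		__start___ex_table = .;
		*(__ex_table)
		__stop___ex_table = .;
	}
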
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>

@@ -46,168 +46,219 @@ jiffies = jiffies_64;
#endif
SECTIONS
{
	. = KERNEL_BINARY_TEXT_START;

	_text = .;		/* Text and read-only data */
	.text ALIGN(16) : {
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		*(.text.do_softirq)
		*(.text.sys_exit)
		*(.text.do_sigaltstack)
		*(.text.do_fork)
		*(.text.*)
		*(.fixup)
		*(.lock.text)		/* out-of-line lock text */
		*(.gnu.warning)
	} = 0
	/* End of text section */
	_etext = .;

	RODATA
	BUG_TABLE

	/* writeable */
	/* Make sure this is page aligned so
	 * that we can properly leave these
	 * as writable
	 */
	. = ALIGN(ASM_PAGE_SIZE);
	data_start = .;
	. = ALIGN(16);
	/* Exception table */
	__ex_table : {
		__start___ex_table = .;
		*(__ex_table)
		__stop___ex_table = .;
	}

	NOTES

	/* unwind info */
	.PARISC.unwind : {
		__start___unwind = .;
		*(.PARISC.unwind)
		__stop___unwind = .;
	}

	/* rarely changed data like cpu maps */
	. = ALIGN(16);
	.data.read_mostly : {
		*(.data.read_mostly)
	}

	. = ALIGN(L1_CACHE_BYTES);
	/* Data */
	.data : {
		DATA_DATA
		CONSTRUCTORS
	}

	. = ALIGN(L1_CACHE_BYTES);
	.data.cacheline_aligned : {
		*(.data.cacheline_aligned)
	}

	/* PA-RISC locks requires 16-byte alignment */
	. = ALIGN(16);
	.data.lock_aligned : {
		*(.data.lock_aligned)
	}

	/* nosave data is really only used for software suspend...it's here
	 * just in case we ever implement it
	 */
	. = ALIGN(ASM_PAGE_SIZE);
	__nosave_begin = .;
	.data_nosave : {
		*(.data.nosave)
	}
	. = ALIGN(ASM_PAGE_SIZE);
	__nosave_end = .;

	/* End of data section */
	_edata = .;

	/* BSS */
	__bss_start = .;
	/* page table entries need to be PAGE_SIZE aligned */
	. = ALIGN(ASM_PAGE_SIZE);
	.data.vmpages : {
		*(.data.vm0.pmd)
		*(.data.vm0.pgd)
		*(.data.vm0.pte)
	}
	.bss : {
		*(.bss)
		*(COMMON)
	}
	__bss_stop = .;

	/* assembler code expects init_task to be 16k aligned */
	. = ALIGN(16384);
	/* init_task */
	.data.init_task : {
		*(.data.init_task)
	}

	/* The interrupt stack is currently partially coded, but not yet
	 * implemented
	 */
	. = ALIGN(16384);
	init_istack : {
		*(init_istack)
	}

#ifdef CONFIG_64BIT
	. = ALIGN(16);
	/* Linkage tables */
	.opd : {
		*(.opd)
	} PROVIDE (__gp = .);
	.plt : {
		*(.plt)
	}
	.dlt : {
		*(.dlt)
	}
#endif

	/* reserve space for interrupt stack by aligning __init* to 16k */
	. = ALIGN(16384);
	__init_begin = .;
	.init.text : {
		_sinittext = .;
		*(.init.text)
		_einittext = .;
	}
	.init.data : {
		*(.init.data)
	}
	. = ALIGN(16);
	.init.setup : {
		__setup_start = .;
		*(.init.setup)
		__setup_end = .;
	}
	.initcall.init : {
		__initcall_start = .;
		INITCALLS
		__initcall_end = .;
	}
	.con_initcall.init : {
		__con_initcall_start = .;
		*(.con_initcall.init)
		__con_initcall_end = .;
	}
	SECURITY_INIT

	/* alternate instruction replacement. This is a mechanism x86 uses
	 * to detect the CPU type and replace generic instruction sequences
	 * with CPU specific ones. We don't currently do this in PA, but
	 * it seems like a good idea...
	 */
	. = ALIGN(4);
	.altinstructions : {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}
	.altinstr_replacement : {
		*(.altinstr_replacement)
	}

	/* .exit.text is discard at runtime, not link time, to deal with references
	 * from .altinstructions and .eh_frame
	 */
	.exit.text : {
		*(.exit.text)
	}
	.exit.data : {
		*(.exit.data)
	}

#ifdef CONFIG_BLK_DEV_INITRD
	. = ALIGN(ASM_PAGE_SIZE);
	.init.ramfs : {
		__initramfs_start = .;
		*(.init.ramfs)
		__initramfs_end = .;
	}
#endif

	PERCPU(ASM_PAGE_SIZE)
	. = ALIGN(ASM_PAGE_SIZE);
	__init_end = .;
	/* freed after init ends here */

	_end = . ;

	/* Sections to be discarded */
	/DISCARD/ : {
		*(.exitcall.exit)
#ifdef CONFIG_64BIT
		/* temporary hack until binutils is fixed to not emit these
		 * for static binaries
		 */
		*(.interp)
		*(.dynsym)
		*(.dynstr)
		*(.dynamic)
		*(.hash)
		*(.gnu.hash)
#endif
	}

	STABS_DEBUG
	.note 0 : { *(.note) }
}