[Blackfin] arch: move the init sections to the end of memory

Move the init sections to the end of memory so that after they
are freed, run time memory is all contiguous - this should help decrease
memory fragmentation.

When doing this, we also pack some of the other sections a little closer
together, to make sure we don't waste memory. To make this happen,
we need to rename the .data.init_task section to .init_task.data, so
it doesn't get picked up by the linker script glob.

Signed-off-by: Mike Frysinger <vapier.adi@gmail.com>
Signed-off-by: Bryan Wu <bryan.wu@analog.com>
This commit is contained in:
Mike Frysinger 2008-02-02 15:53:17 +08:00 committed by Bryan Wu
parent 80f31c8a03
commit b7627acc43
3 changed files with 36 additions and 27 deletions

View File

@ -57,5 +57,5 @@ EXPORT_SYMBOL(init_task);
* "init_task" linker map entry. * "init_task" linker map entry.
*/ */
union thread_union init_thread_union union thread_union init_thread_union
__attribute__ ((__section__(".data.init_task"))) = { __attribute__ ((__section__(".init_task.data"))) = {
INIT_THREAD_INFO(init_task)}; INIT_THREAD_INFO(init_task)};

View File

@ -427,7 +427,7 @@ static __init void parse_cmdline_early(char *cmdline_p)
static __init void memory_setup(void) static __init void memory_setup(void)
{ {
_rambase = (unsigned long)_stext; _rambase = (unsigned long)_stext;
_ramstart = (unsigned long)__bss_stop; _ramstart = (unsigned long)_end;
if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) { if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
console_init(); console_init();
@ -489,7 +489,7 @@ static __init void memory_setup(void)
} }
/* Relocate MTD image to the top of memory after the uncached memory area */ /* Relocate MTD image to the top of memory after the uncached memory area */
dma_memcpy((char *)memory_end, __bss_stop, mtd_size); dma_memcpy((char *)memory_end, _end, mtd_size);
memory_mtd_start = memory_end; memory_mtd_start = memory_end;
_ebss = memory_mtd_start; /* define _ebss for compatible */ _ebss = memory_mtd_start; /* define _ebss for compatible */
@ -528,13 +528,13 @@ static __init void memory_setup(void)
printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20); printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20);
printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20); printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20);
printk( KERN_INFO "Memory map:\n" printk(KERN_INFO "Memory map:\n"
KERN_INFO " text = 0x%p-0x%p\n" KERN_INFO " text = 0x%p-0x%p\n"
KERN_INFO " rodata = 0x%p-0x%p\n" KERN_INFO " rodata = 0x%p-0x%p\n"
KERN_INFO " bss = 0x%p-0x%p\n"
KERN_INFO " data = 0x%p-0x%p\n" KERN_INFO " data = 0x%p-0x%p\n"
KERN_INFO " stack = 0x%p-0x%p\n" KERN_INFO " stack = 0x%p-0x%p\n"
KERN_INFO " init = 0x%p-0x%p\n" KERN_INFO " init = 0x%p-0x%p\n"
KERN_INFO " bss = 0x%p-0x%p\n"
KERN_INFO " available = 0x%p-0x%p\n" KERN_INFO " available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX #ifdef CONFIG_MTD_UCLINUX
KERN_INFO " rootfs = 0x%p-0x%p\n" KERN_INFO " rootfs = 0x%p-0x%p\n"
@ -544,12 +544,12 @@ static __init void memory_setup(void)
#endif #endif
, _stext, _etext, , _stext, _etext,
__start_rodata, __end_rodata, __start_rodata, __end_rodata,
__bss_start, __bss_stop,
_sdata, _edata, _sdata, _edata,
(void *)&init_thread_union, (void *)&init_thread_union,
(void *)((int)(&init_thread_union) + 0x2000), (void *)((int)(&init_thread_union) + 0x2000),
__init_begin, __init_end, __init_begin, __init_end,
__bss_start, __bss_stop, (void *)_ramstart, (void *)memory_end
(void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX #ifdef CONFIG_MTD_UCLINUX
, (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size) , (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
#endif #endif

View File

@ -41,6 +41,9 @@ _jiffies = _jiffies_64;
SECTIONS SECTIONS
{ {
. = CONFIG_BOOT_LOAD; . = CONFIG_BOOT_LOAD;
/* Neither the text, ro_data or bss section need to be aligned
* So pack them back to back
*/
.text : .text :
{ {
__text = .; __text = .;
@ -58,22 +61,25 @@ SECTIONS
*(__ex_table) *(__ex_table)
___stop___ex_table = .; ___stop___ex_table = .;
. = ALIGN(4);
__etext = .; __etext = .;
} }
RO_DATA(PAGE_SIZE) /* Just in case the first read only is a 32-bit access */
RO_DATA(4)
.bss :
{
. = ALIGN(4);
___bss_start = .;
*(.bss .bss.*)
*(COMMON)
___bss_stop = .;
}
.data : .data :
{ {
/* make sure the init_task is aligned to the
* kernel thread size so we can locate the kernel
* stack properly and quickly.
*/
__sdata = .; __sdata = .;
. = ALIGN(THREAD_SIZE); /* This gets done first, so the glob doesn't suck it in */
*(.data.init_task)
. = ALIGN(32); . = ALIGN(32);
*(.data.cacheline_aligned) *(.data.cacheline_aligned)
@ -81,10 +87,22 @@ SECTIONS
*(.data.*) *(.data.*)
CONSTRUCTORS CONSTRUCTORS
/* make sure the init_task is aligned to the
* kernel thread size so we can locate the kernel
* stack properly and quickly.
*/
. = ALIGN(THREAD_SIZE); . = ALIGN(THREAD_SIZE);
*(.init_task.data)
__edata = .; __edata = .;
} }
/* The init section should be last, so when we free it, it goes into
* the general memory pool, and (hopefully) will decrease fragmentation
* a tiny bit. The init section has a _requirement_ that it be
* PAGE_SIZE aligned
*/
. = ALIGN(PAGE_SIZE);
___init_begin = .; ___init_begin = .;
.init.text : .init.text :
@ -179,16 +197,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE); . = ALIGN(PAGE_SIZE);
___init_end = .; ___init_end = .;
.bss : __end =.;
{
. = ALIGN(4);
___bss_start = .;
*(.bss .bss.*)
*(COMMON)
. = ALIGN(4);
___bss_stop = .;
__end = .;
}
STABS_DEBUG STABS_DEBUG