-rw-r--r--  arch/blackfin/include/asm/sections.h |  16
-rw-r--r--  arch/blackfin/kernel/setup.c         |  39
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S   |  28
3 files changed, 44 insertions(+), 39 deletions(-)
diff --git a/arch/blackfin/include/asm/sections.h b/arch/blackfin/include/asm/sections.h
index 1f5381fbb4a..42f6c53c59c 100644
--- a/arch/blackfin/include/asm/sections.h
+++ b/arch/blackfin/include/asm/sections.h
@@ -13,10 +13,18 @@ extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
 
 extern unsigned long _ramstart, _ramend, _rambase;
 extern unsigned long memory_start, memory_end, physical_mem_end;
-extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
-	_ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
-	_stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[],
-	_ebss_l2[], _l2_lma_start[];
+/*
+ * The weak markings on the lengths might seem weird, but this is required
+ * in order to make gcc accept the fact that these may actually have a value
+ * of 0 (since they aren't actually addresses, but sizes of sections).
+ */
+extern char _stext_l1[], _etext_l1[], _text_l1_lma[], __weak _text_l1_len[];
+extern char _sdata_l1[], _edata_l1[], _sbss_l1[], _ebss_l1[],
+	_data_l1_lma[], __weak _data_l1_len[];
+extern char _sdata_b_l1[], _edata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
+	_data_b_l1_lma[], __weak _data_b_l1_len[];
+extern char _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[],
+	_sbss_l2[], _ebss_l2[], _l2_lma[], __weak _l2_len[];
 
 #include <asm/mem_map.h>
 
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index c202a44d141..5fda7748831 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -178,10 +178,10 @@ void __init bfin_cache_init(void)
 
 void __init bfin_relocate_l1_mem(void)
 {
-	unsigned long l1_code_length;
-	unsigned long l1_data_a_length;
-	unsigned long l1_data_b_length;
-	unsigned long l2_length;
+	unsigned long text_l1_len = (unsigned long)_text_l1_len;
+	unsigned long data_l1_len = (unsigned long)_data_l1_len;
+	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
+	unsigned long l2_len = (unsigned long)_l2_len;
 
 	early_shadow_stamp();
 
@@ -201,30 +201,23 @@ void __init bfin_relocate_l1_mem(void)
 
 	blackfin_dma_early_init();
 
-	/* if necessary, copy _stext_l1 to _etext_l1 to L1 instruction SRAM */
-	l1_code_length = _etext_l1 - _stext_l1;
-	if (l1_code_length)
-		early_dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);
+	/* if necessary, copy L1 text to L1 instruction SRAM */
+	if (L1_CODE_LENGTH && text_l1_len)
+		early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len);
 
-	/* if necessary, copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */
-	l1_data_a_length = _sbss_l1 - _sdata_l1;
-	if (l1_data_a_length)
-		early_dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);
+	/* if necessary, copy L1 data to L1 data bank A SRAM */
+	if (L1_DATA_A_LENGTH && data_l1_len)
+		early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len);
 
-	/* if necessary, copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */
-	l1_data_b_length = _sbss_b_l1 - _sdata_b_l1;
-	if (l1_data_b_length)
-		early_dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
-			l1_data_a_length, l1_data_b_length);
+	/* if necessary, copy L1 data B to L1 data bank B SRAM */
+	if (L1_DATA_B_LENGTH && data_b_l1_len)
+		early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len);
 
 	early_dma_memcpy_done();
 
-	/* if necessary, copy _stext_l2 to _edata_l2 to L2 SRAM */
-	if (L2_LENGTH != 0) {
-		l2_length = _sbss_l2 - _stext_l2;
-		if (l2_length)
-			memcpy(_stext_l2, _l2_lma_start, l2_length);
-	}
+	/* if necessary, copy L2 text/data to L2 SRAM */
+	if (L2_LENGTH && l2_len)
+		memcpy(_stext_l2, _l2_lma, l2_len);
 }
 
 /* add_memory_region to memmap */
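The comment added to sections.h above is the heart of the __weak markings: the section lengths are exported by the linker script as symbols, so C code only ever sees their addresses, and gcc normally assumes the address of an extern object is never zero. A minimal sketch of that consuming pattern, with made-up symbol names (only the declarations shown in the hunks above are the real ones):

/*
 * Sketch only: how a length exported by a linker script
 * (e.g. "__example_len = SIZEOF(.example);") is consumed from C.
 * The symbols are declared as char arrays so that only their addresses
 * are used; the "length" is recovered by casting that address.  The weak
 * attribute (the kernel's __weak) keeps gcc from assuming the address is
 * non-zero, which matters because an empty section gives a length of 0.
 */
#include <string.h>

extern char _example_dst[];                        /* VMA to copy to      */
extern char _example_lma[];                        /* load address (LMA)  */
extern char _example_len[] __attribute__((weak));  /* SIZEOF(.example)    */

void relocate_example(void)
{
	unsigned long len = (unsigned long)_example_len;

	if (len)	/* legal: len really may be 0 */
		memcpy(_example_dst, _example_lma, len);
}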
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 10e12539000..01682eed771 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -123,8 +123,6 @@ SECTIONS
 		EXIT_DATA
 	}
 
-	__l1_lma_start = .;
-
 	.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
 	{
 		. = ALIGN(4);
@@ -136,9 +134,11 @@ SECTIONS
 		. = ALIGN(4);
 		__etext_l1 = .;
 	}
-	ASSERT (SIZEOF(.text_l1) <= L1_CODE_LENGTH, "L1 text overflow!")
+	__text_l1_lma = LOADADDR(.text_l1);
+	__text_l1_len = SIZEOF(.text_l1);
+	ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!")
 
-	.data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1))
+	.data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len)
 	{
 		. = ALIGN(4);
 		__sdata_l1 = .;
@@ -154,9 +154,11 @@ SECTIONS
 		. = ALIGN(4);
 		__ebss_l1 = .;
 	}
-	ASSERT (SIZEOF(.data_l1) <= L1_DATA_A_LENGTH, "L1 data A overflow!")
+	__data_l1_lma = LOADADDR(.data_l1);
+	__data_l1_len = SIZEOF(.data_l1);
+	ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!")
 
-	.data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1))
+	.data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len)
 	{
 		. = ALIGN(4);
 		__sdata_b_l1 = .;
@@ -169,11 +171,11 @@ SECTIONS
 		. = ALIGN(4);
 		__ebss_b_l1 = .;
 	}
-	ASSERT (SIZEOF(.data_b_l1) <= L1_DATA_B_LENGTH, "L1 data B overflow!")
-
-	__l2_lma_start = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);
+	__data_b_l1_lma = LOADADDR(.data_b_l1);
+	__data_b_l1_len = SIZEOF(.data_b_l1);
+	ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!")
 
-	.text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1))
+	.text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len)
 	{
 		. = ALIGN(4);
 		__stext_l2 = .;
@@ -195,12 +197,14 @@ SECTIONS
 		. = ALIGN(4);
 		__ebss_l2 = .;
 	}
-	ASSERT (SIZEOF(.text_data_l2) <= L2_LENGTH, "L2 overflow!")
+	__l2_lma = LOADADDR(.text_data_l2);
+	__l2_len = SIZEOF(.text_data_l2);
+	ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!")
 
 	/* Force trailing alignment of our init section so that when we
 	 * free our init memory, we don't leave behind a partial page.
 	 */
-	. = LOADADDR(.text_data_l2) + SIZEOF(.text_data_l2);
+	. = __l2_lma + __l2_len;
 	. = ALIGN(PAGE_SIZE);
 	___init_end = .;
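On the linker-script side, the pattern the patch settles on is to publish, for every relocated region, both its load address and its size (__x_lma = LOADADDR(.x); __x_len = SIZEOF(.x);) and to chain the next region's AT() off those symbols rather than repeating LOADADDR() + SIZEOF(). Once each region carries its own (destination, LMA, length) triple, the C side no longer has to add up offsets into a single __l1_lma_start blob by hand. A hedged sketch of that consumer shape, with hypothetical region names (this is not code from the patch):

#include <string.h>

/* Hypothetical symbols; each triple would be exported by the linker script. */
extern char _reg_a_dst[], _reg_a_lma[], _reg_a_len[] __attribute__((weak));
extern char _reg_b_dst[], _reg_b_lma[], _reg_b_len[] __attribute__((weak));

struct reloc_region {
	char *dst;		/* VMA the region runs from (e.g. L1 SRAM)  */
	const char *lma;	/* LMA where the loader left the image      */
	unsigned long len;	/* SIZEOF() of the section, may be 0        */
};

/* Copy every region whose section is non-empty; mirrors the shape of
 * bfin_relocate_l1_mem() after the patch, not its actual code. */
void relocate_regions(void)
{
	const struct reloc_region regions[] = {
		{ _reg_a_dst, _reg_a_lma, (unsigned long)_reg_a_len },
		{ _reg_b_dst, _reg_b_lma, (unsigned long)_reg_b_len },
	};
	unsigned int i;

	for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
		if (regions[i].len)
			memcpy(regions[i].dst, regions[i].lma, regions[i].len);
}

The patch itself keeps separate calls (early_dma_memcpy() for the L1 banks, plain memcpy() for L2), but each call now consumes exactly such a per-region LMA/length pair instead of recomputing offsets.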