Diffstat (limited to 'arch/blackfin/kernel')
27 files changed, 1269 insertions, 768 deletions
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile index fd4d4328a0f..141d9281e4b 100644 --- a/arch/blackfin/kernel/Makefile +++ b/arch/blackfin/kernel/Makefile @@ -15,14 +15,18 @@ else obj-y += time.o endif +obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o +obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o +CFLAGS_REMOVE_ftrace.o = -pg + obj-$(CONFIG_IPIPE) += ipipe.o -obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o obj-$(CONFIG_CPLB_INFO) += cplbinfo.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +obj-$(CONFIG_STACKTRACE) += stacktrace.o # the kgdb test puts code into L2 and without linker # relaxation, we need to force long calls to/from it diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c index 8531693fb48..e0bf8cc0690 100644 --- a/arch/blackfin/kernel/bfin_dma_5xx.c +++ b/arch/blackfin/kernel/bfin_dma_5xx.c @@ -20,6 +20,11 @@ #include <asm/dma.h> #include <asm/uaccess.h> +/* + * To make sure we work around 05000119 - we always check DMA_DONE bit, + * never the DMA_RUN bit + */ + struct dma_channel dma_ch[MAX_DMA_CHANNELS]; EXPORT_SYMBOL(dma_ch); @@ -232,6 +237,87 @@ void blackfin_dma_resume(void) void __init blackfin_dma_early_init(void) { bfin_write_MDMA_S0_CONFIG(0); + bfin_write_MDMA_S1_CONFIG(0); +} + +void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size) +{ + unsigned long dst = (unsigned long)pdst; + unsigned long src = (unsigned long)psrc; + struct dma_register *dst_ch, *src_ch; + + /* We assume that everything is 4 byte aligned, so include + * a basic sanity check + */ + BUG_ON(dst % 4); + BUG_ON(src % 4); + BUG_ON(size % 4); + + /* Force a sync in case a previous config reset on this channel + * occurred. This is needed so subsequent writes to DMA registers + * are not spuriously lost/corrupted. 
+ */ + __builtin_bfin_ssync(); + + src_ch = 0; + /* Find an available memDMA channel */ + while (1) { + if (!src_ch || src_ch == (struct dma_register *)MDMA_S1_NEXT_DESC_PTR) { + dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR; + src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR; + } else { + dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR; + src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR; + } + + if (!bfin_read16(&src_ch->cfg)) { + break; + } else { + if (bfin_read16(&src_ch->irq_status) & DMA_DONE) + bfin_write16(&src_ch->cfg, 0); + } + + } + + /* Destination */ + bfin_write32(&dst_ch->start_addr, dst); + bfin_write16(&dst_ch->x_count, size >> 2); + bfin_write16(&dst_ch->x_modify, 1 << 2); + bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR); + + /* Source */ + bfin_write32(&src_ch->start_addr, src); + bfin_write16(&src_ch->x_count, size >> 2); + bfin_write16(&src_ch->x_modify, 1 << 2); + bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR); + + /* Enable */ + bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32); + bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32); + + /* Since we are atomic now, don't use the workaround ssync */ + __builtin_bfin_ssync(); +} + +void __init early_dma_memcpy_done(void) +{ + while ((bfin_read_MDMA_S0_CONFIG() && !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) || + (bfin_read_MDMA_S1_CONFIG() && !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE))) + continue; + + bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); + bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR); + /* + * Now that DMA is done, we would normally flush cache, but + * i/d cache isn't running this early, so we don't bother, + * and just clear out the DMA channel for next time + */ + bfin_write_MDMA_S0_CONFIG(0); + bfin_write_MDMA_S1_CONFIG(0); + bfin_write_MDMA_D0_CONFIG(0); + bfin_write_MDMA_D1_CONFIG(0); + + __builtin_bfin_ssync(); } /** @@ -367,10 +453,10 @@ void *dma_memcpy(void *pdst, const void *psrc, size_t size) unsigned long src = (unsigned long)psrc; size_t bulk, rest; - if (bfin_addr_dcachable(src)) + if (bfin_addr_dcacheable(src)) blackfin_dcache_flush_range(src, src + size); - if (bfin_addr_dcachable(dst)) + if (bfin_addr_dcacheable(dst)) blackfin_dcache_invalidate_range(dst, dst + size); bulk = size & ~0xffff; diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c index a0678da4053..beffa00a93c 100644 --- a/arch/blackfin/kernel/bfin_gpio.c +++ b/arch/blackfin/kernel/bfin_gpio.c @@ -313,15 +313,6 @@ inline void portmux_setup(unsigned short per) # define portmux_setup(...)
do { } while (0) #endif -static int __init bfin_gpio_init(void) -{ - printk(KERN_INFO "Blackfin GPIO Controller\n"); - - return 0; -} -arch_initcall(bfin_gpio_init); - - #ifndef CONFIG_BF54x /*********************************************************** * @@ -1021,15 +1012,6 @@ int bfin_gpio_irq_request(unsigned gpio, const char *label) local_irq_save_hw(flags); - if (unlikely(reserved_gpio_irq_map[gpio_bank(gpio)] & gpio_bit(gpio))) { - if (system_state == SYSTEM_BOOTING) - dump_stack(); - printk(KERN_ERR - "bfin-gpio: GPIO %d is already reserved as gpio-irq !\n", - gpio); - local_irq_restore_hw(flags); - return -EBUSY; - } if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) { if (system_state == SYSTEM_BOOTING) dump_stack(); diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c index 01f917d58b5..ed8392c117e 100644 --- a/arch/blackfin/kernel/bfin_ksyms.c +++ b/arch/blackfin/kernel/bfin_ksyms.c @@ -10,13 +10,13 @@ #include <linux/uaccess.h> #include <asm/cacheflush.h> +#include <asm/io.h> /* Allow people to have their own Blackfin exception handler in a module */ EXPORT_SYMBOL(bfin_return_from_exception); /* All the Blackfin cache functions: mach-common/cache.S */ EXPORT_SYMBOL(blackfin_dcache_invalidate_range); -EXPORT_SYMBOL(blackfin_icache_dcache_flush_range); EXPORT_SYMBOL(blackfin_icache_flush_range); EXPORT_SYMBOL(blackfin_dcache_flush_range); EXPORT_SYMBOL(blackfin_dflush_page); @@ -104,3 +104,8 @@ EXPORT_SYMBOL(__raw_smp_mark_barrier_asm); EXPORT_SYMBOL(__raw_smp_check_barrier_asm); #endif #endif + +#ifdef CONFIG_FUNCTION_TRACER +extern void _mcount(void); +EXPORT_SYMBOL(_mcount); +#endif diff --git a/arch/blackfin/kernel/cplb-mpu/cacheinit.c b/arch/blackfin/kernel/cplb-mpu/cacheinit.c index c6ff947f9d3..d5a86c3017f 100644 --- a/arch/blackfin/kernel/cplb-mpu/cacheinit.c +++ b/arch/blackfin/kernel/cplb-mpu/cacheinit.c @@ -55,7 +55,14 @@ void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl) } ctrl = bfin_read_DMEM_CONTROL(); - ctrl |= DMEM_CNTR; + + /* + * Anomaly notes: + * 05000287 - We implement workaround #2 - Change the DMEM_CONTROL + * register, so that the port preferences for DAG0 and DAG1 are set + * to port B + */ + ctrl |= DMEM_CNTR | PORT_PREF0 | (ANOMALY_05000287 ? PORT_PREF1 : 0); bfin_write_DMEM_CONTROL(ctrl); SSYNC(); } diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c index 3e329a6ce04..36193eed9a1 100644 --- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c +++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c @@ -46,13 +46,13 @@ void __init generate_cplb_tables_cpu(unsigned int cpu) printk(KERN_INFO "MPU: setting up cplb tables with memory protection\n"); -#ifdef CONFIG_BFIN_ICACHE +#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE i_cache = CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND; #endif -#ifdef CONFIG_BFIN_DCACHE +#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE d_cache = CPLB_L1_CHBL; -#ifdef CONFIG_BFIN_WT +#ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH d_cache |= CPLB_L1_AOW | CPLB_WT; #endif #endif @@ -64,7 +64,7 @@ void __init generate_cplb_tables_cpu(unsigned int cpu) dcplb_tbl[cpu][i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB; icplb_tbl[cpu][i_i].addr = 0; - icplb_tbl[cpu][i_i++].data = i_cache | CPLB_USER_RD | PAGE_SIZE_1KB; + icplb_tbl[cpu][i_i++].data = CPLB_VALID | i_cache | CPLB_USER_RD | PAGE_SIZE_1KB; /* Cover kernel memory with 4M pages.
*/ addr = 0; @@ -91,9 +91,9 @@ void __init generate_cplb_tables_cpu(unsigned int cpu) /* Cover L2 memory */ #if L2_LENGTH > 0 dcplb_tbl[cpu][i_d].addr = L2_START; - dcplb_tbl[cpu][i_d++].data = L2_DMEMORY | PAGE_SIZE_1MB; + dcplb_tbl[cpu][i_d++].data = L2_DMEMORY; icplb_tbl[cpu][i_i].addr = L2_START; - icplb_tbl[cpu][i_i++].data = L2_IMEMORY | PAGE_SIZE_1MB; + icplb_tbl[cpu][i_i++].data = L2_IMEMORY; #endif first_mask_dcplb = i_d; diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c index 87463ce87f5..bcdfe9b0b71 100644 --- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c +++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c @@ -150,15 +150,19 @@ static noinline int dcplb_miss(unsigned int cpu) nr_dcplb_miss[cpu]++; d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB; -#ifdef CONFIG_BFIN_DCACHE - if (bfin_addr_dcachable(addr)) { +#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE + if (bfin_addr_dcacheable(addr)) { d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND; -#ifdef CONFIG_BFIN_WT +# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH d_data |= CPLB_L1_AOW | CPLB_WT; -#endif +# endif } #endif - if (addr >= physical_mem_end) { + + if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) { + addr = L2_START; + d_data = L2_DMEMORY; + } else if (addr >= physical_mem_end) { if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE && (status & FAULT_USERSUPV)) { addr &= ~0x3fffff; @@ -235,7 +239,7 @@ static noinline int icplb_miss(unsigned int cpu) i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB; -#ifdef CONFIG_BFIN_ICACHE +#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE /* * Normal RAM, and possibly the reserved memory area, are * cacheable. @@ -245,7 +249,10 @@ static noinline int icplb_miss(unsigned int cpu) i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND; #endif - if (addr >= physical_mem_end) { + if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) { + addr = L2_START; + i_data = L2_IMEMORY; + } else if (addr >= physical_mem_end) { if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH && (status & FAULT_USERSUPV)) { addr &= ~(1 * 1024 * 1024 - 1); @@ -365,13 +372,18 @@ void set_mask_dcplbs(unsigned long *masks, unsigned int cpu) local_irq_save_hw(flags); current_rwx_mask[cpu] = masks; - d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB; -#ifdef CONFIG_BFIN_DCACHE - d_data |= CPLB_L1_CHBL; -#ifdef CONFIG_BFIN_WT - d_data |= CPLB_L1_AOW | CPLB_WT; -#endif + if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) { + addr = L2_START; + d_data = L2_DMEMORY; + } else { + d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB; +#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE + d_data |= CPLB_L1_CHBL; +# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH + d_data |= CPLB_L1_AOW | CPLB_WT; +# endif #endif + } disable_dcplb(); for (i = first_mask_dcplb; i < first_switched_dcplb; i++) { diff --git a/arch/blackfin/kernel/cplb-nompu/cacheinit.c b/arch/blackfin/kernel/cplb-nompu/cacheinit.c index c6ff947f9d3..d5a86c3017f 100644 --- a/arch/blackfin/kernel/cplb-nompu/cacheinit.c +++ b/arch/blackfin/kernel/cplb-nompu/cacheinit.c @@ -55,7 +55,14 @@ void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl) } ctrl = bfin_read_DMEM_CONTROL(); - ctrl |= DMEM_CNTR; + + /* + * Anomaly notes: + * 05000287 - We implement workaround #2 - Change the DMEM_CONTROL + * register, so that the port preferences for DAG0 and DAG1 are set + * to port B + */ + ctrl |= DMEM_CNTR | PORT_PREF0 | (ANOMALY_05000287 ? 
PORT_PREF1 : 0); bfin_write_DMEM_CONTROL(ctrl); SSYNC(); } diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c index 8cbb47c7b66..12b030842fd 100644 --- a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c +++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c @@ -28,6 +28,7 @@ #include <asm/cplbinit.h> #include <asm/cplb.h> #include <asm/mmu_context.h> +#include <asm/traps.h> /* * WARNING @@ -100,28 +101,6 @@ static inline void write_icplb_data(int cpu, int idx, unsigned long data, #endif } -/* - * Given the contents of the status register, return the index of the - * CPLB that caused the fault. - */ -static inline int faulting_cplb_index(int status) -{ - int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF); - return 30 - signbits; -} - -/* - * Given the contents of the status register and the DCPLB_DATA contents, - * return true if a write access should be permitted. - */ -static inline int write_permitted(int status, unsigned long data) -{ - if (status & FAULT_USERSUPV) - return !!(data & CPLB_SUPV_WR); - else - return !!(data & CPLB_USER_WR); -} - /* Counters to implement round-robin replacement. */ static int icplb_rr_index[NR_CPUS] PDT_ATTR; static int dcplb_rr_index[NR_CPUS] PDT_ATTR; @@ -245,43 +224,16 @@ MGR_ATTR static int dcplb_miss(int cpu) return CPLB_RELOADED; } -MGR_ATTR static noinline int dcplb_protection_fault(int cpu) -{ - int status = bfin_read_DCPLB_STATUS(); - - nr_dcplb_prot[cpu]++; - - if (likely(status & FAULT_RW)) { - int idx = faulting_cplb_index(status); - unsigned long regaddr = DCPLB_DATA0 + idx * 4; - unsigned long data = bfin_read32(regaddr); - - /* Check if fault is to dirty a clean page */ - if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) && - write_permitted(status, data)) { - - dcplb_tbl[cpu][idx].data = data; - bfin_write32(regaddr, data); - return CPLB_RELOADED; - } - } - - return CPLB_PROT_VIOL; -} - MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs) { int cause = seqstat & 0x3f; unsigned int cpu = smp_processor_id(); switch (cause) { - case 0x2C: + case VEC_CPLB_I_M: return icplb_miss(cpu); - case 0x26: + case VEC_CPLB_M: return dcplb_miss(cpu); default: - if (unlikely(cause == 0x23)) - return dcplb_protection_fault(cpu); - return CPLB_UNKNOWN_ERR; } } diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c index c8ad051742e..2ab56811841 100644 --- a/arch/blackfin/kernel/early_printk.c +++ b/arch/blackfin/kernel/early_printk.c @@ -178,25 +178,15 @@ int __init setup_early_printk(char *buf) asmlinkage void __init init_early_exception_vectors(void) { + u32 evt; SSYNC(); /* cannot program in software: * evt0 - emulation (jtag) * evt1 - reset */ - bfin_write_EVT2(early_trap); - bfin_write_EVT3(early_trap); - bfin_write_EVT5(early_trap); - bfin_write_EVT6(early_trap); - bfin_write_EVT7(early_trap); - bfin_write_EVT8(early_trap); - bfin_write_EVT9(early_trap); - bfin_write_EVT10(early_trap); - bfin_write_EVT11(early_trap); - bfin_write_EVT12(early_trap); - bfin_write_EVT13(early_trap); - bfin_write_EVT14(early_trap); - bfin_write_EVT15(early_trap); + for (evt = EVT2; evt <= EVT15; evt += 4) + bfin_write32(evt, early_trap); CSYNC(); /* Set all the return from interrupt, exception, NMI to a known place @@ -212,11 +202,15 @@ asmlinkage void __init init_early_exception_vectors(void) asmlinkage void __init early_trap_c(struct pt_regs *fp, void *retaddr) { /* This can happen before the uart is initialized, so initialize - * the UART now + * the UART now (but only if we are running on the 
processor we think + we are compiled for - otherwise we write to MMRs that don't exist, + and cause other problems). Nothing comes out of the UART, but it does + end up in the __buf_log. */ - if (likely(early_console == NULL)) + if (likely(early_console == NULL) && CPUID == bfin_cpuid()) setup_early_printk(DEFAULT_EARLY_PORT); + printk(KERN_EMERG "Early panic\n"); dump_bfin_mem(fp); show_regs(fp); dump_bfin_trace_buffer(); diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S new file mode 100644 index 00000000000..6980b7a0615 --- /dev/null +++ b/arch/blackfin/kernel/ftrace-entry.S @@ -0,0 +1,140 @@ +/* + * mcount and friends -- ftrace stuff + * + * Copyright (C) 2009 Analog Devices Inc. + * Licensed under the GPL-2 or later. + */ + +#include <linux/linkage.h> +#include <asm/ftrace.h> + +.text + +/* GCC will have called us before setting up the function prologue, so we + * can clobber the normal scratch registers, but we need to make sure to + * save/restore the registers used for argument passing (R0-R2) in case + * the profiled function is using them. With data registers, R3 is the + * only one we can blow away. With pointer registers, we have P0-P2. + * + * Upon entry, the RETS will point to the top of the current profiled + * function. And since GCC set up the frame for us, the previous function + * will be waiting there. mmmm pie. + */ +ENTRY(__mcount) + /* save third function arg early so we can do testing below */ + [--sp] = r2; + + /* load the function pointer to the tracer */ + p0.l = _ftrace_trace_function; + p0.h = _ftrace_trace_function; + r3 = [p0]; + + /* optional micro optimization: don't call the stub tracer */ + r2.l = _ftrace_stub; + r2.h = _ftrace_stub; + cc = r2 == r3; + if ! cc jump .Ldo_trace; + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* if the ftrace_graph_return function pointer is not set to + * the ftrace_stub entry, call prepare_ftrace_return(). + */ + p0.l = _ftrace_graph_return; + p0.h = _ftrace_graph_return; + r3 = [p0]; + cc = r2 == r3; + if ! cc jump _ftrace_graph_caller; + + /* similarly, if the ftrace_graph_entry function pointer is not + * set to the ftrace_graph_entry_stub entry, ... + */ + p0.l = _ftrace_graph_entry; + p0.h = _ftrace_graph_entry; + r2.l = _ftrace_graph_entry_stub; + r2.h = _ftrace_graph_entry_stub; + r3 = [p0]; + cc = r2 == r3; + if ! cc jump _ftrace_graph_caller; +#endif + + r2 = [sp++]; + rts; + +.Ldo_trace: + + /* save first/second function arg and the return register */ + [--sp] = r0; + [--sp] = r1; + [--sp] = rets; + + /* setup the tracer function */ + p0 = r3; + + /* tracer(ulong frompc, ulong selfpc): + * frompc: the pc that did the call to ... + * selfpc: ... this location + * the selfpc itself will need adjusting for the mcount call + */ + r1 = rets; + r0 = [fp + 4]; + r1 += -MCOUNT_INSN_SIZE; + + /* call the tracer */ + call (p0); + + /* restore state and get out of dodge */ +.Lfinish_trace: + rets = [sp++]; + r1 = [sp++]; + r0 = [sp++]; + r2 = [sp++]; + +.globl _ftrace_stub +_ftrace_stub: + rts; +ENDPROC(__mcount) + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* The prepare_ftrace_return() function is similar to the trace function + * except it takes a pointer to the location of the frompc. This is so + * the prepare_ftrace_return() can hijack it temporarily for probing + * purposes.
+ */ +ENTRY(_ftrace_graph_caller) + /* save first/second function arg and the return register */ + [--sp] = r0; + [--sp] = r1; + [--sp] = rets; + + r0 = fp; + r1 = rets; + r0 += 4; + r1 += -MCOUNT_INSN_SIZE; + call _prepare_ftrace_return; + + jump .Lfinish_trace; +ENDPROC(_ftrace_graph_caller) + +/* Undo the rewrite caused by ftrace_graph_caller(). The common function + * ftrace_return_to_handler() will return the original rets so we can + * restore it and be on our way. + */ +ENTRY(_return_to_handler) + /* make sure original return values are saved */ + [--sp] = p0; + [--sp] = r0; + [--sp] = r1; + + /* get original return address */ + call _ftrace_return_to_handler; + rets = r0; + + /* anomaly 05000371 - make sure we have at least three instructions + * between rets setting and the return + */ + r1 = [sp++]; + r0 = [sp++]; + p0 = [sp++]; + rts; +ENDPROC(_return_to_handler) +#endif diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c new file mode 100644 index 00000000000..905bfc40a00 --- /dev/null +++ b/arch/blackfin/kernel/ftrace.c @@ -0,0 +1,42 @@ +/* + * ftrace graph code + * + * Copyright (C) 2009 Analog Devices Inc. + * Licensed under the GPL-2 or later. + */ + +#include <linux/ftrace.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <asm/atomic.h> + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + +/* + * Hook the return address and push it in the stack of return addrs + * in current thread info. + */ +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) +{ + struct ftrace_graph_ent trace; + unsigned long return_hooker = (unsigned long)&return_to_handler; + + if (unlikely(atomic_read(&current->tracing_graph_pause))) + return; + + if (ftrace_push_return_trace(*parent, self_addr, &trace.depth) == -EBUSY) + return; + + trace.func = self_addr; + + /* Only trace if the calling function expects to */ + if (!ftrace_graph_entry(&trace)) { + current->curr_ret_stack--; + return; + } + + /* all is well in the world ! hijack RETS ... */ + *parent = return_hooker; +} + +#endif diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c index 3a3e9615b00..7281a91d26b 100644 --- a/arch/blackfin/kernel/gptimers.c +++ b/arch/blackfin/kernel/gptimers.c @@ -189,10 +189,10 @@ void set_gptimer_status(int group, uint32_t value) } EXPORT_SYMBOL(set_gptimer_status); -uint16_t get_gptimer_intr(int timer_id) +int get_gptimer_intr(int timer_id) { tassert(timer_id < MAX_BLACKFIN_GPTIMERS); - return (group_regs[BFIN_TIMER_OCTET(timer_id)]->status & timil_mask[timer_id]) ? 1 : 0; + return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & timil_mask[timer_id]); } EXPORT_SYMBOL(get_gptimer_intr); @@ -203,10 +203,10 @@ void clear_gptimer_intr(int timer_id) { tassert(timer_id < MAX_BLACKFIN_GPTIMERS); } EXPORT_SYMBOL(clear_gptimer_intr); -uint16_t get_gptimer_over(int timer_id) +int get_gptimer_over(int timer_id) { tassert(timer_id < MAX_BLACKFIN_GPTIMERS); - return (group_regs[BFIN_TIMER_OCTET(timer_id)]->status & tovf_mask[timer_id]) ?
1 : 0; + return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & tovf_mask[timer_id]); } EXPORT_SYMBOL(get_gptimer_over); @@ -217,6 +217,13 @@ void clear_gptimer_over(int timer_id) } EXPORT_SYMBOL(clear_gptimer_over); +int get_gptimer_run(int timer_id) +{ + tassert(timer_id < MAX_BLACKFIN_GPTIMERS); + return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & trun_mask[timer_id]); +} +EXPORT_SYMBOL(get_gptimer_run); + void set_gptimer_config(int timer_id, uint16_t config) { tassert(timer_id < MAX_BLACKFIN_GPTIMERS); @@ -244,7 +251,7 @@ void enable_gptimers(uint16_t mask) } EXPORT_SYMBOL(enable_gptimers); -void disable_gptimers(uint16_t mask) +static void _disable_gptimers(uint16_t mask) { int i; uint16_t m = mask; @@ -253,6 +260,12 @@ void disable_gptimers(uint16_t mask) group_regs[i]->disable = m & 0xFF; m >>= 8; } +} + +void disable_gptimers(uint16_t mask) +{ + int i; + _disable_gptimers(mask); for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i) if (mask & (1 << i)) group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i]; @@ -260,6 +273,13 @@ void disable_gptimers(uint16_t mask) } EXPORT_SYMBOL(disable_gptimers); +void disable_gptimers_sync(uint16_t mask) +{ + _disable_gptimers(mask); + SSYNC(); +} +EXPORT_SYMBOL(disable_gptimers_sync); + void set_gptimer_pulse_hi(int timer_id) { tassert(timer_id < MAX_BLACKFIN_GPTIMERS); diff --git a/arch/blackfin/kernel/init_task.c b/arch/blackfin/kernel/init_task.c index 2c228c02097..c26c34de9f3 100644 --- a/arch/blackfin/kernel/init_task.c +++ b/arch/blackfin/kernel/init_task.c @@ -35,10 +35,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); - -struct mm_struct init_mm = INIT_MM(init_mm); -EXPORT_SYMBOL(init_mm); - /* * Initial task structure. * diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c index a5de8d45424..b8d22034b9a 100644 --- a/arch/blackfin/kernel/ipipe.c +++ b/arch/blackfin/kernel/ipipe.c @@ -52,7 +52,7 @@ EXPORT_SYMBOL(__ipipe_freq_scale); atomic_t __ipipe_irq_lvdepth[IVG15 + 1]; -unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags; +unsigned long __ipipe_irq_lvmask = bfin_no_irqs; EXPORT_SYMBOL(__ipipe_irq_lvmask); static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc) @@ -99,7 +99,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs) * interrupt. 
*/ m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR); - this_domain = ipipe_current_domain; + this_domain = __ipipe_current_domain; if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))) head = &this_domain->p_link; @@ -167,7 +167,7 @@ int __ipipe_check_root(void) void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq) { struct irq_desc *desc = irq_to_desc(irq); - int prio = desc->ic_prio; + int prio = __ipipe_get_irq_priority(irq); desc->depth = 0; if (ipd != &ipipe_root && @@ -178,8 +178,7 @@ EXPORT_SYMBOL(__ipipe_enable_irqdesc); void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq) { - struct irq_desc *desc = irq_to_desc(irq); - int prio = desc->ic_prio; + int prio = __ipipe_get_irq_priority(irq); if (ipd != &ipipe_root && atomic_dec_and_test(&__ipipe_irq_lvdepth[prio])) @@ -213,7 +212,9 @@ void __ipipe_unstall_root_raw(void) int __ipipe_syscall_root(struct pt_regs *regs) { + struct ipipe_percpu_domain_data *p; unsigned long flags; + int ret; /* * We need to run the IRQ tail hook whenever we don't @@ -232,29 +233,31 @@ int __ipipe_syscall_root(struct pt_regs *regs) /* * This routine either returns: * 0 -- if the syscall is to be passed to Linux; - * 1 -- if the syscall should not be passed to Linux, and no + * >0 -- if the syscall should not be passed to Linux, and no * tail work should be performed; - * -1 -- if the syscall should not be passed to Linux but the + * <0 -- if the syscall should not be passed to Linux but the * tail work has to be performed (for handling signals etc). */ - if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) && - __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) { - if (ipipe_root_domain_p && !in_atomic()) { - /* - * Sync pending VIRQs before _TIF_NEED_RESCHED - * is tested. 
- */ - local_irq_save_hw(flags); - if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0) - __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT); - local_irq_restore_hw(flags); - return -1; - } + if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL)) + return 0; + + ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs); + + local_irq_save_hw(flags); + + if (!__ipipe_root_domain_p) { + local_irq_restore_hw(flags); return 1; } - return 0; + p = ipipe_root_cpudom_ptr(); + if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0) + __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT); + + local_irq_restore_hw(flags); + + return -ret; } unsigned long ipipe_critical_enter(void (*syncfn) (void)) @@ -310,12 +313,16 @@ int ipipe_trigger_irq(unsigned irq) asmlinkage void __ipipe_sync_root(void) { + void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook; unsigned long flags; BUG_ON(irqs_disabled()); local_irq_save_hw(flags); + if (irq_tail_hook) + irq_tail_hook(); + clear_thread_flag(TIF_IRQ_SYNC); if (ipipe_root_cpudom_var(irqpend_himask) != 0) @@ -326,9 +333,7 @@ asmlinkage void __ipipe_sync_root(void) void ___ipipe_sync_pipeline(unsigned long syncmask) { - struct ipipe_domain *ipd = ipipe_current_domain; - - if (ipd == ipipe_root_domain) { + if (__ipipe_root_domain_p) { if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status))) return; } @@ -337,8 +342,3 @@ void ___ipipe_sync_pipeline(unsigned long syncmask) } EXPORT_SYMBOL(show_stack); - -#ifdef CONFIG_IPIPE_TRACE_MCOUNT -void notrace _mcount(void); -EXPORT_SYMBOL(_mcount); -#endif /* CONFIG_IPIPE_TRACE_MCOUNT */ diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c index 401bd32aa49..4b5fd36187d 100644 --- a/arch/blackfin/kernel/irqchip.c +++ b/arch/blackfin/kernel/irqchip.c @@ -38,36 +38,15 @@ #include <asm/pda.h> static atomic_t irq_err_count; -static spinlock_t irq_controller_lock; - -/* - * Dummy mask/unmask handler - */ -void dummy_mask_unmask_irq(unsigned int irq) -{ -} - void ack_bad_irq(unsigned int irq) { atomic_inc(&irq_err_count); printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq); } -static struct irq_chip bad_chip = { - .ack = dummy_mask_unmask_irq, - .mask = dummy_mask_unmask_irq, - .unmask = dummy_mask_unmask_irq, -}; - static struct irq_desc bad_irq_desc = { - .status = IRQ_DISABLED, - .chip = &bad_chip, .handle_irq = handle_bad_irq, - .depth = 1, .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), -#ifdef CONFIG_SMP - .affinity = CPU_MASK_ALL -#endif }; #ifdef CONFIG_CPUMASK_OFFSTACK @@ -75,6 +54,7 @@ static struct irq_desc bad_irq_desc = { #error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK." #endif +#ifdef CONFIG_PROC_FS int show_interrupts(struct seq_file *p, void *v) { int i = *(loff_t *) v, j; @@ -106,50 +86,29 @@ int show_interrupts(struct seq_file *p, void *v) } return 0; } - -/* - * do_IRQ handles all hardware IRQs. Decoded IRQs should not - * come via this function. Instead, they should provide their - * own 'handler' - */ -#ifdef CONFIG_DO_IRQ_L1 -__attribute__((l1_text)) -#endif -asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs) -{ - struct pt_regs *old_regs; - struct irq_desc *desc = irq_desc + irq; -#ifndef CONFIG_IPIPE - unsigned short pending, other_ints; #endif - old_regs = set_irq_regs(regs); - /* - * Some hardware gives randomly wrong interrupts. Rather - * than crashing, do something sensible. 
- */ - if (irq >= NR_IRQS) - desc = &bad_irq_desc; - - irq_enter(); #ifdef CONFIG_DEBUG_STACKOVERFLOW +static void check_stack_overflow(int irq) +{ /* Debugging check for stack overflow: is there less than STACK_WARN free? */ - { - long sp; - - sp = __get_SP() & (THREAD_SIZE-1); + long sp = __get_SP() & (THREAD_SIZE - 1); - if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { - dump_stack(); - printk(KERN_EMERG "%s: possible stack overflow while handling irq %i " - " only %ld bytes free\n", - __func__, irq, sp - sizeof(struct thread_info)); - } + if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { + dump_stack(); + pr_emerg("irq%i: possible stack overflow only %ld bytes free\n", + irq, sp - sizeof(struct thread_info)); } +} +#else +static inline void check_stack_overflow(int irq) { } #endif - generic_handle_irq(irq); #ifndef CONFIG_IPIPE +static void maybe_lower_to_irq14(void) +{ + unsigned short pending, other_ints; + /* * If we're the only interrupt running (ignoring IRQ15 which * is for syscalls), lower our priority to IRQ14 so that @@ -163,7 +122,38 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs) other_ints = pending & (pending - 1); if (other_ints == 0) lower_to_irq14(); -#endif /* !CONFIG_IPIPE */ +} +#else +static inline void maybe_lower_to_irq14(void) { } +#endif + +/* + * do_IRQ handles all hardware IRQs. Decoded IRQs should not + * come via this function. Instead, they should provide their + * own 'handler' + */ +#ifdef CONFIG_DO_IRQ_L1 +__attribute__((l1_text)) +#endif +asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + + irq_enter(); + + check_stack_overflow(irq); + + /* + * Some hardware gives randomly wrong interrupts. Rather + * than crashing, do something sensible. 
+ */ + if (irq >= NR_IRQS) + handle_bad_irq(irq, &bad_irq_desc); + else + generic_handle_irq(irq); + + maybe_lower_to_irq14(); + irq_exit(); set_irq_regs(old_regs); @@ -171,14 +161,6 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs) void __init init_IRQ(void) { - struct irq_desc *desc; - int irq; - - spin_lock_init(&irq_controller_lock); - for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) { - *desc = bad_irq_desc; - } - init_arch_irq(); #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c index b163f6d3330..cce79d05b90 100644 --- a/arch/blackfin/kernel/kgdb.c +++ b/arch/blackfin/kernel/kgdb.c @@ -34,15 +34,6 @@ int gdb_bfin_vector = -1; #error change the definition of slavecpulocks #endif -#define IN_MEM(addr, size, l1_addr, l1_size) \ -({ \ - unsigned long __addr = (unsigned long)(addr); \ - (l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \ -}) -#define ASYNC_BANK_SIZE \ - (ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \ - ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE) - void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) { gdb_regs[BFIN_R0] = regs->r0; @@ -463,42 +454,89 @@ static int hex(char ch) static int validate_memory_access_address(unsigned long addr, int size) { - int cpu = raw_smp_processor_id(); + if (size < 0 || addr == 0) + return -EFAULT; + return bfin_mem_access_type(addr, size); +} - if (size < 0) - return EFAULT; - if (addr >= 0x1000 && (addr + size) <= physical_mem_end) - return 0; - if (addr >= SYSMMR_BASE) - return 0; - if (IN_MEM(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK_SIZE)) - return 0; - if (cpu == 0) { - if (IN_MEM(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH)) - return 0; - if (IN_MEM(addr, size, L1_CODE_START, L1_CODE_LENGTH)) - return 0; - if (IN_MEM(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH)) - return 0; - if (IN_MEM(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH)) - return 0; -#ifdef CONFIG_SMP - } else if (cpu == 1) { - if (IN_MEM(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH)) +static int bfin_probe_kernel_read(char *dst, char *src, int size) +{ + unsigned long lsrc = (unsigned long)src; + int mem_type; + + mem_type = validate_memory_access_address(lsrc, size); + if (mem_type < 0) + return mem_type; + + if (lsrc >= SYSMMR_BASE) { + if (size == 2 && lsrc % 2 == 0) { + u16 mmr = bfin_read16(src); + memcpy(dst, &mmr, sizeof(mmr)); return 0; - if (IN_MEM(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH)) + } else if (size == 4 && lsrc % 4 == 0) { + u32 mmr = bfin_read32(src); + memcpy(dst, &mmr, sizeof(mmr)); return 0; - if (IN_MEM(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH)) + } + } else { + switch (mem_type) { + case BFIN_MEM_ACCESS_CORE: + case BFIN_MEM_ACCESS_CORE_ONLY: + return probe_kernel_read(dst, src, size); + /* XXX: should support IDMA here with SMP */ + case BFIN_MEM_ACCESS_DMA: + if (dma_memcpy(dst, src, size)) + return 0; + break; + case BFIN_MEM_ACCESS_ITEST: + if (isram_memcpy(dst, src, size)) + return 0; + break; + } + } + + return -EFAULT; +} + +static int bfin_probe_kernel_write(char *dst, char *src, int size) +{ + unsigned long ldst = (unsigned long)dst; + int mem_type; + + mem_type = validate_memory_access_address(ldst, size); + if (mem_type < 0) + return mem_type; + + if (ldst >= SYSMMR_BASE) { + if (size == 2 && ldst % 2 == 0) { + u16 mmr; + memcpy(&mmr, src, sizeof(mmr)); + bfin_write16(dst, mmr); return 0; - if (IN_MEM(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH)) + } else if (size == 4 && ldst 
% 4 == 0) { + u32 mmr; + memcpy(&mmr, src, sizeof(mmr)); + bfin_write32(dst, mmr); return 0; -#endif + } + } else { + switch (mem_type) { + case BFIN_MEM_ACCESS_CORE: + case BFIN_MEM_ACCESS_CORE_ONLY: + return probe_kernel_write(dst, src, size); + /* XXX: should support IDMA here with SMP */ + case BFIN_MEM_ACCESS_DMA: + if (dma_memcpy(dst, src, size)) + return 0; + break; + case BFIN_MEM_ACCESS_ITEST: + if (isram_memcpy(dst, src, size)) + return 0; + break; + } } - if (IN_MEM(addr, size, L2_START, L2_LENGTH)) - return 0; - - return EFAULT; + return -EFAULT; } /* @@ -508,14 +546,7 @@ static int validate_memory_access_address(unsigned long addr, int size) int kgdb_mem2hex(char *mem, char *buf, int count) { char *tmp; - int err = 0; - unsigned char *pch; - unsigned short mmr16; - unsigned long mmr32; - int cpu = raw_smp_processor_id(); - - if (validate_memory_access_address((unsigned long)mem, count)) - return EFAULT; + int err; /* * We use the upper half of buf as an intermediate buffer for the @@ -523,44 +554,7 @@ int kgdb_mem2hex(char *mem, char *buf, int count) */ tmp = buf + count; - if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/ - switch (count) { - case 2: - if ((unsigned int)mem % 2 == 0) { - mmr16 = *(unsigned short *)mem; - pch = (unsigned char *)&mmr16; - *tmp++ = *pch++; - *tmp++ = *pch++; - tmp -= 2; - } else - err = EFAULT; - break; - case 4: - if ((unsigned int)mem % 4 == 0) { - mmr32 = *(unsigned long *)mem; - pch = (unsigned char *)&mmr32; - *tmp++ = *pch++; - *tmp++ = *pch++; - *tmp++ = *pch++; - *tmp++ = *pch++; - tmp -= 4; - } else - err = EFAULT; - break; - default: - err = EFAULT; - } - } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH)) -#ifdef CONFIG_SMP - || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH)) -#endif - ) { - /* access L1 instruction SRAM*/ - if (dma_memcpy(tmp, mem, count) == NULL) - err = EFAULT; - } else - err = probe_kernel_read(tmp, mem, count); - + err = bfin_probe_kernel_read(tmp, mem, count); if (!err) { while (count > 0) { buf = pack_hex_byte(buf, *tmp); @@ -581,60 +575,21 @@ int kgdb_mem2hex(char *mem, char *buf, int count) */ int kgdb_ebin2mem(char *buf, char *mem, int count) { - char *tmp_old; - char *tmp_new; - unsigned short *mmr16; - unsigned long *mmr32; - int err = 0; - int size = 0; - int cpu = raw_smp_processor_id(); + char *tmp_old, *tmp_new; + int size; tmp_old = tmp_new = buf; - while (count-- > 0) { + for (size = 0; size < count; ++size) { if (*tmp_old == 0x7d) *tmp_new = *(++tmp_old) ^ 0x20; else *tmp_new = *tmp_old; tmp_new++; tmp_old++; - size++; } - if (validate_memory_access_address((unsigned long)mem, size)) - return EFAULT; - - if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/ - switch (size) { - case 2: - if ((unsigned int)mem % 2 == 0) { - mmr16 = (unsigned short *)buf; - *(unsigned short *)mem = *mmr16; - } else - return EFAULT; - break; - case 4: - if ((unsigned int)mem % 4 == 0) { - mmr32 = (unsigned long *)buf; - *(unsigned long *)mem = *mmr32; - } else - return EFAULT; - break; - default: - return EFAULT; - } - } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH)) -#ifdef CONFIG_SMP - || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH)) -#endif - ) { - /* access L1 instruction SRAM */ - if (dma_memcpy(mem, buf, size) == NULL) - err = EFAULT; - } else - err = probe_kernel_write(mem, buf, size); - - return err; + return bfin_probe_kernel_write(mem, buf, count); } /* @@ -644,14 +599,7 @@ int 
kgdb_ebin2mem(char *buf, char *mem, int count) */ int kgdb_hex2mem(char *buf, char *mem, int count) { - char *tmp_raw; - char *tmp_hex; - unsigned short *mmr16; - unsigned long *mmr32; - int cpu = raw_smp_processor_id(); - - if (validate_memory_access_address((unsigned long)mem, count)) - return EFAULT; + char *tmp_raw, *tmp_hex; /* * We use the upper half of buf as an intermediate buffer for the @@ -666,38 +614,18 @@ int kgdb_hex2mem(char *buf, char *mem, int count) *tmp_raw |= hex(*tmp_hex--) << 4; } - if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/ - switch (count) { - case 2: - if ((unsigned int)mem % 2 == 0) { - mmr16 = (unsigned short *)tmp_raw; - *(unsigned short *)mem = *mmr16; - } else - return EFAULT; - break; - case 4: - if ((unsigned int)mem % 4 == 0) { - mmr32 = (unsigned long *)tmp_raw; - *(unsigned long *)mem = *mmr32; - } else - return EFAULT; - break; - default: - return EFAULT; - } - } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH)) -#ifdef CONFIG_SMP - || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH)) -#endif - ) { - /* access L1 instruction SRAM */ - if (dma_memcpy(mem, tmp_raw, count) == NULL) - return EFAULT; - } else - return probe_kernel_write(mem, tmp_raw, count); - return 0; + return bfin_probe_kernel_write(mem, tmp_raw, count); } +#define IN_MEM(addr, size, l1_addr, l1_size) \ +({ \ + unsigned long __addr = (unsigned long)(addr); \ + (l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \ +}) +#define ASYNC_BANK_SIZE \ + (ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \ + ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE) + int kgdb_validate_break_address(unsigned long addr) { int cpu = raw_smp_processor_id(); @@ -715,51 +643,22 @@ int kgdb_validate_break_address(unsigned long addr) if (IN_MEM(addr, BREAK_INSTR_SIZE, L2_START, L2_LENGTH)) return 0; - return EFAULT; + return -EFAULT; } int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr) { - int err; - int cpu = raw_smp_processor_id(); - - if ((cpu == 0 && IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH)) -#ifdef CONFIG_SMP - || (cpu == 1 && IN_MEM(addr, BREAK_INSTR_SIZE, COREB_L1_CODE_START, L1_CODE_LENGTH)) -#endif - ) { - /* access L1 instruction SRAM */ - if (dma_memcpy(saved_instr, (void *)addr, BREAK_INSTR_SIZE) - == NULL) - return -EFAULT; - - if (dma_memcpy((void *)addr, arch_kgdb_ops.gdb_bpt_instr, - BREAK_INSTR_SIZE) == NULL) - return -EFAULT; - - return 0; - } else { - err = probe_kernel_read(saved_instr, (char *)addr, - BREAK_INSTR_SIZE); - if (err) - return err; - - return probe_kernel_write((char *)addr, - arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); - } + int err = bfin_probe_kernel_read(saved_instr, (char *)addr, + BREAK_INSTR_SIZE); + if (err) + return err; + return bfin_probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr, + BREAK_INSTR_SIZE); } int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle) { - if (IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH)) { - /* access L1 instruction SRAM */ - if (dma_memcpy((void *)addr, bundle, BREAK_INSTR_SIZE) == NULL) - return -EFAULT; - - return 0; - } else - return probe_kernel_write((char *)addr, - (char *)bundle, BREAK_INSTR_SIZE); + return bfin_probe_kernel_write((char *)addr, bundle, BREAK_INSTR_SIZE); } int kgdb_arch_init(void) diff --git a/arch/blackfin/kernel/mcount.S b/arch/blackfin/kernel/mcount.S deleted file mode 100644 index edcfb3865f4..00000000000 --- a/arch/blackfin/kernel/mcount.S +++ /dev/null @@ -1,70 +0,0 @@ -/* - * 
linux/arch/blackfin/mcount.S - * - * Copyright (C) 2006 Analog Devices Inc. - * - * 2007/04/12 Save index, length, modify and base registers. --rpm - */ - -#include <linux/linkage.h> -#include <asm/blackfin.h> - -.text - -.align 4 /* just in case */ - -ENTRY(__mcount) - [--sp] = i0; - [--sp] = i1; - [--sp] = i2; - [--sp] = i3; - [--sp] = l0; - [--sp] = l1; - [--sp] = l2; - [--sp] = l3; - [--sp] = m0; - [--sp] = m1; - [--sp] = m2; - [--sp] = m3; - [--sp] = b0; - [--sp] = b1; - [--sp] = b2; - [--sp] = b3; - [--sp] = ( r7:0, p5:0 ); - [--sp] = ASTAT; - - p1.L = _ipipe_trace_enable; - p1.H = _ipipe_trace_enable; - r7 = [p1]; - CC = r7 == 0; - if CC jump out; - link 0x10; - r0 = 0x0; - [sp + 0xc] = r0; /* v */ - r0 = 0x0; /* type: IPIPE_TRACE_FN */ - r1 = rets; - p0 = [fp]; /* p0: Prior FP */ - r2 = [p0 + 4]; /* r2: Prior RETS */ - call ___ipipe_trace; - unlink; -out: - ASTAT = [sp++]; - ( r7:0, p5:0 ) = [sp++]; - b3 = [sp++]; - b2 = [sp++]; - b1 = [sp++]; - b0 = [sp++]; - m3 = [sp++]; - m2 = [sp++]; - m1 = [sp++]; - m0 = [sp++]; - l3 = [sp++]; - l2 = [sp++]; - l1 = [sp++]; - l0 = [sp++]; - i3 = [sp++]; - i2 = [sp++]; - i1 = [sp++]; - i0 = [sp++]; - rts; -ENDPROC(__mcount) diff --git a/arch/blackfin/kernel/module.c b/arch/blackfin/kernel/module.c index 1bd7f2d018a..d5aee362668 100644 --- a/arch/blackfin/kernel/module.c +++ b/arch/blackfin/kernel/module.c @@ -201,8 +201,8 @@ apply_relocate(Elf_Shdr * sechdrs, const char *strtab, /* Arithmetic relocations are handled. */ /* We do not expect LSETUP to be split and hence is not */ /* handled. */ -/* R_byte and R_byte2 are also not handled as the gas */ -/* does not generate it. */ +/* R_BFIN_BYTE and R_BFIN_BYTE2 are also not handled as the */ +/* gas does not generate it. */ /*************************************************************************/ int apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab, @@ -243,8 +243,8 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab, #endif switch (ELF32_R_TYPE(rel[i].r_info)) { - case R_pcrel24: - case R_pcrel24_jump_l: + case R_BFIN_PCREL24: + case R_BFIN_PCREL24_JUMP_L: /* Add the value, subtract its position */ location16 = (uint16_t *) (sechdrs[sechdrs[relsec].sh_info].
@@ -266,18 +266,18 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab, (*location16 & 0xff00) | (value >> 16 & 0x00ff); *(location16 + 1) = value & 0xffff; break; - case R_pcrel12_jump: - case R_pcrel12_jump_s: + case R_BFIN_PCREL12_JUMP: + case R_BFIN_PCREL12_JUMP_S: value -= (uint32_t) location32; value >>= 1; *location16 = (value & 0xfff); break; - case R_pcrel10: + case R_BFIN_PCREL10: value -= (uint32_t) location32; value >>= 1; *location16 = (value & 0x3ff); break; - case R_luimm16: + case R_BFIN_LUIMM16: pr_debug("before %x after %x\n", *location16, (value & 0xffff)); tmp = (value & 0xffff); @@ -286,7 +286,7 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab, } else *location16 = tmp; break; - case R_huimm16: + case R_BFIN_HUIMM16: pr_debug("before %x after %x\n", *location16, ((value >> 16) & 0xffff)); tmp = ((value >> 16) & 0xffff); @@ -295,10 +295,10 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab, } else *location16 = tmp; break; - case R_rimm16: + case R_BFIN_RIMM16: *location16 = (value & 0xffff); break; - case R_byte4_data: + case R_BFIN_BYTE4_DATA: pr_debug("before %x after %x\n", *location32, value); *location32 = value; break; diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index e040e03335e..79cad0ac589 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c @@ -160,6 +160,29 @@ pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags) } EXPORT_SYMBOL(kernel_thread); +/* + * Do necessary setup to start up a newly executed thread. + * + * pass the data segment into user programs if it exists, + * it can't hurt anything as far as I can tell + */ +void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) +{ + set_fs(USER_DS); + regs->pc = new_ip; + if (current->mm) + regs->p5 = current->mm->start_data; +#ifdef CONFIG_SMP + task_thread_info(current)->l1_task_info.stack_start = + (void *)current->mm->context.stack_start; + task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp; + memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(current)->l1_task_info, + sizeof(*L1_SCRATCH_TASK_INFO)); +#endif + wrusp(new_sp); +} +EXPORT_SYMBOL_GPL(start_thread); + void flush_thread(void) { } @@ -321,57 +344,151 @@ void finish_atomic_sections (struct pt_regs *regs) } } +static inline +int in_mem(unsigned long addr, unsigned long size, + unsigned long start, unsigned long end) +{ + return addr >= start && addr + size <= end; +} +static inline +int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off, + unsigned long const_addr, unsigned long const_size) +{ + return const_size && + in_mem(addr, size, const_addr + off, const_addr + const_size); +} +static inline +int in_mem_const(unsigned long addr, unsigned long size, + unsigned long const_addr, unsigned long const_size) +{ + return in_mem_const_off(addr, size, 0, const_addr, const_size); +} +#define IN_ASYNC(bnum, bctlnum) \ +({ \ + (bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? -EFAULT : \ + bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? -EFAULT : \ + BFIN_MEM_ACCESS_CORE; \ +}) + +int bfin_mem_access_type(unsigned long addr, unsigned long size) +{ + int cpu = raw_smp_processor_id(); + + /* Check that things do not wrap around */ + if (addr > ULONG_MAX - size) + return -EFAULT; + + if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end)) + return BFIN_MEM_ACCESS_CORE; + + if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH)) + return cpu == 0 ?
BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA; + if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH)) + return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT; + if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH)) + return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA; + if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH)) + return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA; +#ifdef COREB_L1_CODE_START + if (in_mem_const(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH)) + return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA; + if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH)) + return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT; + if (in_mem_const(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH)) + return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA; + if (in_mem_const(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH)) + return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA; +#endif + if (in_mem_const(addr, size, L2_START, L2_LENGTH)) + return BFIN_MEM_ACCESS_CORE; + + if (addr >= SYSMMR_BASE) + return BFIN_MEM_ACCESS_CORE_ONLY; + + /* We can't read EBIU banks that aren't enabled or we end up hanging + * on the access to the async space. + */ + if (in_mem_const(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK0_SIZE)) + return IN_ASYNC(0, 0); + if (in_mem_const(addr, size, ASYNC_BANK1_BASE, ASYNC_BANK1_SIZE)) + return IN_ASYNC(1, 0); + if (in_mem_const(addr, size, ASYNC_BANK2_BASE, ASYNC_BANK2_SIZE)) + return IN_ASYNC(2, 1); + if (in_mem_const(addr, size, ASYNC_BANK3_BASE, ASYNC_BANK3_SIZE)) + return IN_ASYNC(3, 1); + + if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH)) + return BFIN_MEM_ACCESS_CORE; + if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH)) + return BFIN_MEM_ACCESS_DMA; + + return -EFAULT; +} + #if defined(CONFIG_ACCESS_CHECK) +#ifdef CONFIG_ACCESS_OK_L1 +__attribute__((l1_text)) +#endif /* Return 1 if access to memory range is OK, 0 otherwise */ int _access_ok(unsigned long addr, unsigned long size) { if (size == 0) return 1; - if (addr > (addr + size)) + /* Check that things do not wrap around */ + if (addr > ULONG_MAX - size) return 0; if (segment_eq(get_fs(), KERNEL_DS)) return 1; #ifdef CONFIG_MTD_UCLINUX - if (addr >= memory_start && (addr + size) <= memory_end) - return 1; - if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end) + if (1) +#else + if (0) +#endif + { + if (in_mem(addr, size, memory_start, memory_end)) + return 1; + if (in_mem(addr, size, memory_mtd_end, physical_mem_end)) + return 1; +# ifndef CONFIG_ROMFS_ON_MTD + if (0) +# endif + /* For XIP, allow user space to use pointers within the ROMFS. */ + if (in_mem(addr, size, memory_mtd_start, memory_mtd_end)) + return 1; + } else { + if (in_mem(addr, size, memory_start, physical_mem_end)) + return 1; + } + + if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end)) return 1; -#ifdef CONFIG_ROMFS_ON_MTD - /* For XIP, allow user space to use pointers within the ROMFS. 
*/ - if (addr >= memory_mtd_start && (addr + size) <= memory_mtd_end) + if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH)) return 1; -#endif -#else - if (addr >= memory_start && (addr + size) <= physical_mem_end) + if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH)) return 1; -#endif - if (addr >= (unsigned long)__init_begin && - addr + size <= (unsigned long)__init_end) + if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH)) return 1; - if (addr >= get_l1_scratch_start() - && addr + size <= get_l1_scratch_start() + L1_SCRATCH_LENGTH) + if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH)) return 1; -#if L1_CODE_LENGTH != 0 - if (addr >= get_l1_code_start() + (_etext_l1 - _stext_l1) - && addr + size <= get_l1_code_start() + L1_CODE_LENGTH) +#ifdef COREB_L1_CODE_START + if (in_mem_const(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH)) return 1; -#endif -#if L1_DATA_A_LENGTH != 0 - if (addr >= get_l1_data_a_start() + (_ebss_l1 - _sdata_l1) - && addr + size <= get_l1_data_a_start() + L1_DATA_A_LENGTH) + if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH)) return 1; -#endif -#if L1_DATA_B_LENGTH != 0 - if (addr >= get_l1_data_b_start() + (_ebss_b_l1 - _sdata_b_l1) - && addr + size <= get_l1_data_b_start() + L1_DATA_B_LENGTH) + if (in_mem_const(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH)) return 1; -#endif -#if L2_LENGTH != 0 - if (addr >= L2_START + (_ebss_l2 - _stext_l2) - && addr + size <= L2_START + L2_LENGTH) + if (in_mem_const(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH)) return 1; #endif + if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH)) + return 1; + + if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH)) + return 1; + if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH)) + return 1; + return 0; } EXPORT_SYMBOL(_access_ok); diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c index a58687bdee6..298f023bcc0 100644 --- a/arch/blackfin/kernel/setup.c +++ b/arch/blackfin/kernel/setup.c @@ -18,9 +18,12 @@ #include <linux/tty.h> #include <linux/pfn.h> +#ifdef CONFIG_MTD_UCLINUX +#include <linux/mtd/map.h> #include <linux/ext2_fs.h> #include <linux/cramfs_fs.h> #include <linux/romfs_fs.h> +#endif #include <asm/cplb.h> #include <asm/cacheflush.h> @@ -45,6 +48,7 @@ EXPORT_SYMBOL(_ramend); EXPORT_SYMBOL(reserved_mem_dcache_on); #ifdef CONFIG_MTD_UCLINUX +extern struct map_info uclinux_ram_map; unsigned long memory_mtd_end, memory_mtd_start, mtd_size; unsigned long _ebss; EXPORT_SYMBOL(memory_mtd_end); @@ -113,15 +117,49 @@ void __cpuinit bfin_setup_caches(unsigned int cpu) */ #ifdef CONFIG_BFIN_ICACHE printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu); + printk(KERN_INFO " External memory:" +# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE + " cacheable" +# else + " uncacheable" +# endif + " in instruction cache\n"); + if (L2_LENGTH) + printk(KERN_INFO " L2 SRAM :" +# ifdef CONFIG_BFIN_L2_ICACHEABLE + " cacheable" +# else + " uncacheable" +# endif + " in instruction cache\n"); + +#else + printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu); #endif + #ifdef CONFIG_BFIN_DCACHE - printk(KERN_INFO "Data Cache Enabled for CPU%u" -# if defined CONFIG_BFIN_WB - " (write-back)" -# elif defined CONFIG_BFIN_WT - " (write-through)" + printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu); + printk(KERN_INFO " External memory:" +# if defined CONFIG_BFIN_EXTMEM_WRITEBACK + " 
cacheable (write-back)" +# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH + " cacheable (write-through)" +# else + " uncacheable" +# endif + " in data cache\n"); + if (L2_LENGTH) + printk(KERN_INFO " L2 SRAM :" +# if defined CONFIG_BFIN_L2_WRITEBACK + " cacheable (write-back)" +# elif defined CONFIG_BFIN_L2_WRITETHROUGH + " cacheable (write-through)" +# else + " uncacheable" # endif - "\n", cpu); + " in data cache\n"); +#else + printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu); #endif } @@ -150,40 +188,45 @@ void __init bfin_relocate_l1_mem(void) unsigned long l1_data_b_length; unsigned long l2_length; + /* + * due to the ALIGN(4) in the arch/blackfin/kernel/vmlinux.lds.S + * we know that everything about l1 text/data is nice and aligned, + * so copy by 4 byte chunks, and don't worry about overlapping + * src/dest. + * + * We can't use the dma_memcpy functions, since they can call + * scheduler functions which might be in L1 :( and core writes + * into L1 instruction cause bad access errors, so we are stuck, + * we are required to use DMA, but can't use the common dma + * functions. We can't use memcpy either - since that might be + * going to be in the relocated L1 + */ + blackfin_dma_early_init(); + /* if necessary, copy _stext_l1 to _etext_l1 to L1 instruction SRAM */ l1_code_length = _etext_l1 - _stext_l1; - if (l1_code_length > L1_CODE_LENGTH) - panic("L1 Instruction SRAM Overflow\n"); - /* cannot complain as printk is not available as yet. - * But we can continue booting and complain later! - */ - - /* Copy _stext_l1 to _etext_l1 to L1 instruction SRAM */ - dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length); + if (l1_code_length) + early_dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length); + /* if necessary, copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */ l1_data_a_length = _sbss_l1 - _sdata_l1; - if (l1_data_a_length > L1_DATA_A_LENGTH) - panic("L1 Data SRAM Bank A Overflow\n"); - - /* Copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */ - dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length); + if (l1_data_a_length) + early_dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length); + /* if necessary, copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */ l1_data_b_length = _sbss_b_l1 - _sdata_b_l1; - if (l1_data_b_length > L1_DATA_B_LENGTH) - panic("L1 Data SRAM Bank B Overflow\n"); - - /* Copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */ - dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length + + if (l1_data_b_length) + early_dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length + l1_data_a_length, l1_data_b_length); + early_dma_memcpy_done(); + + /* if necessary, copy _stext_l2 to _edata_l2 to L2 SRAM */ if (L2_LENGTH != 0) { l2_length = _sbss_l2 - _stext_l2; - if (l2_length > L2_LENGTH) - panic("L2 SRAM Overflow\n"); - - /* Copy _stext_l2 to _edata_l2 to L2 SRAM */ - dma_memcpy(_stext_l2, _l2_lma_start, l2_length); + if (l2_length) + memcpy(_stext_l2, _l2_lma_start, l2_length); } } @@ -434,9 +477,11 @@ static __init void parse_cmdline_early(char *cmdline_p) } else if (!memcmp(to, "clkin_hz=", 9)) { to += 9; early_init_clkin_hz(to); +#ifdef CONFIG_EARLY_PRINTK } else if (!memcmp(to, "earlyprintk=", 12)) { to += 12; setup_early_printk(to); +#endif } else if (!memcmp(to, "memmap=", 7)) { to += 7; parse_memmap(to); @@ -472,7 +517,7 @@ static __init void memory_setup(void) if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) { console_init(); - panic("DMA region exceeds memory limit: %lu.\n", + panic("DMA region exceeds memory 
limit: %lu.", _ramend - _ramstart); } memory_end = _ramend - DMA_UNCACHED_REGION; @@ -507,7 +552,7 @@ static __init void memory_setup(void) && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) mtd_size = PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2])); -# if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263) +# if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263) /* Due to a Hardware Anomaly we need to limit the size of usable * instruction memory to max 60MB, 56MB if HUNT_FOR_ZERO is on * 05000263 - Hardware loop corrupted when taking an ICPLB exception @@ -526,17 +571,16 @@ static __init void memory_setup(void) if (mtd_size == 0) { console_init(); - panic("Don't boot kernel without rootfs attached.\n"); + panic("Don't boot kernel without rootfs attached."); } /* Relocate MTD image to the top of memory after the uncached memory area */ - dma_memcpy((char *)memory_end, _end, mtd_size); - - memory_mtd_start = memory_end; - _ebss = memory_mtd_start; /* define _ebss for compatible */ + uclinux_ram_map.phys = memory_mtd_start = memory_end; + uclinux_ram_map.size = mtd_size; + dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size); #endif /* CONFIG_MTD_UCLINUX */ -#if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263) +#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263) /* Due to a Hardware Anomaly we need to limit the size of usable * instruction memory to max 60MB, 56MB if HUNT_FOR_ZERO is on * 05000263 - Hardware loop corrupted when taking an ICPLB exception @@ -756,6 +800,11 @@ void __init setup_arch(char **cmdline_p) { unsigned long sclk, cclk; + /* Check to make sure we are running on the right processor */ + if (unlikely(CPUID != bfin_cpuid())) + printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n", + CPU, bfin_cpuid(), bfin_revid()); + #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; #endif @@ -770,14 +819,17 @@ void __init setup_arch(char **cmdline_p) memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); boot_command_line[COMMAND_LINE_SIZE - 1] = '\0'; - /* setup memory defaults from the user config */ - physical_mem_end = 0; - _ramend = get_mem_size() * 1024 * 1024; - memset(&bfin_memmap, 0, sizeof(bfin_memmap)); + /* If the user does not specify things on the command line, use + * whatever the bootloader set up + */ + physical_mem_end = 0; parse_cmdline_early(&command_line[0]); + if (_ramend == 0) + _ramend = get_mem_size() * 1024 * 1024; + if (physical_mem_end == 0) physical_mem_end = _ramend; @@ -796,10 +848,8 @@ void __init setup_arch(char **cmdline_p) cclk = get_cclk(); sclk = get_sclk(); -#if !defined(CONFIG_BFIN_KERNEL_CLOCK) - if (ANOMALY_05000273 && cclk == sclk) - panic("ANOMALY 05000273, SCLK can not be same as CCLK"); -#endif + if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk) + panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK"); #ifdef BF561_FAMILY if (ANOMALY_05000266) { @@ -831,7 +881,8 @@ void __init setup_arch(char **cmdline_p) defined(CONFIG_BF538) || defined(CONFIG_BF539) _bfin_swrst = bfin_read_SWRST(); #else - _bfin_swrst = bfin_read_SYSCR(); + /* Clear boot mode field */ + _bfin_swrst = bfin_read_SYSCR() & ~0xf; #endif #ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT @@ -869,10 +920,7 @@ void __init setup_arch(char **cmdline_p) else printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid()); - if (unlikely(CPUID != bfin_cpuid())) - printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n", - CPU, bfin_cpuid(), bfin_revid()); 
- else { + if (likely(CPUID == bfin_cpuid())) { if (bfin_revid() != bfin_compiled_revid()) { if (bfin_compiled_revid() == -1) printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n", @@ -881,7 +929,7 @@ void __init setup_arch(char **cmdline_p) printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n", bfin_compiled_revid(), bfin_revid()); if (bfin_compiled_revid() > bfin_revid()) - panic("Error: you are missing anomaly workarounds for this rev\n"); + panic("Error: you are missing anomaly workarounds for this rev"); } } if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX) @@ -891,16 +939,13 @@ void __init setup_arch(char **cmdline_p) /* We can't run on BF548-0.1 due to ANOMALY 05000448 */ if (bfin_cpuid() == 0x27de && bfin_revid() == 1) - panic("You can't run on this processor due to 05000448\n"); + panic("You can't run on this processor due to 05000448"); printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n"); printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n", cclk / 1000000, sclk / 1000000); - if (ANOMALY_05000273 && (cclk >> 1) <= sclk) - printk("\n\n\nANOMALY_05000273: CCLK must be >= 2*SCLK !!!\n\n\n"); - setup_bootmem_allocator(); paging_init(); @@ -1095,7 +1140,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) CPUID, bfin_cpuid()); seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n" - "stepping\t: %d\n", + "stepping\t: %d ", cpu, cclk/1000000, sclk/1000000, #ifdef CONFIG_MPU "mpu on", @@ -1104,7 +1149,16 @@ static int show_cpuinfo(struct seq_file *m, void *v) #endif revid); - seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n", + if (bfin_revid() != bfin_compiled_revid()) { + if (bfin_compiled_revid() == -1) + seq_printf(m, "(Compiled for Rev none)"); + else if (bfin_compiled_revid() == 0xffff) + seq_printf(m, "(Compiled for Rev any)"); + else + seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid()); + } + + seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n", cclk/1000000, cclk%1000000, sclk/1000000, sclk%1000000); seq_printf(m, "bogomips\t: %lu.%02lu\n" @@ -1145,16 +1199,25 @@ static int show_cpuinfo(struct seq_file *m, void *v) icache_size = 0; seq_printf(m, "cache size\t: %d KB(L1 icache) " - "%d KB(L1 dcache%s) %d KB(L2 cache)\n", - icache_size, dcache_size, -#if defined CONFIG_BFIN_WB - "-wb" -#elif defined CONFIG_BFIN_WT - "-wt" -#endif - "", 0); - + "%d KB(L1 dcache) %d KB(L2 cache)\n", + icache_size, dcache_size, 0); seq_printf(m, "%s\n", cache); + seq_printf(m, "external memory\t: " +#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) + "cacheable" +#else + "uncacheable" +#endif + " in instruction cache\n"); + seq_printf(m, "external memory\t: " +#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) + "cacheable (write-back)" +#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH) + "cacheable (write-through)" +#else + "uncacheable" +#endif + " in data cache\n"); if (icache_size) seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n", @@ -1169,6 +1232,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) #ifdef __ARCH_SYNC_CORE_DCACHE seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count); #endif +#ifdef __ARCH_SYNC_CORE_ICACHE + seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count); +#endif #ifdef CONFIG_BFIN_ICACHE_LOCK switch ((cpudata->imemctl >> 3) & WAYALL_L) { case WAY0_L: @@ -1224,8 +1290,25 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_num != num_possible_cpus() - 1) return 
0; - if (L2_LENGTH) + if (L2_LENGTH) { seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400); + seq_printf(m, "L2 SRAM\t\t: " +#if defined(CONFIG_BFIN_L2_ICACHEABLE) + "cacheable" +#else + "uncacheable" +#endif + " in instruction cache\n"); + seq_printf(m, "L2 SRAM\t\t: " +#if defined(CONFIG_BFIN_L2_WRITEBACK) + "cacheable (write-back)" +#elif defined(CONFIG_BFIN_L2_WRITETHROUGH) + "cacheable (write-through)" +#else + "uncacheable" +#endif + " in data cache\n"); + } seq_printf(m, "board name\t: %s\n", bfin_board_name); seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); diff --git a/arch/blackfin/kernel/stacktrace.c b/arch/blackfin/kernel/stacktrace.c new file mode 100644 index 00000000000..30301e1eace --- /dev/null +++ b/arch/blackfin/kernel/stacktrace.c @@ -0,0 +1,53 @@ +/* + * Blackfin stacktrace code (mostly copied from avr32) + * + * Copyright 2009 Analog Devices Inc. + * Licensed under the GPL-2 or later. + */ + +#include <linux/sched.h> +#include <linux/stacktrace.h> +#include <linux/thread_info.h> +#include <linux/module.h> + +register unsigned long current_frame_pointer asm("FP"); + +struct stackframe { + unsigned long fp; + unsigned long rets; +}; + +/* + * Save stack-backtrace addresses into a stack_trace buffer. + */ +void save_stack_trace(struct stack_trace *trace) +{ + unsigned long low, high; + unsigned long fp; + struct stackframe *frame; + int skip = trace->skip; + + low = (unsigned long)task_stack_page(current); + high = low + THREAD_SIZE; + fp = current_frame_pointer; + + while (fp >= low && fp <= (high - sizeof(*frame))) { + frame = (struct stackframe *)fp; + + if (skip) { + skip--; + } else { + trace->entries[trace->nr_entries++] = frame->rets; + if (trace->nr_entries >= trace->max_entries) + break; + } + + /* + * The next frame must be at a higher address than the + * current frame. 
+ */ + low = fp + sizeof(*frame); + fp = frame->fp; + } +} +EXPORT_SYMBOL_GPL(save_stack_trace); diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c index fce49d7cf00..a8f1329c15a 100644 --- a/arch/blackfin/kernel/sys_bfin.c +++ b/arch/blackfin/kernel/sys_bfin.c @@ -78,11 +78,6 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, return do_mmap2(addr, len, prot, flags, fd, pgoff); } -asmlinkage int sys_getpagesize(void) -{ - return PAGE_SIZE; -} - asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags) { return sram_alloc_with_lsl(size, flags); diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c index 27646121280..0791eba40d9 100644 --- a/arch/blackfin/kernel/time-ts.c +++ b/arch/blackfin/kernel/time-ts.c @@ -20,8 +20,9 @@ #include <asm/blackfin.h> #include <asm/time.h> +#include <asm/gptimers.h> -#ifdef CONFIG_CYCLES_CLOCKSOURCE +#if defined(CONFIG_CYCLES_CLOCKSOURCE) /* Accelerators for sched_clock() * convert from cycles(64bits) => nanoseconds (64bits) @@ -58,15 +59,15 @@ static inline unsigned long long cycles_2_ns(cycle_t cyc) return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; } -static cycle_t read_cycles(struct clocksource *cs) +static cycle_t bfin_read_cycles(struct clocksource *cs) { return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod); } -static struct clocksource clocksource_bfin = { - .name = "bfin_cycles", +static struct clocksource bfin_cs_cycles = { + .name = "bfin_cs_cycles", .rating = 350, - .read = read_cycles, + .read = bfin_read_cycles, .mask = CLOCKSOURCE_MASK(64), .shift = 22, .flags = CLOCK_SOURCE_IS_CONTINUOUS, @@ -74,53 +75,198 @@ static struct clocksource clocksource_bfin = { unsigned long long sched_clock(void) { - return cycles_2_ns(read_cycles(&clocksource_bfin)); + return cycles_2_ns(bfin_read_cycles(&bfin_cs_cycles)); } -static int __init bfin_clocksource_init(void) +static int __init bfin_cs_cycles_init(void) { set_cyc2ns_scale(get_cclk() / 1000); - clocksource_bfin.mult = clocksource_hz2mult(get_cclk(), clocksource_bfin.shift); + bfin_cs_cycles.mult = \ + clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift); - if (clocksource_register(&clocksource_bfin)) + if (clocksource_register(&bfin_cs_cycles)) panic("failed to register clocksource"); return 0; } +#else +# define bfin_cs_cycles_init() +#endif + +#ifdef CONFIG_GPTMR0_CLOCKSOURCE + +void __init setup_gptimer0(void) +{ + disable_gptimers(TIMER0bit); + + set_gptimer_config(TIMER0_id, \ + TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM); + set_gptimer_period(TIMER0_id, -1); + set_gptimer_pwidth(TIMER0_id, -2); + SSYNC(); + enable_gptimers(TIMER0bit); +} + +static cycle_t bfin_read_gptimer0(void) +{ + return bfin_read_TIMER0_COUNTER(); +} + +static struct clocksource bfin_cs_gptimer0 = { + .name = "bfin_cs_gptimer0", + .rating = 400, + .read = bfin_read_gptimer0, + .mask = CLOCKSOURCE_MASK(32), + .shift = 22, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static int __init bfin_cs_gptimer0_init(void) +{ + setup_gptimer0(); + bfin_cs_gptimer0.mult = \ + clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift); + + if (clocksource_register(&bfin_cs_gptimer0)) + panic("failed to register clocksource"); + + return 0; +} #else -# define bfin_clocksource_init() +# define bfin_cs_gptimer0_init() #endif +#ifdef CONFIG_CORE_TIMER_IRQ_L1 +__attribute__((l1_text)) +#endif +irqreturn_t timer_interrupt(int irq, void *dev_id); + +static int bfin_timer_set_next_event(unsigned long, \ + struct clock_event_device *); + +static void 
bfin_timer_set_mode(enum clock_event_mode, \ + struct clock_event_device *); + +static struct clock_event_device clockevent_bfin = { +#if defined(CONFIG_TICKSOURCE_GPTMR0) + .name = "bfin_gptimer0", + .rating = 300, + .irq = IRQ_TIMER0, +#else + .name = "bfin_core_timer", + .rating = 350, + .irq = IRQ_CORETMR, +#endif + .shift = 32, + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_next_event = bfin_timer_set_next_event, + .set_mode = bfin_timer_set_mode, +}; + +static struct irqaction bfin_timer_irq = { +#if defined(CONFIG_TICKSOURCE_GPTMR0) + .name = "Blackfin GPTimer0", +#else + .name = "Blackfin CoreTimer", +#endif + .flags = IRQF_DISABLED | IRQF_TIMER | \ + IRQF_IRQPOLL | IRQF_PERCPU, + .handler = timer_interrupt, + .dev_id = &clockevent_bfin, +}; + +#if defined(CONFIG_TICKSOURCE_GPTMR0) static int bfin_timer_set_next_event(unsigned long cycles, struct clock_event_device *evt) { + disable_gptimers(TIMER0bit); + + /* it starts counting three SCLK cycles after the TIMENx bit is set */ + set_gptimer_pwidth(TIMER0_id, cycles - 3); + enable_gptimers(TIMER0bit); + return 0; +} + +static void bfin_timer_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: { + set_gptimer_config(TIMER0_id, \ + TIMER_OUT_DIS | TIMER_IRQ_ENA | \ + TIMER_PERIOD_CNT | TIMER_MODE_PWM); + set_gptimer_period(TIMER0_id, get_sclk() / HZ); + set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1); + enable_gptimers(TIMER0bit); + break; + } + case CLOCK_EVT_MODE_ONESHOT: + disable_gptimers(TIMER0bit); + set_gptimer_config(TIMER0_id, \ + TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM); + set_gptimer_period(TIMER0_id, 0); + break; + case CLOCK_EVT_MODE_UNUSED: + case CLOCK_EVT_MODE_SHUTDOWN: + disable_gptimers(TIMER0bit); + break; + case CLOCK_EVT_MODE_RESUME: + break; + } +} + +static void bfin_timer_ack(void) +{ + set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0); +} + +static void __init bfin_timer_init(void) +{ + disable_gptimers(TIMER0bit); +} + +static unsigned long __init bfin_clockevent_check(void) +{ + setup_irq(IRQ_TIMER0, &bfin_timer_irq); + return get_sclk(); +} + +#else /* CONFIG_TICKSOURCE_CORETMR */ + +static int bfin_timer_set_next_event(unsigned long cycles, + struct clock_event_device *evt) +{ + bfin_write_TCNTL(TMPWR); + CSYNC(); bfin_write_TCOUNT(cycles); CSYNC(); + bfin_write_TCNTL(TMPWR | TMREN); return 0; } static void bfin_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) + struct clock_event_device *evt) { switch (mode) { case CLOCK_EVT_MODE_PERIODIC: { unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1); bfin_write_TCNTL(TMPWR); - bfin_write_TSCALE(TIME_SCALE - 1); CSYNC(); + bfin_write_TSCALE(TIME_SCALE - 1); bfin_write_TPERIOD(tcount); bfin_write_TCOUNT(tcount); - bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD); CSYNC(); + bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD); break; } case CLOCK_EVT_MODE_ONESHOT: + bfin_write_TCNTL(TMPWR); + CSYNC(); bfin_write_TSCALE(TIME_SCALE - 1); + bfin_write_TPERIOD(0); bfin_write_TCOUNT(0); - bfin_write_TCNTL(TMPWR | TMREN); - CSYNC(); break; case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: @@ -132,6 +278,10 @@ static void bfin_timer_set_mode(enum clock_event_mode mode, } } +static void bfin_timer_ack(void) +{ +} + static void __init bfin_timer_init(void) { /* power up the timer, but don't enable it just yet */ @@ -145,38 +295,32 @@ static void __init bfin_timer_init(void) bfin_write_TPERIOD(0); bfin_write_TCOUNT(0); - /* now enable 
the timer */ CSYNC(); } +static unsigned long __init bfin_clockevent_check(void) +{ + setup_irq(IRQ_CORETMR, &bfin_timer_irq); + return get_cclk() / TIME_SCALE; +} + +void __init setup_core_timer(void) +{ + bfin_timer_init(); + bfin_timer_set_mode(CLOCK_EVT_MODE_PERIODIC, NULL); +} +#endif /* CONFIG_TICKSOURCE_GPTMR0 */ + /* * timer_interrupt() needs to keep up the real-time clock, * as well as call the "do_timer()" routine every clocktick */ -#ifdef CONFIG_CORE_TIMER_IRQ_L1 -__attribute__((l1_text)) -#endif -irqreturn_t timer_interrupt(int irq, void *dev_id); - -static struct clock_event_device clockevent_bfin = { - .name = "bfin_core_timer", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .shift = 32, - .set_next_event = bfin_timer_set_next_event, - .set_mode = bfin_timer_set_mode, -}; - -static struct irqaction bfin_timer_irq = { - .name = "Blackfin Core Timer", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, - .handler = timer_interrupt, - .dev_id = &clockevent_bfin, -}; - irqreturn_t timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = dev_id; + smp_mb(); evt->event_handler(evt); + bfin_timer_ack(); return IRQ_HANDLED; } @@ -184,9 +328,8 @@ static int __init bfin_clockevent_init(void) { unsigned long timer_clk; - timer_clk = get_cclk() / TIME_SCALE; + timer_clk = bfin_clockevent_check(); - setup_irq(IRQ_CORETMR, &bfin_timer_irq); bfin_timer_init(); clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift); @@ -218,6 +361,7 @@ void __init time_init(void) xtime.tv_nsec = 0; set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); - bfin_clocksource_init(); + bfin_cs_cycles_init(); + bfin_cs_gptimer0_init(); bfin_clockevent_init(); } diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c index 1bbacfbd4c5..adb54aa7d7c 100644 --- a/arch/blackfin/kernel/time.c +++ b/arch/blackfin/kernel/time.c @@ -24,14 +24,10 @@ static struct irqaction bfin_timer_irq = { .name = "Blackfin Timer Tick", -#ifdef CONFIG_IRQ_PER_CPU - .flags = IRQF_DISABLED | IRQF_PERCPU, -#else .flags = IRQF_DISABLED -#endif }; -#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE) +#if defined(CONFIG_IPIPE) void __init setup_system_timer0(void) { /* Power down the core timer, just to play safe. */ @@ -74,7 +70,7 @@ void __init setup_core_timer(void) static void __init time_sched_init(irqreturn_t(*timer_routine) (int, void *)) { -#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE) +#if defined(CONFIG_IPIPE) setup_system_timer0(); bfin_timer_irq.handler = timer_routine; setup_irq(IRQ_TIMER0, &bfin_timer_irq); @@ -94,7 +90,7 @@ static unsigned long gettimeoffset(void) unsigned long offset; unsigned long clocks_per_jiffy; -#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE) +#if defined(CONFIG_IPIPE) clocks_per_jiffy = bfin_read_TIMER0_PERIOD(); offset = bfin_read_TIMER0_COUNTER() / \ (((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC); @@ -133,36 +129,25 @@ irqreturn_t timer_interrupt(int irq, void *dummy) static long last_rtc_update; write_seqlock(&xtime_lock); -#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE) + do_timer(1); + /* - * TIMIL0 is latched in __ipipe_grab_irq() when the I-Pipe is - * enabled. + * If we have an externally synchronized Linux clock, then update + * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be + * called as close as possible to 500 ms before the new second starts. 
*/ - if (get_gptimer_status(0) & TIMER_STATUS_TIMIL0) { -#endif - do_timer(1); - - /* - * If we have an externally synchronized Linux clock, then update - * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be - * called as close as possible to 500 ms before the new second starts. - */ - if (ntp_synced() && - xtime.tv_sec > last_rtc_update + 660 && - (xtime.tv_nsec / NSEC_PER_USEC) >= - 500000 - ((unsigned)TICK_SIZE) / 2 - && (xtime.tv_nsec / NSEC_PER_USEC) <= - 500000 + ((unsigned)TICK_SIZE) / 2) { - if (set_rtc_mmss(xtime.tv_sec) == 0) - last_rtc_update = xtime.tv_sec; - else - /* Do it again in 60s. */ - last_rtc_update = xtime.tv_sec - 600; - } -#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE) - set_gptimer_status(0, TIMER_STATUS_TIMIL0); + if (ntp_synced() && + xtime.tv_sec > last_rtc_update + 660 && + (xtime.tv_nsec / NSEC_PER_USEC) >= + 500000 - ((unsigned)TICK_SIZE) / 2 + && (xtime.tv_nsec / NSEC_PER_USEC) <= + 500000 + ((unsigned)TICK_SIZE) / 2) { + if (set_rtc_mmss(xtime.tv_sec) == 0) + last_rtc_update = xtime.tv_sec; + else + /* Do it again in 60s. */ + last_rtc_update = xtime.tv_sec - 600; } -#endif write_sequnlock(&xtime_lock); #ifdef CONFIG_IPIPE diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index ffe7fb53ecc..8eeb457ce5d 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c @@ -27,6 +27,7 @@ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#include <linux/bug.h> #include <linux/uaccess.h> #include <linux/interrupt.h> #include <linux/module.h> @@ -36,6 +37,7 @@ #include <asm/traps.h> #include <asm/cacheflush.h> #include <asm/cplb.h> +#include <asm/dma.h> #include <asm/blackfin.h> #include <asm/irq_handler.h> #include <linux/irq.h> @@ -68,6 +70,13 @@ ({ if (0) printk(fmt, ##arg); 0; }) #endif +#if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE) +u32 last_seqstat; +#ifdef CONFIG_DEBUG_MMRS_MODULE +EXPORT_SYMBOL(last_seqstat); +#endif +#endif + /* Initiate the event table handler */ void __init trap_init(void) { @@ -79,7 +88,6 @@ void __init trap_init(void) static void decode_address(char *buf, unsigned long address) { #ifdef CONFIG_DEBUG_VERBOSE - struct vm_list_struct *vml; struct task_struct *p; struct mm_struct *mm; unsigned long flags, offset; @@ -196,6 +204,11 @@ done: asmlinkage void double_fault_c(struct pt_regs *fp) { +#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON + int j; + trace_buffer_save(j); +#endif + console_verbose(); oops_in_progress = 1; #ifdef CONFIG_DEBUG_VERBOSE @@ -220,10 +233,16 @@ asmlinkage void double_fault_c(struct pt_regs *fp) dump_bfin_process(fp); dump_bfin_mem(fp); show_regs(fp); + dump_bfin_trace_buffer(); } #endif - panic("Double Fault - unrecoverable event\n"); + panic("Double Fault - unrecoverable event"); + +} +static int kernel_mode_regs(struct pt_regs *regs) +{ + return regs->ipend & 0xffc0; } asmlinkage void trap_c(struct pt_regs *fp) @@ -234,37 +253,24 @@ asmlinkage void trap_c(struct pt_regs *fp) #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO unsigned int cpu = smp_processor_id(); #endif + const char *strerror = NULL; int sig = 0; siginfo_t info; unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE; trace_buffer_save(j); +#if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE) + last_seqstat = (u32)fp->seqstat; +#endif /* Important - be very careful dereferencing pointers - will lead to * double faults if the stack has become corrupt */ - /* If the fault was caused by a kernel thread, or interrupt handler - * we will kernel panic, so the 
system reboots. - * If KGDB is enabled, don't set this for kernel breakpoints - */ - - /* TODO: check to see if we are in some sort of deferred HWERR - * that we should be able to recover from, not kernel panic - */ - if ((bfin_read_IPEND() & 0xFFC0) && (trapnr != VEC_STEP) -#ifdef CONFIG_KGDB - && (trapnr != VEC_EXCPT02) +#ifndef CONFIG_KGDB + /* IPEND is skipped if KGDB isn't enabled (see entry code) */ + fp->ipend = bfin_read_IPEND(); #endif - ){ - console_verbose(); - oops_in_progress = 1; - } else if (current) { - if (current->mm == NULL) { - console_verbose(); - oops_in_progress = 1; - } - } /* trap_c() will be called for exceptions. During exceptions * processing, the pc value should be set with retx value. @@ -292,15 +298,15 @@ asmlinkage void trap_c(struct pt_regs *fp) sig = SIGTRAP; CHK_DEBUGGER_TRAP_MAYBE(); /* Check if this is a breakpoint in kernel space */ - if (fp->ipend & 0xffc0) - return; + if (kernel_mode_regs(fp)) + goto traps_done; else break; /* 0x03 - User Defined, userspace stack overflow */ case VEC_EXCPT03: info.si_code = SEGV_STACKFLOW; sig = SIGSEGV; - verbose_printk(KERN_NOTICE EXC_0x03(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x03(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x02 - KGDB initial connection and break signal trap */ @@ -309,7 +315,7 @@ asmlinkage void trap_c(struct pt_regs *fp) info.si_code = TRAP_ILLTRAP; sig = SIGTRAP; CHK_DEBUGGER_TRAP(); - return; + goto traps_done; #endif /* 0x04 - User Defined */ /* 0x05 - User Defined */ @@ -329,7 +335,7 @@ asmlinkage void trap_c(struct pt_regs *fp) case VEC_EXCPT04 ... VEC_EXCPT15: info.si_code = ILL_ILLPARAOP; sig = SIGILL; - verbose_printk(KERN_NOTICE EXC_0x04(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x04(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x10 HW Single step, handled here */ @@ -338,15 +344,15 @@ asmlinkage void trap_c(struct pt_regs *fp) sig = SIGTRAP; CHK_DEBUGGER_TRAP_MAYBE(); /* Check if this is a single step in kernel space */ - if (fp->ipend & 0xffc0) - return; + if (kernel_mode_regs(fp)) + goto traps_done; else break; /* 0x11 - Trace Buffer Full, handled here */ case VEC_OVFLOW: info.si_code = TRAP_TRACEFLOW; sig = SIGTRAP; - verbose_printk(KERN_NOTICE EXC_0x11(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x11(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x12 - Reserved, Caught by default */ @@ -366,37 +372,54 @@ asmlinkage void trap_c(struct pt_regs *fp) /* 0x20 - Reserved, Caught by default */ /* 0x21 - Undefined Instruction, handled here */ case VEC_UNDEF_I: +#ifdef CONFIG_BUG + if (kernel_mode_regs(fp)) { + switch (report_bug(fp->pc, fp)) { + case BUG_TRAP_TYPE_NONE: + break; + case BUG_TRAP_TYPE_WARN: + dump_bfin_trace_buffer(); + fp->pc += 2; + goto traps_done; + case BUG_TRAP_TYPE_BUG: + /* call to panic() will dump trace, and it is + * off at this point, so it won't be clobbered + */ + panic("BUG()"); + } + } +#endif info.si_code = ILL_ILLOPC; sig = SIGILL; - verbose_printk(KERN_NOTICE EXC_0x21(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x22 - Illegal Instruction Combination, handled here */ case VEC_ILGAL_I: info.si_code = ILL_ILLPARAOP; sig = SIGILL; - verbose_printk(KERN_NOTICE EXC_0x22(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x22(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x23 - Data CPLB protection violation, handled here */ case VEC_CPLB_VL: info.si_code = ILL_CPLB_VI; sig = SIGBUS; - verbose_printk(KERN_NOTICE EXC_0x23(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE); 
CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x24 - Data access misaligned, handled here */ case VEC_MISALI_D: info.si_code = BUS_ADRALN; sig = SIGBUS; - verbose_printk(KERN_NOTICE EXC_0x24(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x24(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x25 - Unrecoverable Event, handled here */ case VEC_UNCOV: info.si_code = ILL_ILLEXCPT; sig = SIGILL; - verbose_printk(KERN_NOTICE EXC_0x25(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x25(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr, @@ -404,7 +427,7 @@ asmlinkage void trap_c(struct pt_regs *fp) case VEC_CPLB_M: info.si_code = BUS_ADRALN; sig = SIGBUS; - verbose_printk(KERN_NOTICE EXC_0x26(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x26(KERN_NOTICE); break; /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */ case VEC_CPLB_MHIT: @@ -412,10 +435,10 @@ asmlinkage void trap_c(struct pt_regs *fp) sig = SIGSEGV; #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START) - verbose_printk(KERN_NOTICE "NULL pointer access\n"); + strerror = KERN_NOTICE "NULL pointer access\n"; else #endif - verbose_printk(KERN_NOTICE EXC_0x27(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x27(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x28 - Emulation Watchpoint, handled here */ @@ -425,8 +448,8 @@ asmlinkage void trap_c(struct pt_regs *fp) pr_debug(EXC_0x28(KERN_DEBUG)); CHK_DEBUGGER_TRAP_MAYBE(); /* Check if this is a watchpoint in kernel space */ - if (fp->ipend & 0xffc0) - return; + if (kernel_mode_regs(fp)) + goto traps_done; else break; #ifdef CONFIG_BF535 @@ -434,7 +457,7 @@ asmlinkage void trap_c(struct pt_regs *fp) case VEC_ISTRU_VL: /* ADSP-BF535 only (MH) */ info.si_code = BUS_OPFETCH; sig = SIGBUS; - verbose_printk(KERN_NOTICE "BF535: VEC_ISTRU_VL\n"); + strerror = KERN_NOTICE "BF535: VEC_ISTRU_VL\n"; CHK_DEBUGGER_TRAP_MAYBE(); break; #else @@ -444,21 +467,21 @@ asmlinkage void trap_c(struct pt_regs *fp) case VEC_MISALI_I: info.si_code = BUS_ADRALN; sig = SIGBUS; - verbose_printk(KERN_NOTICE EXC_0x2A(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x2A(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x2B - Instruction CPLB protection violation, handled here */ case VEC_CPLB_I_VL: info.si_code = ILL_CPLB_VI; sig = SIGBUS; - verbose_printk(KERN_NOTICE EXC_0x2B(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x2B(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */ case VEC_CPLB_I_M: info.si_code = ILL_CPLB_MISS; sig = SIGBUS; - verbose_printk(KERN_NOTICE EXC_0x2C(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x2C(KERN_NOTICE); break; /* 0x2D - Instruction CPLB Multiple Hits, handled here */ case VEC_CPLB_I_MHIT: @@ -466,17 +489,17 @@ asmlinkage void trap_c(struct pt_regs *fp) sig = SIGSEGV; #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START) - verbose_printk(KERN_NOTICE "Jump to NULL address\n"); + strerror = KERN_NOTICE "Jump to NULL address\n"; else #endif - verbose_printk(KERN_NOTICE EXC_0x2D(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x2D(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x2E - Illegal use of Supervisor Resource, handled here */ case VEC_ILL_RES: info.si_code = ILL_PRVOPC; sig = SIGILL; - verbose_printk(KERN_NOTICE EXC_0x2E(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x2E(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x2F - Reserved, Caught by default */ @@ -504,17 +527,17 @@ asmlinkage void trap_c(struct 
pt_regs *fp) case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR): info.si_code = BUS_ADRALN; sig = SIGBUS; - verbose_printk(KERN_NOTICE HWC_x2(KERN_NOTICE)); + strerror = KERN_NOTICE HWC_x2(KERN_NOTICE); break; /* External Memory Addressing Error */ case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR): info.si_code = BUS_ADRERR; sig = SIGBUS; - verbose_printk(KERN_NOTICE HWC_x3(KERN_NOTICE)); + strerror = KERN_NOTICE HWC_x3(KERN_NOTICE); break; /* Performance Monitor Overflow */ case (SEQSTAT_HWERRCAUSE_PERF_FLOW): - verbose_printk(KERN_NOTICE HWC_x12(KERN_NOTICE)); + strerror = KERN_NOTICE HWC_x12(KERN_NOTICE); break; /* RAISE 5 instruction */ case (SEQSTAT_HWERRCAUSE_RAISE_5): @@ -531,7 +554,6 @@ asmlinkage void trap_c(struct pt_regs *fp) * if we get here we hit a reserved one, so panic */ default: - oops_in_progress = 1; info.si_code = ILL_ILLPARAOP; sig = SIGILL; verbose_printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n", @@ -542,6 +564,16 @@ asmlinkage void trap_c(struct pt_regs *fp) BUG_ON(sig == 0); + /* If the fault was caused by a kernel thread, or interrupt handler + * we will kernel panic, so the system reboots. + */ + if (kernel_mode_regs(fp) || (current && !current->mm)) { + console_verbose(); + oops_in_progress = 1; + if (strerror) + verbose_printk(strerror); + } + if (sig != SIGTRAP) { dump_bfin_process(fp); dump_bfin_mem(fp); @@ -588,8 +620,11 @@ asmlinkage void trap_c(struct pt_regs *fp) force_sig_info(sig, &info, current); } + if (ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8)) + fp->pc = SAFE_USER_INSTRUCTION; + + traps_done: trace_buffer_restore(j); - return; } /* Typical exception handling routines */ @@ -602,57 +637,30 @@ asmlinkage void trap_c(struct pt_regs *fp) */ static bool get_instruction(unsigned short *val, unsigned short *address) { - - unsigned long addr; - - addr = (unsigned long)address; + unsigned long addr = (unsigned long)address; /* Check for odd addresses */ if (addr & 0x1) return false; - /* Check that things do not wrap around */ - if (addr > (addr + 2)) + /* MMR region will never have instructions */ + if (addr >= SYSMMR_BASE) return false; - /* - * Since we are in exception context, we need to do a little address checking - * We need to make sure we are only accessing valid memory, and - * we don't read something in the async space that can hang forever - */ - if ((addr >= FIXED_CODE_START && (addr + 2) <= physical_mem_end) || -#if L2_LENGTH != 0 - (addr >= L2_START && (addr + 2) <= (L2_START + L2_LENGTH)) || -#endif - (addr >= BOOT_ROM_START && (addr + 2) <= (BOOT_ROM_START + BOOT_ROM_LENGTH)) || -#if L1_DATA_A_LENGTH != 0 - (addr >= L1_DATA_A_START && (addr + 2) <= (L1_DATA_A_START + L1_DATA_A_LENGTH)) || -#endif -#if L1_DATA_B_LENGTH != 0 - (addr >= L1_DATA_B_START && (addr + 2) <= (L1_DATA_B_START + L1_DATA_B_LENGTH)) || -#endif - (addr >= L1_SCRATCH_START && (addr + 2) <= (L1_SCRATCH_START + L1_SCRATCH_LENGTH)) || - (!(bfin_read_EBIU_AMBCTL0() & B0RDYEN) && - addr >= ASYNC_BANK0_BASE && (addr + 2) <= (ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)) || - (!(bfin_read_EBIU_AMBCTL0() & B1RDYEN) && - addr >= ASYNC_BANK1_BASE && (addr + 2) <= (ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)) || - (!(bfin_read_EBIU_AMBCTL1() & B2RDYEN) && - addr >= ASYNC_BANK2_BASE && (addr + 2) <= (ASYNC_BANK2_BASE + ASYNC_BANK1_SIZE)) || - (!(bfin_read_EBIU_AMBCTL1() & B3RDYEN) && - addr >= ASYNC_BANK3_BASE && (addr + 2) <= (ASYNC_BANK3_BASE + ASYNC_BANK1_SIZE))) { - *val = *address; - return true; + switch (bfin_mem_access_type(addr, 2)) { + case BFIN_MEM_ACCESS_CORE: + 
case BFIN_MEM_ACCESS_CORE_ONLY: + *val = *address; + return true; + case BFIN_MEM_ACCESS_DMA: + dma_memcpy(val, address, 2); + return true; + case BFIN_MEM_ACCESS_ITEST: + isram_memcpy(val, address, 2); + return true; + default: /* invalid access */ + return false; } - -#if L1_CODE_LENGTH != 0 - if (addr >= L1_CODE_START && (addr + 2) <= (L1_CODE_START + L1_CODE_LENGTH)) { - isram_memcpy(val, address, 2); - return true; - } -#endif - - - return false; } /* @@ -774,6 +782,18 @@ void dump_bfin_trace_buffer(void) } EXPORT_SYMBOL(dump_bfin_trace_buffer); +#ifdef CONFIG_BUG +int is_valid_bugaddr(unsigned long addr) +{ + unsigned short opcode; + + if (!get_instruction(&opcode, (unsigned short *)addr)) + return 0; + + return opcode == BFIN_BUG_OPCODE; +} +#endif + /* * Checks to see if the address pointed to is either a * 16-bit CALL instruction, or a 32-bit CALL instruction @@ -832,6 +852,11 @@ void show_stack(struct task_struct *task, unsigned long *stack) decode_address(buf, (unsigned int)stack); printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf); + if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) { + printk(KERN_NOTICE "Invalid stack pointer\n"); + return; + } + /* First thing is to look for a frame pointer */ for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) { if (*addr & 0x1) @@ -1066,6 +1091,29 @@ void show_regs(struct pt_regs *fp) unsigned int cpu = smp_processor_id(); unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); + verbose_printk(KERN_NOTICE "\n"); + if (CPUID != bfin_cpuid()) + verbose_printk(KERN_NOTICE "Compiled for cpu family 0x%04x (Rev %d), " + "but running on:0x%04x (Rev %d)\n", + CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid()); + + verbose_printk(KERN_NOTICE "ADSP-%s-0.%d", + CPU, bfin_compiled_revid()); + + if (bfin_compiled_revid() != bfin_revid()) + verbose_printk("(Detected 0.%d)", bfin_revid()); + + verbose_printk(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n", + get_cclk()/1000000, get_sclk()/1000000, +#ifdef CONFIG_MPU + "mpu on" +#else + "mpu off" +#endif + ); + + verbose_printk(KERN_NOTICE "%s", linux_banner); + verbose_printk(KERN_NOTICE "\n" KERN_NOTICE "SEQUENCER STATUS:\t\t%s\n", print_tainted()); verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx SYSCFG: %04lx\n", (long)fp->seqstat, fp->ipend, fp->syscfg); @@ -1246,5 +1294,5 @@ void panic_cplb_error(int cplb_panic, struct pt_regs *fp) dump_bfin_mem(fp); show_regs(fp); dump_stack(); - panic("Unrecoverable event\n"); + panic("Unrecoverable event"); } diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index 27952ae047d..6ac307ca0d8 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -50,8 +50,11 @@ SECTIONS _text = .; __stext = .; TEXT_TEXT +#ifndef CONFIG_SCHEDULE_L1 SCHED_TEXT +#endif LOCK_TEXT + IRQENTRY_TEXT KPROBES_TEXT *(.text.*) *(.fixup) @@ -164,6 +167,20 @@ SECTIONS } PERCPU(4) SECURITY_INIT + + /* we have to discard exit text and such at runtime, not link time, to + * handle embedded cross-section references (alt instructions, bug + * table, eh_frame, etc...) + */ + .exit.text : + { + EXIT_TEXT + } + .exit.data : + { + EXIT_DATA + } + .init.ramfs : { . = ALIGN(4); @@ -180,6 +197,9 @@ SECTIONS . = ALIGN(4); __stext_l1 = .; *(.l1.text) +#ifdef CONFIG_SCHEDULE_L1 + SCHED_TEXT +#endif . = ALIGN(4); __etext_l1 = .; } @@ -259,8 +279,6 @@ SECTIONS /DISCARD/ : { - EXIT_TEXT - EXIT_DATA *(.exitcall.exit) } } |
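For reference, the in_mem_const()/in_mem_const_off() helpers that the reworked _access_ok() at the top of this patch relies on are defined outside these hunks. A minimal sketch of the range checks their call sites imply (macro names taken from the call sites; bodies inferred, not part of this patch):

/* Sketch only: bodies inferred from the _access_ok() call sites above,
 * not taken from this patch.
 */

/* true if the range [addr, addr+size) lies inside [start, end) */
#define in_mem(addr, size, start, end) \
	((addr) >= (start) && (addr) + (size) <= (end))

/* check against a constant region of csize bytes at cstart, skipping the
 * first off bytes (e.g. the kernel's own L1 text/data); a region whose
 * length is zero makes the whole test compile away
 */
#define in_mem_const_off(addr, size, off, cstart, csize) \
	((csize) && in_mem(addr, size, (cstart) + (off), (cstart) + (csize)))

#define in_mem_const(addr, size, cstart, csize) \
	in_mem_const_off(addr, size, 0, cstart, csize)

With csize a compile-time constant, in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH) reduces to the same open-coded bounds test the removed #if blocks spelled out, with the length-zero case folded into the macro instead of the preprocessor.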
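Likewise, the bfin_cs_cycles clocksource converts raw cycle counts to nanoseconds with the multiply-and-shift in cycles_2_ns() shown in the time-ts.c hunk. A self-contained sketch of that fixed-point arithmetic (set_cyc2ns_scale() and CYC2NS_SCALE_FACTOR sit outside the hunk, so their definitions below are assumptions consistent with the cycles_2_ns() body):

#include <stdio.h>
#include <stdint.h>

/* Sketch of the cycles->ns math behind sched_clock(); only
 * cycles_2_ns() appears in the hunk above, the rest is assumed.
 */
#define CYC2NS_SCALE_FACTOR 10	/* scale by 2^10 to keep integer precision */

static unsigned long cyc2ns_scale;

static void set_cyc2ns_scale(unsigned long cpu_khz)
{
	/* ns per cycle == 10^6 / cpu_khz, kept pre-scaled by
	 * 2^CYC2NS_SCALE_FACTOR so the hot path needs no division
	 */
	cyc2ns_scale = (1000000UL << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static inline unsigned long long cycles_2_ns(uint64_t cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

int main(void)
{
	set_cyc2ns_scale(500000);	/* 500 MHz core clock -> 2 ns/cycle */
	printf("%llu ns\n", cycles_2_ns(1000));	/* prints "2000 ns" */
	return 0;
}

At 500 MHz the scale works out to 2048, so the shift by 10 yields exactly 2 ns per cycle; sched_clock() runs the same multiply-then-shift on every call without dividing.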