author    Bernd Schmidt <bernd.schmidt@analog.com>   2008-01-11 16:58:44 +0800
committer Bryan Wu <bryan.wu@analog.com>             2008-01-11 16:58:44 +0800
commit    7a1a6d00618bce53ae88e501ff5d4b82522db926
tree      9e54215c764c16386f9874cace4cace5e2854131   /arch/blackfin/kernel/cplb-nompu
parent    aee3a29240ad167ad7875d859506d8bb90431c70
[Blackfin] arch: move all code related to CPLB handling into a new subdirectory under kernel/
Signed-off-by: Bernd Schmidt <bernd.schmidt@analog.com>
Signed-off-by: Bryan Wu <bryan.wu@analog.com>
Diffstat (limited to 'arch/blackfin/kernel/cplb-nompu')
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/Makefile    |   8
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cacheinit.c |  67
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbhdlr.S  | 130
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbinfo.c  | 208
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbinit.c  | 437
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbmgr.S   | 646
6 files changed, 1496 insertions(+), 0 deletions(-)
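
One convention runs through all of the new files below: a CPLB table is a flat
array of address/data word pairs, terminated by an address of (unsigned long)-1,
and cacheinit.c simply walks those pairs into the 16 ICPLB/DCPLB register slots.
A minimal C sketch of that walk, assuming a stand-in write32() and illustrative
register bases in place of the Blackfin bfin_write32()/MMR definitions from the
patch:

    #include <stdint.h>
    #include <stdio.h>

    #define CPLB_ADDR_BASE 0xffe00100u  /* illustrative, not the real MMR addresses */
    #define CPLB_DATA_BASE 0xffe00200u

    static void write32(uint32_t reg, uint32_t val)
    {
            /* stand-in for a volatile MMR store (bfin_write32 on hardware) */
            printf("0x%08x -> [0x%08x]\n", val, reg);
    }

    static void load_cplbs(const uint32_t *table)
    {
            int i;

            for (i = 0; i < 16; i++) {          /* 16 CPLB slots per side */
                    uint32_t addr = *table++;
                    uint32_t data = *table++;

                    if (addr == (uint32_t)-1)   /* an address of -1 terminates the table */
                            break;
                    write32(CPLB_ADDR_BASE + i * 4, addr);
                    write32(CPLB_DATA_BASE + i * 4, data);
            }
    }
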
diff --git a/arch/blackfin/kernel/cplb-nompu/Makefile b/arch/blackfin/kernel/cplb-nompu/Makefile new file mode 100644 index 00000000000..d36ea9b5382 --- /dev/null +++ b/arch/blackfin/kernel/cplb-nompu/Makefile @@ -0,0 +1,8 @@ +# +# arch/blackfin/kernel/cplb-nompu/Makefile +# + +obj-y := cplbinit.o cacheinit.o cplbhdlr.o cplbmgr.o + +obj-$(CONFIG_CPLB_INFO) += cplbinfo.o + diff --git a/arch/blackfin/kernel/cplb-nompu/cacheinit.c b/arch/blackfin/kernel/cplb-nompu/cacheinit.c new file mode 100644 index 00000000000..62cbba7364b --- /dev/null +++ b/arch/blackfin/kernel/cplb-nompu/cacheinit.c @@ -0,0 +1,67 @@ +/* + * Copyright 2004-2007 Analog Devices Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <linux/cpu.h> + +#include <asm/cacheflush.h> +#include <asm/blackfin.h> +#include <asm/cplb.h> +#include <asm/cplbinit.h> + +#if defined(CONFIG_BFIN_ICACHE) +void bfin_icache_init(void) +{ + unsigned long *table = icplb_table; + unsigned long ctrl; + int i; + + for (i = 0; i < MAX_CPLBS; i++) { + unsigned long addr = *table++; + unsigned long data = *table++; + if (addr == (unsigned long)-1) + break; + bfin_write32(ICPLB_ADDR0 + i * 4, addr); + bfin_write32(ICPLB_DATA0 + i * 4, data); + } + ctrl = bfin_read_IMEM_CONTROL(); + ctrl |= IMC | ENICPLB; + bfin_write_IMEM_CONTROL(ctrl); +} +#endif + +#if defined(CONFIG_BFIN_DCACHE) +void bfin_dcache_init(void) +{ + unsigned long *table = dcplb_table; + unsigned long ctrl; + int i; + + for (i = 0; i < MAX_CPLBS; i++) { + unsigned long addr = *table++; + unsigned long data = *table++; + if (addr == (unsigned long)-1) + break; + bfin_write32(DCPLB_ADDR0 + i * 4, addr); + bfin_write32(DCPLB_DATA0 + i * 4, data); + } + ctrl = bfin_read_DMEM_CONTROL(); + ctrl |= DMEM_CNTR; + bfin_write_DMEM_CONTROL(ctrl); +} +#endif diff --git a/arch/blackfin/kernel/cplb-nompu/cplbhdlr.S b/arch/blackfin/kernel/cplb-nompu/cplbhdlr.S new file mode 100644 index 00000000000..2788532de72 --- /dev/null +++ b/arch/blackfin/kernel/cplb-nompu/cplbhdlr.S @@ -0,0 +1,130 @@ +/* + * File: arch/blackfin/mach-common/cplbhdlr.S + * Based on: + * Author: LG Soft India + * + * Created: ? + * Description: CPLB exception handler + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <linux/linkage.h> +#include <asm/cplb.h> +#include <asm/entry.h> + +#ifdef CONFIG_EXCPT_IRQ_SYSC_L1 +.section .l1.text +#else +.text +#endif + +.type _cplb_mgr, STT_FUNC; +.type _panic_cplb_error, STT_FUNC; + +.align 2 + +ENTRY(__cplb_hdr) + R2 = SEQSTAT; + + /* Mask the contents of SEQSTAT and leave only EXCAUSE in R2 */ + R2 <<= 26; + R2 >>= 26; + + R1 = 0x23; /* Data access CPLB protection violation */ + CC = R2 == R1; + IF !CC JUMP .Lnot_data_write; + R0 = 2; /* is a write to data space*/ + JUMP .Lis_icplb_miss; + +.Lnot_data_write: + R1 = 0x2C; /* CPLB miss on an instruction fetch */ + CC = R2 == R1; + R0 = 0; /* is_data_miss == False*/ + IF CC JUMP .Lis_icplb_miss; + + R1 = 0x26; + CC = R2 == R1; + IF !CC JUMP .Lunknown; + + R0 = 1; /* is_data_miss == True*/ + +.Lis_icplb_miss: + +#if defined(CONFIG_BFIN_ICACHE) || defined(CONFIG_BFIN_DCACHE) +# if defined(CONFIG_BFIN_ICACHE) && !defined(CONFIG_BFIN_DCACHE) + R1 = CPLB_ENABLE_ICACHE; +# endif +# if !defined(CONFIG_BFIN_ICACHE) && defined(CONFIG_BFIN_DCACHE) + R1 = CPLB_ENABLE_DCACHE; +# endif +# if defined(CONFIG_BFIN_ICACHE) && defined(CONFIG_BFIN_DCACHE) + R1 = CPLB_ENABLE_DCACHE | CPLB_ENABLE_ICACHE; +# endif +#else + R1 = 0; +#endif + + [--SP] = RETS; + CALL _cplb_mgr; + RETS = [SP++]; + CC = R0 == 0; + IF !CC JUMP .Lnot_replaced; + RTS; + +/* + * Diagnostic exception handlers + */ +.Lunknown: + R0 = CPLB_UNKNOWN_ERR; + JUMP .Lcplb_error; + +.Lnot_replaced: + CC = R0 == CPLB_NO_UNLOCKED; + IF !CC JUMP .Lnext_check; + R0 = CPLB_NO_UNLOCKED; + JUMP .Lcplb_error; + +.Lnext_check: + CC = R0 == CPLB_NO_ADDR_MATCH; + IF !CC JUMP .Lnext_check2; + R0 = CPLB_NO_ADDR_MATCH; + JUMP .Lcplb_error; + +.Lnext_check2: + CC = R0 == CPLB_PROT_VIOL; + IF !CC JUMP .Lstrange_return_from_cplb_mgr; + R0 = CPLB_PROT_VIOL; + JUMP .Lcplb_error; + +.Lstrange_return_from_cplb_mgr: + IDLE; + CSYNC; + JUMP .Lstrange_return_from_cplb_mgr; + +.Lcplb_error: + R1 = sp; + SP += -12; + call _panic_cplb_error; + SP += 12; + JUMP _handle_bad_cplb; + +ENDPROC(__cplb_hdr) diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinfo.c b/arch/blackfin/kernel/cplb-nompu/cplbinfo.c new file mode 100644 index 00000000000..a4f0b428a34 --- /dev/null +++ b/arch/blackfin/kernel/cplb-nompu/cplbinfo.c @@ -0,0 +1,208 @@ +/* + * File: arch/blackfin/mach-common/cplbinfo.c + * Based on: + * Author: Sonic Zhang <sonic.zhang@analog.com> + * + * Created: Jan. 2005 + * Description: Display CPLB status + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/proc_fs.h> +#include <linux/uaccess.h> + +#include <asm/current.h> +#include <asm/system.h> +#include <asm/cplb.h> +#include <asm/blackfin.h> + +#define CPLB_I 1 +#define CPLB_D 2 + +#define SYNC_SYS SSYNC() +#define SYNC_CORE CSYNC() + +#define CPLB_BIT_PAGESIZE 0x30000 + +static int page_size_table[4] = { + 0x00000400, /* 1K */ + 0x00001000, /* 4K */ + 0x00100000, /* 1M */ + 0x00400000 /* 4M */ +}; + +static char page_size_string_table[][4] = { "1K", "4K", "1M", "4M" }; + +static int cplb_find_entry(unsigned long *cplb_addr, + unsigned long *cplb_data, unsigned long addr, + unsigned long data) +{ + int ii; + + for (ii = 0; ii < 16; ii++) + if (addr >= cplb_addr[ii] && addr < cplb_addr[ii] + + page_size_table[(cplb_data[ii] & CPLB_BIT_PAGESIZE) >> 16] + && (cplb_data[ii] == data)) + return ii; + + return -1; +} + +static char *cplb_print_entry(char *buf, int type) +{ + unsigned long *p_addr = dpdt_table; + unsigned long *p_data = dpdt_table + 1; + unsigned long *p_icount = dpdt_swapcount_table; + unsigned long *p_ocount = dpdt_swapcount_table + 1; + unsigned long *cplb_addr = (unsigned long *)DCPLB_ADDR0; + unsigned long *cplb_data = (unsigned long *)DCPLB_DATA0; + int entry = 0, used_cplb = 0; + + if (type == CPLB_I) { + buf += sprintf(buf, "Instruction CPLB entry:\n"); + p_addr = ipdt_table; + p_data = ipdt_table + 1; + p_icount = ipdt_swapcount_table; + p_ocount = ipdt_swapcount_table + 1; + cplb_addr = (unsigned long *)ICPLB_ADDR0; + cplb_data = (unsigned long *)ICPLB_DATA0; + } else + buf += sprintf(buf, "Data CPLB entry:\n"); + + buf += sprintf(buf, "Address\t\tData\tSize\tValid\tLocked\tSwapin\tiCount\toCount\n"); + + while (*p_addr != 0xffffffff) { + entry = cplb_find_entry(cplb_addr, cplb_data, *p_addr, *p_data); + if (entry >= 0) + used_cplb |= 1 << entry; + + buf += + sprintf(buf, + "0x%08lx\t0x%05lx\t%s\t%c\t%c\t%2d\t%ld\t%ld\n", + *p_addr, *p_data, + page_size_string_table[(*p_data & 0x30000) >> 16], + (*p_data & CPLB_VALID) ? 'Y' : 'N', + (*p_data & CPLB_LOCK) ? 'Y' : 'N', entry, *p_icount, + *p_ocount); + + p_addr += 2; + p_data += 2; + p_icount += 2; + p_ocount += 2; + } + + if (used_cplb != 0xffff) { + buf += sprintf(buf, "Unused/mismatched CPLBs:\n"); + + for (entry = 0; entry < 16; entry++) + if (0 == ((1 << entry) & used_cplb)) { + int flags = cplb_data[entry]; + buf += + sprintf(buf, + "%2d: 0x%08lx\t0x%05x\t%s\t%c\t%c\n", + entry, cplb_addr[entry], flags, + page_size_string_table[(flags & + 0x30000) >> + 16], + (flags & CPLB_VALID) ? 'Y' : 'N', + (flags & CPLB_LOCK) ? 
'Y' : 'N'); + } + } + + buf += sprintf(buf, "\n"); + + return buf; +} + +static int cplbinfo_proc_output(char *buf) +{ + char *p; + + p = buf; + + p += sprintf(p, "------------------ CPLB Information ------------------\n\n"); + + if (bfin_read_IMEM_CONTROL() & ENICPLB) + p = cplb_print_entry(p, CPLB_I); + else + p += sprintf(p, "Instruction CPLB is disabled.\n\n"); + + if (bfin_read_DMEM_CONTROL() & ENDCPLB) + p = cplb_print_entry(p, CPLB_D); + else + p += sprintf(p, "Data CPLB is disabled.\n"); + + return p - buf; +} + +static int cplbinfo_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len; + + len = cplbinfo_proc_output(page); + if (len <= off + count) + *eof = 1; + *start = page + off; + len -= off; + if (len > count) + len = count; + if (len < 0) + len = 0; + return len; +} + +static int cplbinfo_write_proc(struct file *file, const char __user *buffer, + unsigned long count, void *data) +{ + printk(KERN_INFO "Reset the CPLB swap in/out counts.\n"); + memset(ipdt_swapcount_table, 0, MAX_SWITCH_I_CPLBS * sizeof(unsigned long)); + memset(dpdt_swapcount_table, 0, MAX_SWITCH_D_CPLBS * sizeof(unsigned long)); + + return count; +} + +static int __init cplbinfo_init(void) +{ + struct proc_dir_entry *entry; + + entry = create_proc_entry("cplbinfo", 0, NULL); + if (!entry) + return -ENOMEM; + + entry->read_proc = cplbinfo_read_proc; + entry->write_proc = cplbinfo_write_proc; + entry->data = NULL; + + return 0; +} + +static void __exit cplbinfo_exit(void) +{ + remove_proc_entry("cplbinfo", NULL); +} + +module_init(cplbinfo_init); +module_exit(cplbinfo_exit); diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c new file mode 100644 index 00000000000..6320bc45fbb --- /dev/null +++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c @@ -0,0 +1,437 @@ +/* + * Blackfin CPLB initialization + * + * Copyright 2004-2007 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include <linux/module.h> + +#include <asm/blackfin.h> +#include <asm/cplb.h> +#include <asm/cplbinit.h> + +u_long icplb_table[MAX_CPLBS + 1]; +u_long dcplb_table[MAX_CPLBS + 1]; + +#ifdef CONFIG_CPLB_SWITCH_TAB_L1 +# define PDT_ATTR __attribute__((l1_data)) +#else +# define PDT_ATTR +#endif + +u_long ipdt_table[MAX_SWITCH_I_CPLBS + 1] PDT_ATTR; +u_long dpdt_table[MAX_SWITCH_D_CPLBS + 1] PDT_ATTR; + +#ifdef CONFIG_CPLB_INFO +u_long ipdt_swapcount_table[MAX_SWITCH_I_CPLBS] PDT_ATTR; +u_long dpdt_swapcount_table[MAX_SWITCH_D_CPLBS] PDT_ATTR; +#endif + +struct s_cplb { + struct cplb_tab init_i; + struct cplb_tab init_d; + struct cplb_tab switch_i; + struct cplb_tab switch_d; +}; + +#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE) +static struct cplb_desc cplb_data[] = { + { + .start = 0, + .end = SIZE_1K, + .psize = SIZE_1K, + .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB, + .i_conf = SDRAM_OOPS, + .d_conf = SDRAM_OOPS, +#if defined(CONFIG_DEBUG_HUNT_FOR_ZERO) + .valid = 1, +#else + .valid = 0, +#endif + .name = "Zero Pointer Guard Page", + }, + { + .start = L1_CODE_START, + .end = L1_CODE_START + L1_CODE_LENGTH, + .psize = SIZE_4M, + .attr = INITIAL_T | SWITCH_T | I_CPLB, + .i_conf = L1_IMEMORY, + .d_conf = 0, + .valid = 1, + .name = "L1 I-Memory", + }, + { + .start = L1_DATA_A_START, + .end = L1_DATA_B_START + L1_DATA_B_LENGTH, + .psize = SIZE_4M, + .attr = INITIAL_T | SWITCH_T | D_CPLB, + .i_conf = 0, + .d_conf = L1_DMEMORY, +#if ((L1_DATA_A_LENGTH > 0) || (L1_DATA_B_LENGTH > 0)) + .valid = 1, +#else + .valid = 0, +#endif + .name = "L1 D-Memory", + }, + { + .start = 0, + .end = 0, /* dynamic */ + .psize = 0, + .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB, + .i_conf = SDRAM_IGENERIC, + .d_conf = SDRAM_DGENERIC, + .valid = 1, + .name = "Kernel Memory", + }, + { + .start = 0, /* dynamic */ + .end = 0, /* dynamic */ + .psize = 0, + .attr = INITIAL_T | SWITCH_T | D_CPLB, + .i_conf = SDRAM_IGENERIC, + .d_conf = SDRAM_DNON_CHBL, + .valid = 1, + .name = "uClinux MTD Memory", + }, + { + .start = 0, /* dynamic */ + .end = 0, /* dynamic */ + .psize = SIZE_1M, + .attr = INITIAL_T | SWITCH_T | D_CPLB, + .d_conf = SDRAM_DNON_CHBL, + .valid = 1, + .name = "Uncached DMA Zone", + }, + { + .start = 0, /* dynamic */ + .end = 0, /* dynamic */ + .psize = 0, + .attr = SWITCH_T | D_CPLB, + .i_conf = 0, /* dynamic */ + .d_conf = 0, /* dynamic */ + .valid = 1, + .name = "Reserved Memory", + }, + { + .start = ASYNC_BANK0_BASE, + .end = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE, + .psize = 0, + .attr = SWITCH_T | D_CPLB, + .d_conf = SDRAM_EBIU, + .valid = 1, + .name = "Asynchronous Memory Banks", + }, + { +#ifdef L2_START + .start = L2_START, + .end = L2_START + L2_LENGTH, + .psize = SIZE_1M, + .attr = SWITCH_T | I_CPLB | D_CPLB, + .i_conf = L2_MEMORY, + .d_conf = L2_MEMORY, + .valid = 1, +#else + .valid = 0, +#endif + .name = "L2 Memory", + }, + { + .start = BOOT_ROM_START, + .end = BOOT_ROM_START + BOOT_ROM_LENGTH, + .psize = SIZE_1M, + .attr = SWITCH_T | I_CPLB | D_CPLB, + .i_conf = SDRAM_IGENERIC, + .d_conf = SDRAM_DGENERIC, + .valid = 1, + .name = "On-Chip BootROM", + }, +}; + +static u16 __init lock_kernel_check(u32 start, u32 end) +{ + if ((end <= (u32) _end && end >= (u32)_stext) || + (start <= (u32) _end && start >= (u32)_stext)) + return IN_KERNEL; + 
return 0; +} + +static unsigned short __init +fill_cplbtab(struct cplb_tab *table, + unsigned long start, unsigned long end, + unsigned long block_size, unsigned long cplb_data) +{ + int i; + + switch (block_size) { + case SIZE_4M: + i = 3; + break; + case SIZE_1M: + i = 2; + break; + case SIZE_4K: + i = 1; + break; + case SIZE_1K: + default: + i = 0; + break; + } + + cplb_data = (cplb_data & ~(3 << 16)) | (i << 16); + + while ((start < end) && (table->pos < table->size)) { + + table->tab[table->pos++] = start; + + if (lock_kernel_check(start, start + block_size) == IN_KERNEL) + table->tab[table->pos++] = + cplb_data | CPLB_LOCK | CPLB_DIRTY; + else + table->tab[table->pos++] = cplb_data; + + start += block_size; + } + return 0; +} + +static unsigned short __init +close_cplbtab(struct cplb_tab *table) +{ + + while (table->pos < table->size) { + + table->tab[table->pos++] = 0; + table->tab[table->pos++] = 0; /* !CPLB_VALID */ + } + return 0; +} + +/* helper function */ +static void __fill_code_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end) +{ + if (cplb_data[i].psize) { + fill_cplbtab(t, + cplb_data[i].start, + cplb_data[i].end, + cplb_data[i].psize, + cplb_data[i].i_conf); + } else { +#if defined(CONFIG_BFIN_ICACHE) + if (ANOMALY_05000263 && i == SDRAM_KERN) { + fill_cplbtab(t, + cplb_data[i].start, + cplb_data[i].end, + SIZE_4M, + cplb_data[i].i_conf); + } else +#endif + { + fill_cplbtab(t, + cplb_data[i].start, + a_start, + SIZE_1M, + cplb_data[i].i_conf); + fill_cplbtab(t, + a_start, + a_end, + SIZE_4M, + cplb_data[i].i_conf); + fill_cplbtab(t, a_end, + cplb_data[i].end, + SIZE_1M, + cplb_data[i].i_conf); + } + } +} + +static void __fill_data_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end) +{ + if (cplb_data[i].psize) { + fill_cplbtab(t, + cplb_data[i].start, + cplb_data[i].end, + cplb_data[i].psize, + cplb_data[i].d_conf); + } else { + fill_cplbtab(t, + cplb_data[i].start, + a_start, SIZE_1M, + cplb_data[i].d_conf); + fill_cplbtab(t, a_start, + a_end, SIZE_4M, + cplb_data[i].d_conf); + fill_cplbtab(t, a_end, + cplb_data[i].end, + SIZE_1M, + cplb_data[i].d_conf); + } +} + +void __init generate_cpl_tables(void) +{ + + u16 i, j, process; + u32 a_start, a_end, as, ae, as_1m; + + struct cplb_tab *t_i = NULL; + struct cplb_tab *t_d = NULL; + struct s_cplb cplb; + + cplb.init_i.size = MAX_CPLBS; + cplb.init_d.size = MAX_CPLBS; + cplb.switch_i.size = MAX_SWITCH_I_CPLBS; + cplb.switch_d.size = MAX_SWITCH_D_CPLBS; + + cplb.init_i.pos = 0; + cplb.init_d.pos = 0; + cplb.switch_i.pos = 0; + cplb.switch_d.pos = 0; + + cplb.init_i.tab = icplb_table; + cplb.init_d.tab = dcplb_table; + cplb.switch_i.tab = ipdt_table; + cplb.switch_d.tab = dpdt_table; + + cplb_data[SDRAM_KERN].end = memory_end; + +#ifdef CONFIG_MTD_UCLINUX + cplb_data[SDRAM_RAM_MTD].start = memory_mtd_start; + cplb_data[SDRAM_RAM_MTD].end = memory_mtd_start + mtd_size; + cplb_data[SDRAM_RAM_MTD].valid = mtd_size > 0; +# if defined(CONFIG_ROMFS_FS) + cplb_data[SDRAM_RAM_MTD].attr |= I_CPLB; + + /* + * The ROMFS_FS size is often not multiple of 1MB. + * This can cause multiple CPLB sets covering the same memory area. + * This will then cause multiple CPLB hit exceptions. + * Workaround: We ensure a contiguous memory area by extending the kernel + * memory section over the mtd section. + * For ROMFS_FS memory must be covered with ICPLBs anyways. + * So there is no difference between kernel and mtd memory setup. 
+ */ + + cplb_data[SDRAM_KERN].end = memory_mtd_start + mtd_size;; + cplb_data[SDRAM_RAM_MTD].valid = 0; + +# endif +#else + cplb_data[SDRAM_RAM_MTD].valid = 0; +#endif + + cplb_data[SDRAM_DMAZ].start = _ramend - DMA_UNCACHED_REGION; + cplb_data[SDRAM_DMAZ].end = _ramend; + + cplb_data[RES_MEM].start = _ramend; + cplb_data[RES_MEM].end = physical_mem_end; + + if (reserved_mem_dcache_on) + cplb_data[RES_MEM].d_conf = SDRAM_DGENERIC; + else + cplb_data[RES_MEM].d_conf = SDRAM_DNON_CHBL; + + if (reserved_mem_icache_on) + cplb_data[RES_MEM].i_conf = SDRAM_IGENERIC; + else + cplb_data[RES_MEM].i_conf = SDRAM_INON_CHBL; + + for (i = ZERO_P; i < ARRAY_SIZE(cplb_data); ++i) { + if (!cplb_data[i].valid) + continue; + + as_1m = cplb_data[i].start % SIZE_1M; + + /* We need to make sure all sections are properly 1M aligned + * However between Kernel Memory and the Kernel mtd section, depending on the + * rootfs size, there can be overlapping memory areas. + */ + + if (as_1m && i != L1I_MEM && i != L1D_MEM) { +#ifdef CONFIG_MTD_UCLINUX + if (i == SDRAM_RAM_MTD) { + if ((cplb_data[SDRAM_KERN].end + 1) > cplb_data[SDRAM_RAM_MTD].start) + cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M)) + SIZE_1M; + else + cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M)); + } else +#endif + printk(KERN_WARNING "Unaligned Start of %s at 0x%X\n", + cplb_data[i].name, cplb_data[i].start); + } + + as = cplb_data[i].start % SIZE_4M; + ae = cplb_data[i].end % SIZE_4M; + + if (as) + a_start = cplb_data[i].start + (SIZE_4M - (as)); + else + a_start = cplb_data[i].start; + + a_end = cplb_data[i].end - ae; + + for (j = INITIAL_T; j <= SWITCH_T; j++) { + + switch (j) { + case INITIAL_T: + if (cplb_data[i].attr & INITIAL_T) { + t_i = &cplb.init_i; + t_d = &cplb.init_d; + process = 1; + } else + process = 0; + break; + case SWITCH_T: + if (cplb_data[i].attr & SWITCH_T) { + t_i = &cplb.switch_i; + t_d = &cplb.switch_d; + process = 1; + } else + process = 0; + break; + default: + process = 0; + break; + } + + if (!process) + continue; + if (cplb_data[i].attr & I_CPLB) + __fill_code_cplbtab(t_i, i, a_start, a_end); + + if (cplb_data[i].attr & D_CPLB) + __fill_data_cplbtab(t_d, i, a_start, a_end); + } + } + +/* close tables */ + + close_cplbtab(&cplb.init_i); + close_cplbtab(&cplb.init_d); + + cplb.init_i.tab[cplb.init_i.pos] = -1; + cplb.init_d.tab[cplb.init_d.pos] = -1; + cplb.switch_i.tab[cplb.switch_i.pos] = -1; + cplb.switch_d.tab[cplb.switch_d.pos] = -1; + +} + +#endif + diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.S b/arch/blackfin/kernel/cplb-nompu/cplbmgr.S new file mode 100644 index 00000000000..f5cf3accef3 --- /dev/null +++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.S @@ -0,0 +1,646 @@ +/* + * File: arch/blackfin/mach-common/cplbmgtr.S + * Based on: + * Author: LG Soft India + * + * Created: ? + * Description: CPLB replacement routine for CPLB mismatch + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* Usage: int _cplb_mgr(is_data_miss,int enable_cache) + * is_data_miss==2 => Mark as Dirty, write to the clean data page + * is_data_miss==1 => Replace a data CPLB. + * is_data_miss==0 => Replace an instruction CPLB. + * + * Returns: + * CPLB_RELOADED => Successfully updated CPLB table. + * CPLB_NO_UNLOCKED => All CPLBs are locked, so cannot be evicted. + * This indicates that the CPLBs in the configuration + * tablei are badly configured, as this should never + * occur. + * CPLB_NO_ADDR_MATCH => The address being accessed, that triggered the + * exception, is not covered by any of the CPLBs in + * the configuration table. The application is + * presumably misbehaving. + * CPLB_PROT_VIOL => The address being accessed, that triggered the + * exception, was not a first-write to a clean Write + * Back Data page, and so presumably is a genuine + * violation of the page's protection attributes. + * The application is misbehaving. + */ + +#include <linux/linkage.h> +#include <asm/blackfin.h> +#include <asm/cplb.h> + +#ifdef CONFIG_EXCPT_IRQ_SYSC_L1 +.section .l1.text +#else +.text +#endif + +.align 2; +ENTRY(_cplb_mgr) + + [--SP]=( R7:4,P5:3 ); + + CC = R0 == 2; + IF CC JUMP .Ldcplb_write; + + CC = R0 == 0; + IF !CC JUMP .Ldcplb_miss_compare; + + /* ICPLB Miss Exception. We need to choose one of the + * currently-installed CPLBs, and replace it with one + * from the configuration table. + */ + + /* A multi-word instruction can cross a page boundary. This means the + * first part of the instruction can be in a valid page, but the + * second part is not, and hence generates the instruction miss. + * However, the fault address is for the start of the instruction, + * not the part that's in the bad page. Therefore, we have to check + * whether the fault address applies to a page that is already present + * in the table. 
+ */ + + P4.L = LO(ICPLB_FAULT_ADDR); + P4.H = HI(ICPLB_FAULT_ADDR); + + P1 = 16; + P5.L = _page_size_table; + P5.H = _page_size_table; + + P0.L = LO(ICPLB_DATA0); + P0.H = HI(ICPLB_DATA0); + R4 = [P4]; /* Get faulting address*/ + R6 = 64; /* Advance past the fault address, which*/ + R6 = R6 + R4; /* we'll use if we find a match*/ + R3 = ((16 << 8) | 2); /* Extract mask, two bits at posn 16 */ + + R5 = 0; +.Lisearch: + + R1 = [P0-0x100]; /* Address for this CPLB */ + + R0 = [P0++]; /* Info for this CPLB*/ + CC = BITTST(R0,0); /* Is the CPLB valid?*/ + IF !CC JUMP .Lnomatch; /* Skip it, if not.*/ + CC = R4 < R1(IU); /* If fault address less than page start*/ + IF CC JUMP .Lnomatch; /* then skip this one.*/ + R2 = EXTRACT(R0,R3.L) (Z); /* Get page size*/ + P1 = R2; + P1 = P5 + (P1<<2); /* index into page-size table*/ + R2 = [P1]; /* Get the page size*/ + R1 = R1 + R2; /* and add to page start, to get page end*/ + CC = R4 < R1(IU); /* and see whether fault addr is in page.*/ + IF !CC R4 = R6; /* If so, advance the address and finish loop.*/ + IF !CC JUMP .Lisearch_done; +.Lnomatch: + /* Go around again*/ + R5 += 1; + CC = BITTST(R5, 4); /* i.e CC = R5 >= 16*/ + IF !CC JUMP .Lisearch; + +.Lisearch_done: + I0 = R4; /* Fault address we'll search for*/ + + /* set up pointers */ + P0.L = LO(ICPLB_DATA0); + P0.H = HI(ICPLB_DATA0); + + /* The replacement procedure for ICPLBs */ + + P4.L = LO(IMEM_CONTROL); + P4.H = HI(IMEM_CONTROL); + + /* Turn off CPLBs while we work, necessary according to HRM before + * modifying CPLB descriptors + */ + R5 = [P4]; /* Control Register*/ + BITCLR(R5,ENICPLB_P); + CLI R1; + SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */ + .align 8; + [P4] = R5; + SSYNC; + STI R1; + + R1 = -1; /* end point comparison */ + R3 = 16; /* counter */ + + /* Search through CPLBs for first non-locked entry */ + /* Overwrite it by moving everyone else up by 1 */ +.Licheck_lock: + R0 = [P0++]; + R3 = R3 + R1; + CC = R3 == R1; + IF CC JUMP .Lall_locked; + CC = BITTST(R0, 0); /* an invalid entry is good */ + IF !CC JUMP .Lifound_victim; + CC = BITTST(R0,1); /* but a locked entry isn't */ + IF CC JUMP .Licheck_lock; + +.Lifound_victim: +#ifdef CONFIG_CPLB_INFO + R7 = [P0 - 0x104]; + P2.L = _ipdt_table; + P2.H = _ipdt_table; + P3.L = _ipdt_swapcount_table; + P3.H = _ipdt_swapcount_table; + P3 += -4; +.Licount: + R2 = [P2]; /* address from config table */ + P2 += 8; + P3 += 8; + CC = R2==-1; + IF CC JUMP .Licount_done; + CC = R7==R2; + IF !CC JUMP .Licount; + R7 = [P3]; + R7 += 1; + [P3] = R7; + CSYNC; +.Licount_done: +#endif + LC0=R3; + LSETUP(.Lis_move,.Lie_move) LC0; +.Lis_move: + R0 = [P0]; + [P0 - 4] = R0; + R0 = [P0 - 0x100]; + [P0-0x104] = R0; +.Lie_move: + P0+=4; + + /* Clear ICPLB_DATA15, in case we don't find a replacement + * otherwise, we would have a duplicate entry, and will crash + */ + R0 = 0; + [P0 - 4] = R0; + + /* We've made space in the ICPLB table, so that ICPLB15 + * is now free to be overwritten. Next, we have to determine + * which CPLB we need to install, from the configuration + * table. This is a matter of getting the start-of-page + * addresses and page-lengths from the config table, and + * determining whether the fault address falls within that + * range. 
+ */ + + P2.L = _ipdt_table; + P2.H = _ipdt_table; +#ifdef CONFIG_CPLB_INFO + P3.L = _ipdt_swapcount_table; + P3.H = _ipdt_swapcount_table; + P3 += -8; +#endif + P0.L = _page_size_table; + P0.H = _page_size_table; + + /* Retrieve our fault address (which may have been advanced + * because the faulting instruction crossed a page boundary). + */ + + R0 = I0; + + /* An extraction pattern, to get the page-size bits from + * the CPLB data entry. Bits 16-17, so two bits at posn 16. + */ + + R1 = ((16<<8)|2); +.Linext: R4 = [P2++]; /* address from config table */ + R2 = [P2++]; /* data from config table */ +#ifdef CONFIG_CPLB_INFO + P3 += 8; +#endif + + CC = R4 == -1; /* End of config table*/ + IF CC JUMP .Lno_page_in_table; + + /* See if failed address > start address */ + CC = R4 <= R0(IU); + IF !CC JUMP .Linext; + + /* extract page size (17:16)*/ + R3 = EXTRACT(R2, R1.L) (Z); + + /* add page size to addr to get range */ + + P5 = R3; + P5 = P0 + (P5 << 2); /* scaled, for int access*/ + R3 = [P5]; + R3 = R3 + R4; + + /* See if failed address < (start address + page size) */ + CC = R0 < R3(IU); + IF !CC JUMP .Linext; + + /* We've found a CPLB in the config table that covers + * the faulting address, so install this CPLB into the + * last entry of the table. + */ + + P1.L = LO(ICPLB_DATA15); /* ICPLB_DATA15 */ + P1.H = HI(ICPLB_DATA15); + [P1] = R2; + [P1-0x100] = R4; +#ifdef CONFIG_CPLB_INFO + R3 = [P3]; + R3 += 1; + [P3] = R3; +#endif + + /* P4 points to IMEM_CONTROL, and R5 contains its old + * value, after we disabled ICPLBS. Re-enable them. + */ + + BITSET(R5,ENICPLB_P); + CLI R2; + SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */ + .align 8; + [P4] = R5; + SSYNC; + STI R2; + + ( R7:4,P5:3 ) = [SP++]; + R0 = CPLB_RELOADED; + RTS; + +/* FAILED CASES*/ +.Lno_page_in_table: + R0 = CPLB_NO_ADDR_MATCH; + JUMP .Lfail_ret; + +.Lall_locked: + R0 = CPLB_NO_UNLOCKED; + JUMP .Lfail_ret; + +.Lprot_violation: + R0 = CPLB_PROT_VIOL; + +.Lfail_ret: + /* Make sure we turn protection/cache back on, even in the failing case */ + BITSET(R5,ENICPLB_P); + CLI R2; + SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */ + .align 8; + [P4] = R5; + SSYNC; + STI R2; + + ( R7:4,P5:3 ) = [SP++]; + RTS; + +.Ldcplb_write: + + /* if a DCPLB is marked as write-back (CPLB_WT==0), and + * it is clean (CPLB_DIRTY==0), then a write to the + * CPLB's page triggers a protection violation. We have to + * mark the CPLB as dirty, to indicate that there are + * pending writes associated with the CPLB. + */ + + P4.L = LO(DCPLB_STATUS); + P4.H = HI(DCPLB_STATUS); + P3.L = LO(DCPLB_DATA0); + P3.H = HI(DCPLB_DATA0); + R5 = [P4]; + + /* A protection violation can be caused by more than just writes + * to a clean WB page, so we have to ensure that: + * - It's a write + * - to a clean WB page + * - and is allowed in the mode the access occurred. 
+ */ + + CC = BITTST(R5, 16); /* ensure it was a write*/ + IF !CC JUMP .Lprot_violation; + + /* to check the rest, we have to retrieve the DCPLB.*/ + + /* The low half of DCPLB_STATUS is a bit mask*/ + + R2 = R5.L (Z); /* indicating which CPLB triggered the event.*/ + R3 = 30; /* so we can use this to determine the offset*/ + R2.L = SIGNBITS R2; + R2 = R2.L (Z); /* into the DCPLB table.*/ + R3 = R3 - R2; + P4 = R3; + P3 = P3 + (P4<<2); + R3 = [P3]; /* Retrieve the CPLB*/ + + /* Now we can check whether it's a clean WB page*/ + + CC = BITTST(R3, 14); /* 0==WB, 1==WT*/ + IF CC JUMP .Lprot_violation; + CC = BITTST(R3, 7); /* 0 == clean, 1 == dirty*/ + IF CC JUMP .Lprot_violation; + + /* Check whether the write is allowed in the mode that was active.*/ + + R2 = 1<<3; /* checking write in user mode*/ + CC = BITTST(R5, 17); /* 0==was user, 1==was super*/ + R5 = CC; + R2 <<= R5; /* if was super, check write in super mode*/ + R2 = R3 & R2; + CC = R2 == 0; + IF CC JUMP .Lprot_violation; + + /* It's a genuine write-to-clean-page.*/ + + BITSET(R3, 7); /* mark as dirty*/ + [P3] = R3; /* and write back.*/ + NOP; + CSYNC; + ( R7:4,P5:3 ) = [SP++]; + R0 = CPLB_RELOADED; + RTS; + +.Ldcplb_miss_compare: + + /* Data CPLB Miss event. We need to choose a CPLB to + * evict, and then locate a new CPLB to install from the + * config table, that covers the faulting address. + */ + + P1.L = LO(DCPLB_DATA15); + P1.H = HI(DCPLB_DATA15); + + P4.L = LO(DCPLB_FAULT_ADDR); + P4.H = HI(DCPLB_FAULT_ADDR); + R4 = [P4]; + I0 = R4; + + /* The replacement procedure for DCPLBs*/ + + R6 = R1; /* Save for later*/ + + /* Turn off CPLBs while we work.*/ + P4.L = LO(DMEM_CONTROL); + P4.H = HI(DMEM_CONTROL); + R5 = [P4]; + BITCLR(R5,ENDCPLB_P); + CLI R0; + SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */ + .align 8; + [P4] = R5; + SSYNC; + STI R0; + + /* Start looking for a CPLB to evict. Our order of preference + * is: invalid CPLBs, clean CPLBs, dirty CPLBs. Locked CPLBs + * are no good. + */ + + I1.L = LO(DCPLB_DATA0); + I1.H = HI(DCPLB_DATA0); + P1 = 2; + P2 = 16; + I2.L = _dcplb_preference; + I2.H = _dcplb_preference; + LSETUP(.Lsdsearch1, .Ledsearch1) LC0 = P1; +.Lsdsearch1: + R0 = [I2++]; /* Get the bits we're interested in*/ + P0 = I1; /* Go back to start of table*/ + LSETUP (.Lsdsearch2, .Ledsearch2) LC1 = P2; +.Lsdsearch2: + R1 = [P0++]; /* Fetch each installed CPLB in turn*/ + R2 = R1 & R0; /* and test for interesting bits.*/ + CC = R2 == 0; /* If none are set, it'll do.*/ + IF !CC JUMP .Lskip_stack_check; + + R2 = [P0 - 0x104]; /* R2 - PageStart */ + P3.L = _page_size_table; /* retrieve end address */ + P3.H = _page_size_table; /* retrieve end address */ + R3 = 0x1002; /* 16th - position, 2 bits -length */ +#if ANOMALY_05000209 + nop; /* Anomaly 05000209 */ +#endif + R7 = EXTRACT(R1,R3.l); + R7 = R7 << 2; /* Page size index offset */ + P5 = R7; + P3 = P3 + P5; + R7 = [P3]; /* page size in bytes */ + + R7 = R2 + R7; /* R7 - PageEnd */ + R4 = SP; /* Test SP is in range */ + + CC = R7 < R4; /* if PageEnd < SP */ + IF CC JUMP .Ldfound_victim; + R3 = 0x284; /* stack length from start of trap till + * the point. + * 20 stack locations for future modifications + */ + R4 = R4 + R3; + CC = R4 < R2; /* if SP + stacklen < PageStart */ + IF CC JUMP .Ldfound_victim; +.Lskip_stack_check: + +.Ledsearch2: NOP; +.Ledsearch1: NOP; + + /* If we got here, we didn't find a DCPLB we considered + * replacable, which means all of them were locked. 
+ */ + + JUMP .Lall_locked; +.Ldfound_victim: + +#ifdef CONFIG_CPLB_INFO + R7 = [P0 - 0x104]; + P2.L = _dpdt_table; + P2.H = _dpdt_table; + P3.L = _dpdt_swapcount_table; + P3.H = _dpdt_swapcount_table; + P3 += -4; +.Ldicount: + R2 = [P2]; + P2 += 8; + P3 += 8; + CC = R2==-1; + IF CC JUMP .Ldicount_done; + CC = R7==R2; + IF !CC JUMP .Ldicount; + R7 = [P3]; + R7 += 1; + [P3] = R7; +.Ldicount_done: +#endif + + /* Clean down the hardware loops*/ + R2 = 0; + LC1 = R2; + LC0 = R2; + + /* There's a suitable victim in [P0-4] (because we've + * advanced already). + */ + +.LDdoverwrite: + + /* [P0-4] is a suitable victim CPLB, so we want to + * overwrite it by moving all the following CPLBs + * one space closer to the start. + */ + + R1.L = LO(DCPLB_DATA16); /* DCPLB_DATA15 + 4 */ + R1.H = HI(DCPLB_DATA16); + R0 = P0; + + /* If the victim happens to be in DCPLB15, + * we don't need to move anything. + */ + + CC = R1 == R0; + IF CC JUMP .Lde_moved; + R1 = R1 - R0; + R1 >>= 2; + P1 = R1; + LSETUP(.Lds_move, .Lde_move) LC0=P1; +.Lds_move: + R0 = [P0++]; /* move data */ + [P0 - 8] = R0; + R0 = [P0-0x104] /* move address */ +.Lde_move: + [P0-0x108] = R0; + +.Lde_moved: + NOP; + + /* Clear DCPLB_DATA15, in case we don't find a replacement + * otherwise, we would have a duplicate entry, and will crash + */ + R0 = 0; + [P0 - 0x4] = R0; + + /* We've now made space in DCPLB15 for the new CPLB to be + * installed. The next stage is to locate a CPLB in the + * config table that covers the faulting address. + */ + + R0 = I0; /* Our faulting address */ + + P2.L = _dpdt_table; + P2.H = _dpdt_table; +#ifdef CONFIG_CPLB_INFO + P3.L = _dpdt_swapcount_table; + P3.H = _dpdt_swapcount_table; + P3 += -8; +#endif + + P1.L = _page_size_table; + P1.H = _page_size_table; + + /* An extraction pattern, to retrieve bits 17:16.*/ + + R1 = (16<<8)|2; +.Ldnext: R4 = [P2++]; /* address */ + R2 = [P2++]; /* data */ +#ifdef CONFIG_CPLB_INFO + P3 += 8; +#endif + + CC = R4 == -1; + IF CC JUMP .Lno_page_in_table; + + /* See if failed address > start address */ + CC = R4 <= R0(IU); + IF !CC JUMP .Ldnext; + + /* extract page size (17:16)*/ + R3 = EXTRACT(R2, R1.L) (Z); + + /* add page size to addr to get range */ + + P5 = R3; + P5 = P1 + (P5 << 2); + R3 = [P5]; + R3 = R3 + R4; + + /* See if failed address < (start address + page size) */ + CC = R0 < R3(IU); + IF !CC JUMP .Ldnext; + + /* We've found the CPLB that should be installed, so + * write it into CPLB15, masking off any caching bits + * if necessary. + */ + + P1.L = LO(DCPLB_DATA15); + P1.H = HI(DCPLB_DATA15); + + /* If the DCPLB has cache bits set, but caching hasn't + * been enabled, then we want to mask off the cache-in-L1 + * bit before installing. Moreover, if caching is off, we + * also want to ensure that the DCPLB has WT mode set, rather + * than WB, since WB pages still trigger first-write exceptions + * even when not caching is off, and the page isn't marked as + * cachable. Finally, we could mark the page as clean, not dirty, + * but we choose to leave that decision to the user; if the user + * chooses to have a CPLB pre-defined as dirty, then they always + * pay the cost of flushing during eviction, but don't pay the + * cost of first-write exceptions to mark the page as dirty. + */ + +#ifdef CONFIG_BFIN_WT + BITSET(R6, 14); /* Set WT*/ +#endif + + [P1] = R2; + [P1-0x100] = R4; +#ifdef CONFIG_CPLB_INFO + R3 = [P3]; + R3 += 1; + [P3] = R3; +#endif + + /* We've installed the CPLB, so re-enable CPLBs. 
P4 + * points to DMEM_CONTROL, and R5 is the value we + * last wrote to it, when we were disabling CPLBs. + */ + + BITSET(R5,ENDCPLB_P); + CLI R2; + .align 8; + [P4] = R5; + SSYNC; + STI R2; + + ( R7:4,P5:3 ) = [SP++]; + R0 = CPLB_RELOADED; + RTS; +ENDPROC(_cplb_mgr) + +.data +.align 4; +_page_size_table: +.byte4 0x00000400; /* 1K */ +.byte4 0x00001000; /* 4K */ +.byte4 0x00100000; /* 1M */ +.byte4 0x00400000; /* 4M */ + +.align 4; +_dcplb_preference: +.byte4 0x00000001; /* valid bit */ +.byte4 0x00000002; /* lock bit */ |
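
Throughout the patch, bits 17:16 of a CPLB data word select the page size
(1K/4K/1M/4M): cplbinfo.c masks them with CPLB_BIT_PAGESIZE (0x30000), and
cplbmgr.S extracts them with the pattern (16<<8)|2 to index _page_size_table.
A short C sketch of the "does this entry cover the faulting address" test that
cplb_find_entry() and the assembler search loops both perform; cplb_covers()
is an illustrative helper name, not a function from the patch:

    #include <stdbool.h>
    #include <stdint.h>

    #define CPLB_VALID 0x1   /* bit 0, cf. BITTST(R0,0) in cplbmgr.S */
    #define CPLB_LOCK  0x2   /* bit 1, cf. BITTST(R0,1) in cplbmgr.S */

    static const uint32_t page_size_table[4] = {
            0x00000400,     /* 1K */
            0x00001000,     /* 4K */
            0x00100000,     /* 1M */
            0x00400000,     /* 4M */
    };

    /* Does a CPLB entry (addr, data) cover the faulting address? */
    static bool cplb_covers(uint32_t addr, uint32_t data, uint32_t fault)
    {
            uint32_t size = page_size_table[(data >> 16) & 0x3];

            if (!(data & CPLB_VALID))
                    return false;
            return fault >= addr && fault < addr + size;
    }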