Diffstat (limited to 'arch/sh/kernel/cpu/sh4')
-rw-r--r--  arch/sh/kernel/cpu/sh4/Makefile          |  10
-rw-r--r--  arch/sh/kernel/cpu/sh4/ex.S              | 176
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c           | 138
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh4-202.c   |  43
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh73180.c   |  43
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7343.c    |  43
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7750.c    |  48
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7760.c    |  53
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7770.c    |  53
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7780.c    |  79
-rw-r--r--  arch/sh/kernel/cpu/sh4/sq.c              | 543
11 files changed, 912 insertions(+), 317 deletions(-)
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile index 3d5cafc71ae..8dbf3895ece 100644 --- a/arch/sh/kernel/cpu/sh4/Makefile +++ b/arch/sh/kernel/cpu/sh4/Makefile @@ -7,6 +7,16 @@ obj-y := ex.o probe.o obj-$(CONFIG_SH_FPU) += fpu.o obj-$(CONFIG_SH_STORE_QUEUES) += sq.o +# CPU subtype setup +obj-$(CONFIG_CPU_SUBTYPE_SH7750) += setup-sh7750.o +obj-$(CONFIG_CPU_SUBTYPE_SH7751) += setup-sh7750.o +obj-$(CONFIG_CPU_SUBTYPE_SH7760) += setup-sh7760.o +obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o +obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o +obj-$(CONFIG_CPU_SUBTYPE_SH73180) += setup-sh73180.o +obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o +obj-$(CONFIG_CPU_SUBTYPE_SH4_202) += setup-sh4-202.o + # Primary on-chip clocks (common) clock-$(CONFIG_CPU_SH4) := clock-sh4.o clock-$(CONFIG_CPU_SUBTYPE_SH73180) := clock-sh73180.o diff --git a/arch/sh/kernel/cpu/sh4/ex.S b/arch/sh/kernel/cpu/sh4/ex.S index 26a27df0650..7146893a6cc 100644 --- a/arch/sh/kernel/cpu/sh4/ex.S +++ b/arch/sh/kernel/cpu/sh4/ex.S @@ -72,6 +72,7 @@ ENTRY(interrupt_table) .long do_IRQ ! 1110 .long exception_error ! Internal hardware +#ifndef CONFIG_CPU_SUBTYPE_SH7780 .long do_IRQ ! TMU0 tuni0 /* 400 */ .long do_IRQ ! TMU1 tuni1 .long do_IRQ ! TMU2 tuni2 @@ -122,6 +123,13 @@ ENTRY(interrupt_table) .long do_IRQ ! 45 dmte5 .long do_IRQ ! 46 dmte6 .long do_IRQ ! 47 dmte7 /* 7E0 */ +#elif defined(CONFIG_CPU_SUBTYPE_SH7343) + .long do_IRQ ! 44 IIC1 ali /* 780 */ + .long do_IRQ ! 45 tacki + .long do_IRQ ! 46 waiti + .long do_IRQ ! 47 dtei /* 7E0 */ + .long do_IRQ ! 48 DMAC dei0 /* 800 */ + .long do_IRQ ! 49 dei1 /* 820 */ #else .long exception_error ! 44 /* 780 */ .long exception_error ! 45 @@ -131,7 +139,8 @@ ENTRY(interrupt_table) #if defined(CONFIG_SH_FPU) .long do_fpu_state_restore ! 48 /* 800 */ .long do_fpu_state_restore ! 49 /* 820 */ -#else +#elif !defined(CONFIG_CPU_SUBTYPE_SH7343) && \ + !defined(CONFIG_CPU_SUBTYPE_SH73180) .long exception_error .long exception_error #endif @@ -224,7 +233,7 @@ ENTRY(interrupt_table) .long exception_error .long do_IRQ ! ADC adi .long do_IRQ ! CMT cmti /* FA0 */ -#elif defined(CONFIG_CPU_SUBTYPE_SH73180) +#elif defined(CONFIG_CPU_SUBTYPE_SH73180) || defined(CONFIG_CPU_SUBTYPE_SH7343) .long do_IRQ ! 50 0x840 .long do_IRQ ! 51 0x860 .long do_IRQ ! 52 0x880 @@ -379,5 +388,168 @@ ENTRY(interrupt_table) .long exception_error ! 141 0x13a0 .long exception_error ! 142 0x13c0 .long exception_error ! 143 0x13e0 +#elif defined(CONFIG_CPU_SUBTYPE_SH7770) + .long do_IRQ ! 50 0x840 + .long do_IRQ ! 51 0x860 + .long do_IRQ ! 52 0x880 + .long do_IRQ ! 53 0x8a0 + .long do_IRQ ! 54 0x8c0 + .long do_IRQ ! 55 0x8e0 + .long do_IRQ ! 56 0x900 + .long do_IRQ ! 57 0x920 + .long do_IRQ ! 58 0x940 + .long do_IRQ ! 59 0x960 + .long do_IRQ ! 60 0x980 + .long do_IRQ ! 61 0x9a0 + .long do_IRQ ! 62 0x9c0 + .long do_IRQ ! 63 0x9e0 + .long do_IRQ ! 64 0xa00 + .long do_IRQ ! 65 0xa20 + .long do_IRQ ! 66 0xa4d + .long do_IRQ ! 67 0xa60 + .long do_IRQ ! 68 0xa80 + .long do_IRQ ! 69 0xaa0 + .long do_IRQ ! 70 0xac0 + .long do_IRQ ! 71 0xae0 + .long do_IRQ ! 72 0xb00 + .long do_IRQ ! 73 0xb20 + .long do_IRQ ! 74 0xb40 + .long do_IRQ ! 75 0xb60 + .long do_IRQ ! 76 0xb80 + .long do_IRQ ! 77 0xba0 + .long do_IRQ ! 78 0xbc0 + .long do_IRQ ! 79 0xbe0 + .long do_IRQ ! 80 0xc00 + .long do_IRQ ! 81 0xc20 + .long do_IRQ ! 82 0xc40 + .long do_IRQ ! 83 0xc60 + .long do_IRQ ! 84 0xc80 + .long do_IRQ ! 85 0xca0 + .long do_IRQ ! 86 0xcc0 + .long do_IRQ ! 87 0xce0 + .long do_IRQ ! 88 0xd00 + .long do_IRQ ! 
89 0xd20 + .long do_IRQ ! 90 0xd40 + .long do_IRQ ! 91 0xd60 + .long do_IRQ ! 92 0xd80 + .long do_IRQ ! 93 0xda0 + .long do_IRQ ! 94 0xdc0 + .long do_IRQ ! 95 0xde0 + .long do_IRQ ! 96 0xe00 + .long do_IRQ ! 97 0xe20 + .long do_IRQ ! 98 0xe40 + .long do_IRQ ! 99 0xe60 + .long do_IRQ ! 100 0xe80 + .long do_IRQ ! 101 0xea0 + .long do_IRQ ! 102 0xec0 + .long do_IRQ ! 103 0xee0 + .long do_IRQ ! 104 0xf00 + .long do_IRQ ! 105 0xf20 + .long do_IRQ ! 106 0xf40 + .long do_IRQ ! 107 0xf60 + .long do_IRQ ! 108 0xf80 +#endif +#else + .long exception_error /* 400 */ + .long exception_error + .long exception_error + .long exception_error + .long do_IRQ ! RTC ati + .long do_IRQ ! pri + .long do_IRQ ! cui + .long exception_error + .long exception_error /* 500 */ + .long exception_error + .long exception_error + .long do_IRQ ! WDT iti /* 560 */ + .long do_IRQ ! TMU-ch0 + .long do_IRQ ! TMU-ch1 + .long do_IRQ ! TMU-ch2 + .long do_IRQ ! ticpi2 /* 5E0 */ + .long do_IRQ ! 32 Hitachi UDI /* 600 */ + .long exception_error + .long do_IRQ ! 34 DMAC dmte0 + .long do_IRQ ! 35 dmte1 + .long do_IRQ ! 36 dmte2 + .long do_IRQ ! 37 dmte3 + .long do_IRQ ! 38 dmae + .long exception_error ! 39 /* 6E0 */ + .long do_IRQ ! 40 SCIF-ch0 eri /* 700 */ + .long do_IRQ ! 41 rxi + .long do_IRQ ! 42 bri + .long do_IRQ ! 43 txi + .long do_IRQ ! 44 DMAC dmte4 /* 780 */ + .long do_IRQ ! 45 dmte5 + .long do_IRQ ! 46 dmte6 + .long do_IRQ ! 47 dmte7 /* 7E0 */ +#if defined(CONFIG_SH_FPU) + .long do_fpu_state_restore ! 48 /* 800 */ + .long do_fpu_state_restore ! 49 /* 820 */ +#else + .long exception_error + .long exception_error +#endif + .long exception_error /* 840 */ + .long exception_error + .long exception_error + .long exception_error + .long exception_error + .long exception_error + .long do_IRQ ! 56 CMT /* 900 */ + .long exception_error + .long exception_error + .long exception_error + .long do_IRQ ! 60 HAC + .long exception_error + .long exception_error + .long exception_error + .long do_IRQ ! PCI serr /* A00 */ + .long do_IRQ ! INTA + .long do_IRQ ! INTB + .long do_IRQ ! INTC + .long do_IRQ ! INTD + .long do_IRQ ! err + .long do_IRQ ! pwd3 + .long do_IRQ ! pwd2 + .long do_IRQ ! pwd1 /* B00 */ + .long do_IRQ ! pwd0 + .long exception_error + .long exception_error + .long do_IRQ ! SCIF-ch1 eri /* B80 */ + .long do_IRQ ! rxi + .long do_IRQ ! bri + .long do_IRQ ! txi + .long do_IRQ ! SIOF /* C00 */ + .long exception_error + .long exception_error + .long exception_error + .long do_IRQ ! HSPI /* C80 */ + .long exception_error + .long exception_error + .long exception_error + .long do_IRQ ! MMCIF fatat /* D00 */ + .long do_IRQ ! tran + .long do_IRQ ! err + .long do_IRQ ! frdy + .long do_IRQ ! DMAC dmint8 /* D80 */ + .long do_IRQ ! dmint9 + .long do_IRQ ! dmint10 + .long do_IRQ ! dmint11 + .long do_IRQ ! TMU-ch3 /* E00 */ + .long do_IRQ ! TMU-ch4 + .long do_IRQ ! TMU-ch5 + .long exception_error + .long do_IRQ ! SSI + .long exception_error + .long exception_error + .long exception_error + .long do_IRQ ! FLCTL flste /* F00 */ + .long do_IRQ ! fltend + .long do_IRQ ! fltrq0 + .long do_IRQ ! fltrq1 + .long do_IRQ ! GPIO gpioi0 /* F80 */ + .long do_IRQ ! gpioi1 + .long do_IRQ ! gpioi2 + .long do_IRQ ! gpioi3 #endif diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c index 42427b79697..c294de1e14a 100644 --- a/arch/sh/kernel/cpu/sh4/probe.c +++ b/arch/sh/kernel/cpu/sh4/probe.c @@ -3,7 +3,7 @@ * * CPU Subtype Probing for SH-4. 
* - * Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt + * Copyright (C) 2001 - 2006 Paul Mundt * Copyright (C) 2003 Richard Curnow * * This file is subject to the terms and conditions of the GNU General Public @@ -29,7 +29,7 @@ int __init detect_cpu_and_cache_system(void) [9] = (1 << 16) }; - pvr = (ctrl_inl(CCN_PVR) >> 8) & 0xffff; + pvr = (ctrl_inl(CCN_PVR) >> 8) & 0xffffff; prr = (ctrl_inl(CCN_PRR) >> 4) & 0xff; cvr = (ctrl_inl(CCN_CVR)); @@ -38,7 +38,6 @@ int __init detect_cpu_and_cache_system(void) */ cpu_data->icache.way_incr = (1 << 13); cpu_data->icache.entry_shift = 5; - cpu_data->icache.entry_mask = 0x1fe0; cpu_data->icache.sets = 256; cpu_data->icache.ways = 1; cpu_data->icache.linesz = L1_CACHE_BYTES; @@ -48,13 +47,29 @@ int __init detect_cpu_and_cache_system(void) */ cpu_data->dcache.way_incr = (1 << 14); cpu_data->dcache.entry_shift = 5; - cpu_data->dcache.entry_mask = 0x3fe0; cpu_data->dcache.sets = 512; cpu_data->dcache.ways = 1; cpu_data->dcache.linesz = L1_CACHE_BYTES; - /* Set the FPU flag, virtually all SH-4's have one */ - cpu_data->flags |= CPU_HAS_FPU; + /* + * Setup some generic flags we can probe + * (L2 and DSP detection only work on SH-4A) + */ + if (((pvr >> 16) & 0xff) == 0x10) { + if ((cvr & 0x02000000) == 0) + cpu_data->flags |= CPU_HAS_L2_CACHE; + if ((cvr & 0x10000000) == 0) + cpu_data->flags |= CPU_HAS_DSP; + + cpu_data->flags |= CPU_HAS_LLSC; + } + + /* FPU detection works for everyone */ + if ((cvr & 0x20000000) == 1) + cpu_data->flags |= CPU_HAS_FPU; + + /* Mask off the upper chip ID */ + pvr &= 0xffff; /* * Probe the underlying processor version/revision and @@ -63,56 +78,101 @@ int __init detect_cpu_and_cache_system(void) switch (pvr) { case 0x205: cpu_data->type = CPU_SH7750; - cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_PERF_COUNTER; + cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU | + CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA; break; case 0x206: cpu_data->type = CPU_SH7750S; - cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_PERF_COUNTER; + cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU | + CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA; break; case 0x1100: cpu_data->type = CPU_SH7751; + cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; break; case 0x2000: cpu_data->type = CPU_SH73180; cpu_data->icache.ways = 4; cpu_data->dcache.ways = 4; - cpu_data->flags &= ~CPU_HAS_FPU; + cpu_data->flags |= CPU_HAS_LLSC; + break; + case 0x2001: + case 0x2004: + cpu_data->type = CPU_SH7770; + cpu_data->icache.ways = 4; + cpu_data->dcache.ways = 4; + + cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_LLSC; + break; + case 0x2006: + case 0x200A: + if (prr == 0x61) + cpu_data->type = CPU_SH7781; + else + cpu_data->type = CPU_SH7780; + + cpu_data->icache.ways = 4; + cpu_data->dcache.ways = 4; + + cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PERF_COUNTER | + CPU_HAS_LLSC; + break; + case 0x3000: + case 0x3003: + cpu_data->type = CPU_SH7343; + cpu_data->icache.ways = 4; + cpu_data->dcache.ways = 4; + cpu_data->flags |= CPU_HAS_LLSC; break; case 0x8000: cpu_data->type = CPU_ST40RA; + cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; break; case 0x8100: cpu_data->type = CPU_ST40GX1; + cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; break; case 0x700: cpu_data->type = CPU_SH4_501; cpu_data->icache.ways = 2; cpu_data->dcache.ways = 2; - - /* No FPU on the SH4-500 series.. 
*/ - cpu_data->flags &= ~CPU_HAS_FPU; + cpu_data->flags |= CPU_HAS_PTEA; break; case 0x600: cpu_data->type = CPU_SH4_202; cpu_data->icache.ways = 2; cpu_data->dcache.ways = 2; + cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; break; case 0x500 ... 0x501: switch (prr) { - case 0x10: cpu_data->type = CPU_SH7750R; break; - case 0x11: cpu_data->type = CPU_SH7751R; break; - case 0x50: cpu_data->type = CPU_SH7760; break; + case 0x10: + cpu_data->type = CPU_SH7750R; + break; + case 0x11: + cpu_data->type = CPU_SH7751R; + break; + case 0x50 ... 0x5f: + cpu_data->type = CPU_SH7760; + break; } cpu_data->icache.ways = 2; cpu_data->dcache.ways = 2; + cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; + break; default: cpu_data->type = CPU_SH_NONE; break; } +#ifdef CONFIG_SH_DIRECT_MAPPED + cpu_data->icache.ways = 1; + cpu_data->dcache.ways = 1; +#endif + /* * On anything that's not a direct-mapped cache, look to the CVR * for I/D-cache specifics. @@ -121,18 +181,56 @@ int __init detect_cpu_and_cache_system(void) size = sizes[(cvr >> 20) & 0xf]; cpu_data->icache.way_incr = (size >> 1); cpu_data->icache.sets = (size >> 6); - cpu_data->icache.entry_mask = - (cpu_data->icache.way_incr - (1 << 5)); + } + /* Setup the rest of the I-cache info */ + cpu_data->icache.entry_mask = cpu_data->icache.way_incr - + cpu_data->icache.linesz; + + cpu_data->icache.way_size = cpu_data->icache.sets * + cpu_data->icache.linesz; + + /* And the rest of the D-cache */ if (cpu_data->dcache.ways > 1) { size = sizes[(cvr >> 16) & 0xf]; cpu_data->dcache.way_incr = (size >> 1); cpu_data->dcache.sets = (size >> 6); - cpu_data->dcache.entry_mask = - (cpu_data->dcache.way_incr - (1 << 5)); + } + + cpu_data->dcache.entry_mask = cpu_data->dcache.way_incr - + cpu_data->dcache.linesz; + + cpu_data->dcache.way_size = cpu_data->dcache.sets * + cpu_data->dcache.linesz; + + /* + * Setup the L2 cache desc + * + * SH-4A's have an optional PIPT L2. + */ + if (cpu_data->flags & CPU_HAS_L2_CACHE) { + /* + * Size calculation is much more sensible + * than it is for the L1. + * + * Sizes are 128KB, 258KB, 512KB, and 1MB. + */ + size = (cvr & 0xf) << 17; + + BUG_ON(!size); + + cpu_data->scache.way_incr = (1 << 16); + cpu_data->scache.entry_shift = 5; + cpu_data->scache.ways = 4; + cpu_data->scache.linesz = L1_CACHE_BYTES; + cpu_data->scache.entry_mask = + (cpu_data->scache.way_incr - cpu_data->scache.linesz); + cpu_data->scache.sets = size / + (cpu_data->scache.linesz * cpu_data->scache.ways); + cpu_data->scache.way_size = + (cpu_data->scache.sets * cpu_data->scache.linesz); } return 0; } - diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c new file mode 100644 index 00000000000..6e4e9654135 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c @@ -0,0 +1,43 @@ +/* + * SH4-202 Setup + * + * Copyright (C) 2006 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <asm/sci.h> + +static struct plat_sci_port sci_platform_data[] = { + { + .mapbase = 0xffe80000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 40, 41, 43, 42 }, + }, { + .flags = 0, + } +}; + +static struct platform_device sci_device = { + .name = "sh-sci", + .id = -1, + .dev = { + .platform_data = sci_platform_data, + }, +}; + +static struct platform_device *sh4202_devices[] __initdata = { + &sci_device, +}; + +static int __init sh4202_devices_setup(void) +{ + return platform_add_devices(sh4202_devices, + ARRAY_SIZE(sh4202_devices)); +} +__initcall(sh4202_devices_setup); diff --git a/arch/sh/kernel/cpu/sh4/setup-sh73180.c b/arch/sh/kernel/cpu/sh4/setup-sh73180.c new file mode 100644 index 00000000000..cc9ea1e2e5d --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/setup-sh73180.c @@ -0,0 +1,43 @@ +/* + * SH73180 Setup + * + * Copyright (C) 2006 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <asm/sci.h> + +static struct plat_sci_port sci_platform_data[] = { + { + .mapbase = 0xffe80000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 80, 81, 83, 82 }, + }, { + .flags = 0, + } +}; + +static struct platform_device sci_device = { + .name = "sh-sci", + .id = -1, + .dev = { + .platform_data = sci_platform_data, + }, +}; + +static struct platform_device *sh73180_devices[] __initdata = { + &sci_device, +}; + +static int __init sh73180_devices_setup(void) +{ + return platform_add_devices(sh73180_devices, + ARRAY_SIZE(sh73180_devices)); +} +__initcall(sh73180_devices_setup); diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7343.c b/arch/sh/kernel/cpu/sh4/setup-sh7343.c new file mode 100644 index 00000000000..91d61cf91ba --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/setup-sh7343.c @@ -0,0 +1,43 @@ +/* + * SH7343 Setup + * + * Copyright (C) 2006 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <asm/sci.h> + +static struct plat_sci_port sci_platform_data[] = { + { + .mapbase = 0xffe00000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 80, 81, 83, 82 }, + }, { + .flags = 0, + } +}; + +static struct platform_device sci_device = { + .name = "sh-sci", + .id = -1, + .dev = { + .platform_data = sci_platform_data, + }, +}; + +static struct platform_device *sh7343_devices[] __initdata = { + &sci_device, +}; + +static int __init sh7343_devices_setup(void) +{ + return platform_add_devices(sh7343_devices, + ARRAY_SIZE(sh7343_devices)); +} +__initcall(sh7343_devices_setup); diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c new file mode 100644 index 00000000000..50812d57c1c --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c @@ -0,0 +1,48 @@ +/* + * SH7750/SH7751 Setup + * + * Copyright (C) 2006 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <asm/sci.h> + +static struct plat_sci_port sci_platform_data[] = { + { + .mapbase = 0xffe00000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCI, + .irqs = { 23, 24, 25, 0 }, + }, { + .mapbase = 0xffe80000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 40, 41, 43, 42 }, + }, { + .flags = 0, + } +}; + +static struct platform_device sci_device = { + .name = "sh-sci", + .id = -1, + .dev = { + .platform_data = sci_platform_data, + }, +}; + +static struct platform_device *sh7750_devices[] __initdata = { + &sci_device, +}; + +static int __init sh7750_devices_setup(void) +{ + return platform_add_devices(sh7750_devices, + ARRAY_SIZE(sh7750_devices)); +} +__initcall(sh7750_devices_setup); diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c new file mode 100644 index 00000000000..97f1c9af35d --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c @@ -0,0 +1,53 @@ +/* + * SH7760 Setup + * + * Copyright (C) 2006 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <asm/sci.h> + +static struct plat_sci_port sci_platform_data[] = { + { + .mapbase = 0xfe600000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 52, 53, 55, 54 }, + }, { + .mapbase = 0xfe610000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 72, 73, 75, 74 }, + }, { + .mapbase = 0xfe620000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 76, 77, 79, 78 }, + }, { + .flags = 0, + } +}; + +static struct platform_device sci_device = { + .name = "sh-sci", + .id = -1, + .dev = { + .platform_data = sci_platform_data, + }, +}; + +static struct platform_device *sh7760_devices[] __initdata = { + &sci_device, +}; + +static int __init sh7760_devices_setup(void) +{ + return platform_add_devices(sh7760_devices, + ARRAY_SIZE(sh7760_devices)); +} +__initcall(sh7760_devices_setup); diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7770.c b/arch/sh/kernel/cpu/sh4/setup-sh7770.c new file mode 100644 index 00000000000..6a04cc5f5ac --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/setup-sh7770.c @@ -0,0 +1,53 @@ +/* + * SH7770 Setup + * + * Copyright (C) 2006 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <asm/sci.h> + +static struct plat_sci_port sci_platform_data[] = { + { + .mapbase = 0xff923000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 61, 61, 61, 61 }, + }, { + .mapbase = 0xff924000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 62, 62, 62, 62 }, + }, { + .mapbase = 0xff925000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 63, 63, 63, 63 }, + }, { + .flags = 0, + } +}; + +static struct platform_device sci_device = { + .name = "sh-sci", + .id = -1, + .dev = { + .platform_data = sci_platform_data, + }, +}; + +static struct platform_device *sh7770_devices[] __initdata = { + &sci_device, +}; + +static int __init sh7770_devices_setup(void) +{ + return platform_add_devices(sh7770_devices, + ARRAY_SIZE(sh7770_devices)); +} +__initcall(sh7770_devices_setup); diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7780.c b/arch/sh/kernel/cpu/sh4/setup-sh7780.c new file mode 100644 index 00000000000..72493f259ed --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/setup-sh7780.c @@ -0,0 +1,79 @@ +/* + * SH7780 Setup + * + * Copyright (C) 2006 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <asm/sci.h> + +static struct resource rtc_resources[] = { + [0] = { + .start = 0xffe80000, + .end = 0xffe80000 + 0x58 - 1, + .flags = IORESOURCE_IO, + }, + [1] = { + /* Period IRQ */ + .start = 21, + .flags = IORESOURCE_IRQ, + }, + [2] = { + /* Carry IRQ */ + .start = 22, + .flags = IORESOURCE_IRQ, + }, + [3] = { + /* Alarm IRQ */ + .start = 23, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, +}; + +static struct plat_sci_port sci_platform_data[] = { + { + .mapbase = 0xffe00000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 40, 41, 43, 42 }, + }, { + .mapbase = 0xffe10000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 76, 77, 79, 78 }, + }, { + .flags = 0, + } +}; + +static struct platform_device sci_device = { + .name = "sh-sci", + .id = -1, + .dev = { + .platform_data = sci_platform_data, + }, +}; + +static struct platform_device *sh7780_devices[] __initdata = { + &rtc_device, + &sci_device, +}; + +static int __init sh7780_devices_setup(void) +{ + return platform_add_devices(sh7780_devices, + ARRAY_SIZE(sh7780_devices)); +} +__initcall(sh7780_devices_setup); diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c index b09805f3ee2..7bcc73f9b8d 100644 --- a/arch/sh/kernel/cpu/sh4/sq.c +++ b/arch/sh/kernel/cpu/sh4/sq.c @@ -1,49 +1,52 @@ /* - * arch/sh/kernel/cpu/sq.c + * arch/sh/kernel/cpu/sh4/sq.c * * General management API for SH-4 integrated Store Queues * - * Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt + * Copyright (C) 2001 - 2006 Paul Mundt * Copyright (C) 2001, 2002 M. R. Brown * - * Some of this code has been adopted directly from the old arch/sh/mm/sq.c - * hack that was part of the LinuxDC project. For all intents and purposes, - * this is a completely new interface that really doesn't have much in common - * with the old zone-based approach at all. In fact, it's only listed here for - * general completeness. 
- * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> +#include <linux/cpu.h> +#include <linux/bitmap.h> +#include <linux/sysdev.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> -#include <linux/list.h> -#include <linux/proc_fs.h> -#include <linux/miscdevice.h> #include <linux/vmalloc.h> - +#include <linux/mm.h> #include <asm/io.h> #include <asm/page.h> -#include <asm/mmu_context.h> +#include <asm/cacheflush.h> #include <asm/cpu/sq.h> -static LIST_HEAD(sq_mapping_list); +struct sq_mapping; + +struct sq_mapping { + const char *name; + + unsigned long sq_addr; + unsigned long addr; + unsigned int size; + + struct sq_mapping *next; +}; + +static struct sq_mapping *sq_mapping_list; static DEFINE_SPINLOCK(sq_mapping_lock); +static kmem_cache_t *sq_cache; +static unsigned long *sq_bitmap; -/** - * sq_flush - Flush (prefetch) the store queue cache - * @addr: the store queue address to flush - * - * Executes a prefetch instruction on the specified store queue cache, - * so that the cached data is written to physical memory. - */ -inline void sq_flush(void *addr) -{ - __asm__ __volatile__ ("pref @%0" : : "r" (addr) : "memory"); -} +#define store_queue_barrier() \ +do { \ + (void)ctrl_inl(P4SEG_STORE_QUE); \ + ctrl_outl(0, P4SEG_STORE_QUE + 0); \ + ctrl_outl(0, P4SEG_STORE_QUE + 8); \ +} while (0); /** * sq_flush_range - Flush (prefetch) a specific SQ range @@ -56,152 +59,73 @@ inline void sq_flush(void *addr) void sq_flush_range(unsigned long start, unsigned int len) { volatile unsigned long *sq = (unsigned long *)start; - unsigned long dummy; /* Flush the queues */ for (len >>= 5; len--; sq += 8) - sq_flush((void *)sq); + prefetchw((void *)sq); /* Wait for completion */ - dummy = ctrl_inl(P4SEG_STORE_QUE); - - ctrl_outl(0, P4SEG_STORE_QUE + 0); - ctrl_outl(0, P4SEG_STORE_QUE + 8); + store_queue_barrier(); } -static struct sq_mapping *__sq_alloc_mapping(unsigned long virt, unsigned long phys, unsigned long size, const char *name) +static inline void sq_mapping_list_add(struct sq_mapping *map) { - struct sq_mapping *map; - - if (virt + size > SQ_ADDRMAX) - return ERR_PTR(-ENOSPC); - - map = kmalloc(sizeof(struct sq_mapping), GFP_KERNEL); - if (!map) - return ERR_PTR(-ENOMEM); + struct sq_mapping **p, *tmp; - INIT_LIST_HEAD(&map->list); + spin_lock_irq(&sq_mapping_lock); - map->sq_addr = virt; - map->addr = phys; - map->size = size + 1; - map->name = name; + p = &sq_mapping_list; + while ((tmp = *p) != NULL) + p = &tmp->next; - list_add(&map->list, &sq_mapping_list); + map->next = tmp; + *p = map; - return map; + spin_unlock_irq(&sq_mapping_lock); } -static unsigned long __sq_get_next_addr(void) +static inline void sq_mapping_list_del(struct sq_mapping *map) { - if (!list_empty(&sq_mapping_list)) { - struct list_head *pos, *tmp; - - /* - * Read one off the list head, as it will have the highest - * mapped allocation. Set the next one up right above it. - * - * This is somewhat sub-optimal, as we don't look at - * gaps between allocations or anything lower then the - * highest-level allocation. - * - * However, in the interest of performance and the general - * lack of desire to do constant list rebalancing, we don't - * worry about it. 
- */ - list_for_each_safe(pos, tmp, &sq_mapping_list) { - struct sq_mapping *entry; - - entry = list_entry(pos, typeof(*entry), list); - - return entry->sq_addr + entry->size; + struct sq_mapping **p, *tmp; + + spin_lock_irq(&sq_mapping_lock); + + for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next) + if (tmp == map) { + *p = tmp->next; + break; } - } - return P4SEG_STORE_QUE; + spin_unlock_irq(&sq_mapping_lock); } -/** - * __sq_remap - Perform a translation from the SQ to a phys addr - * @map: sq mapping containing phys and store queue addresses. - * - * Maps the store queue address specified in the mapping to the physical - * address specified in the mapping. - */ -static struct sq_mapping *__sq_remap(struct sq_mapping *map) +static int __sq_remap(struct sq_mapping *map, unsigned long flags) { - unsigned long flags, pteh, ptel; +#if defined(CONFIG_MMU) struct vm_struct *vma; - pgprot_t pgprot; - - /* - * Without an MMU (or with it turned off), this is much more - * straightforward, as we can just load up each queue's QACR with - * the physical address appropriately masked. - */ - - ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0); - ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1); -#ifdef CONFIG_MMU - /* - * With an MMU on the other hand, things are slightly more involved. - * Namely, we have to have a direct mapping between the SQ addr and - * the associated physical address in the UTLB by way of setting up - * a virt<->phys translation by hand. We do this by simply specifying - * the SQ addr in UTLB.VPN and the associated physical address in - * UTLB.PPN. - * - * Notably, even though this is a special case translation, and some - * of the configuration bits are meaningless, we're still required - * to have a valid ASID context in PTEH. - * - * We could also probably get by without explicitly setting PTEA, but - * we do it here just for good measure. - */ - spin_lock_irqsave(&sq_mapping_lock, flags); - - pteh = map->sq_addr; - ctrl_outl((pteh & MMU_VPN_MASK) | get_asid(), MMU_PTEH); - - ptel = map->addr & PAGE_MASK; - ctrl_outl(((ptel >> 28) & 0xe) | (ptel & 0x1), MMU_PTEA); - - pgprot = pgprot_noncached(PAGE_KERNEL); - - ptel &= _PAGE_FLAGS_HARDWARE_MASK; - ptel |= pgprot_val(pgprot); - ctrl_outl(ptel, MMU_PTEL); - - __asm__ __volatile__ ("ldtlb" : : : "memory"); - - spin_unlock_irqrestore(&sq_mapping_lock, flags); - - /* - * Next, we need to map ourselves in the kernel page table, so that - * future accesses after a TLB flush will be handled when we take a - * page fault. - * - * Theoretically we could just do this directly and not worry about - * setting up the translation by hand ahead of time, but for the - * cases where we want a one-shot SQ mapping followed by a quick - * writeout before we hit the TLB flush, we do it anyways. This way - * we at least save ourselves the initial page fault overhead. - */ vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX); if (!vma) - return ERR_PTR(-ENOMEM); + return -ENOMEM; vma->phys_addr = map->addr; if (remap_area_pages((unsigned long)vma->addr, vma->phys_addr, - map->size, pgprot_val(pgprot))) { + map->size, flags)) { vunmap(vma->addr); - return NULL; + return -EAGAIN; } -#endif /* CONFIG_MMU */ +#else + /* + * Without an MMU (or with it turned off), this is much more + * straightforward, as we can just load up each queue's QACR with + * the physical address appropriately masked. 
+ */ + ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0); + ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1); +#endif - return map; + return 0; } /** @@ -209,42 +133,65 @@ static struct sq_mapping *__sq_remap(struct sq_mapping *map) * @phys: Physical address of mapping. * @size: Length of mapping. * @name: User invoking mapping. + * @flags: Protection flags. * * Remaps the physical address @phys through the next available store queue * address of @size length. @name is logged at boot time as well as through - * the procfs interface. - * - * A pre-allocated and filled sq_mapping pointer is returned, and must be - * cleaned up with a call to sq_unmap() when the user is done with the - * mapping. + * the sysfs interface. */ -struct sq_mapping *sq_remap(unsigned long phys, unsigned int size, const char *name) +unsigned long sq_remap(unsigned long phys, unsigned int size, + const char *name, unsigned long flags) { struct sq_mapping *map; - unsigned long virt, end; + unsigned long end; unsigned int psz; + int ret, page; /* Don't allow wraparound or zero size */ end = phys + size - 1; - if (!size || end < phys) - return NULL; + if (unlikely(!size || end < phys)) + return -EINVAL; /* Don't allow anyone to remap normal memory.. */ - if (phys < virt_to_phys(high_memory)) - return NULL; + if (unlikely(phys < virt_to_phys(high_memory))) + return -EINVAL; phys &= PAGE_MASK; + size = PAGE_ALIGN(end + 1) - phys; + + map = kmem_cache_alloc(sq_cache, GFP_KERNEL); + if (unlikely(!map)) + return -ENOMEM; + + map->addr = phys; + map->size = size; + map->name = name; + + page = bitmap_find_free_region(sq_bitmap, 0x04000000, + get_order(map->size)); + if (unlikely(page < 0)) { + ret = -ENOSPC; + goto out; + } + + map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT); + + ret = __sq_remap(map, flags); + if (unlikely(ret != 0)) + goto out; + + psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + pr_info("sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n", + likely(map->name) ? map->name : "???", + psz, psz == 1 ? " " : "s", + map->sq_addr, map->addr); - size = PAGE_ALIGN(end + 1) - phys; - virt = __sq_get_next_addr(); - psz = (size + (PAGE_SIZE - 1)) / PAGE_SIZE; - map = __sq_alloc_mapping(virt, phys, size, name); + sq_mapping_list_add(map); - printk("sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n", - map->name ? map->name : "???", - psz, psz == 1 ? " " : "s", - map->sq_addr, map->addr); + return map->sq_addr; - return __sq_remap(map); +out: + kmem_cache_free(sq_cache, map); + return ret; } /** @@ -255,188 +202,198 @@ struct sq_mapping *sq_remap(unsigned long phys, unsigned int size, const char *n * sq_remap(). Also frees up the pte that was previously inserted into * the kernel page table and discards the UTLB translation. */ -void sq_unmap(struct sq_mapping *map) +void sq_unmap(unsigned long vaddr) { - if (map->sq_addr > (unsigned long)high_memory) - vfree((void *)(map->sq_addr & PAGE_MASK)); + struct sq_mapping **p, *map; + struct vm_struct *vma; + int page; - list_del(&map->list); - kfree(map); -} + for (p = &sq_mapping_list; (map = *p); p = &map->next) + if (map->sq_addr == vaddr) + break; -/** - * sq_clear - Clear a store queue range - * @addr: Address to start clearing from. - * @len: Length to clear. - * - * A quick zero-fill implementation for clearing out memory that has been - * remapped through the store queues. 
- */ -void sq_clear(unsigned long addr, unsigned int len) -{ - int i; + if (unlikely(!map)) { + printk("%s: bad store queue address 0x%08lx\n", + __FUNCTION__, vaddr); + return; + } - /* Clear out both queues linearly */ - for (i = 0; i < 8; i++) { - ctrl_outl(0, addr + i + 0); - ctrl_outl(0, addr + i + 8); + page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT; + bitmap_release_region(sq_bitmap, page, get_order(map->size)); + +#ifdef CONFIG_MMU + vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK)); + if (!vma) { + printk(KERN_ERR "%s: bad address 0x%08lx\n", + __FUNCTION__, map->sq_addr); + return; } +#endif + + sq_mapping_list_del(map); - sq_flush_range(addr, len); + kmem_cache_free(sq_cache, map); } -/** - * sq_vma_unmap - Unmap a VMA range - * @area: VMA containing range. - * @addr: Start of range. - * @len: Length of range. +/* + * Needlessly complex sysfs interface. Unfortunately it doesn't seem like + * there is any other easy way to add things on a per-cpu basis without + * putting the directory entries somewhere stupid and having to create + * links in sysfs by hand back in to the per-cpu directories. * - * Searches the sq_mapping_list for a mapping matching the sq addr @addr, - * and subsequently frees up the entry. Further cleanup is done by generic - * code. + * Some day we may want to have an additional abstraction per store + * queue, but considering the kobject hell we already have to deal with, + * it's simply not worth the trouble. */ -static void sq_vma_unmap(struct vm_area_struct *area, - unsigned long addr, size_t len) -{ - struct list_head *pos, *tmp; +static struct kobject *sq_kobject[NR_CPUS]; - list_for_each_safe(pos, tmp, &sq_mapping_list) { - struct sq_mapping *entry; +struct sq_sysfs_attr { + struct attribute attr; + ssize_t (*show)(char *buf); + ssize_t (*store)(const char *buf, size_t count); +}; - entry = list_entry(pos, typeof(*entry), list); +#define to_sq_sysfs_attr(attr) container_of(attr, struct sq_sysfs_attr, attr) - if (entry->sq_addr == addr) { - /* - * We could probably get away without doing the tlb flush - * here, as generic code should take care of most of this - * when unmapping the rest of the VMA range for us. Leave - * it in for added sanity for the time being.. - */ - __flush_tlb_page(get_asid(), entry->sq_addr & PAGE_MASK); +static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr); - list_del(&entry->list); - kfree(entry); + if (likely(sattr->show)) + return sattr->show(buf); - return; - } - } + return -EIO; } -/** - * sq_vma_sync - Sync a VMA range - * @area: VMA containing range. - * @start: Start of range. - * @len: Length of range. - * @flags: Additional flags. - * - * Synchronizes an sq mapped range by flushing the store queue cache for - * the duration of the mapping. - * - * Used internally for user mappings, which must use msync() to prefetch - * the store queue cache. 
- */ -static int sq_vma_sync(struct vm_area_struct *area, - unsigned long start, size_t len, unsigned int flags) +static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) { - sq_flush_range(start, len); + struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr); - return 0; + if (likely(sattr->store)) + return sattr->store(buf, count); + + return -EIO; } -static struct vm_operations_struct sq_vma_ops = { - .unmap = sq_vma_unmap, - .sync = sq_vma_sync, -}; +static ssize_t mapping_show(char *buf) +{ + struct sq_mapping **list, *entry; + char *p = buf; -/** - * sq_mmap - mmap() for /dev/cpu/sq - * @file: unused. - * @vma: VMA to remap. - * - * Remap the specified vma @vma through the store queues, and setup associated - * information for the new mapping. Also build up the page tables for the new - * area. - */ -static int sq_mmap(struct file *file, struct vm_area_struct *vma) + for (list = &sq_mapping_list; (entry = *list); list = &entry->next) + p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n", + entry->sq_addr, entry->sq_addr + entry->size, + entry->addr, entry->name); + + return p - buf; +} + +static ssize_t mapping_store(const char *buf, size_t count) { - unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; - unsigned long size = vma->vm_end - vma->vm_start; - struct sq_mapping *map; + unsigned long base = 0, len = 0; - /* - * We're not interested in any arbitrary virtual address that has - * been stuck in the VMA, as we already know what addresses we - * want. Save off the size, and reposition the VMA to begin at - * the next available sq address. - */ - vma->vm_start = __sq_get_next_addr(); - vma->vm_end = vma->vm_start + size; + sscanf(buf, "%lx %lx", &base, &len); + if (!base) + return -EIO; - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (likely(len)) { + int ret = sq_remap(base, len, "Userspace", + pgprot_val(PAGE_SHARED)); + if (ret < 0) + return ret; + } else + sq_unmap(base); - vma->vm_flags |= VM_IO | VM_RESERVED; + return count; +} - map = __sq_alloc_mapping(vma->vm_start, offset, size, "Userspace"); +static struct sq_sysfs_attr mapping_attr = + __ATTR(mapping, 0644, mapping_show, mapping_store); - if (io_remap_pfn_range(vma, map->sq_addr, map->addr >> PAGE_SHIFT, - size, vma->vm_page_prot)) - return -EAGAIN; +static struct attribute *sq_sysfs_attrs[] = { + &mapping_attr.attr, + NULL, +}; - vma->vm_ops = &sq_vma_ops; +static struct sysfs_ops sq_sysfs_ops = { + .show = sq_sysfs_show, + .store = sq_sysfs_store, +}; - return 0; -} +static struct kobj_type ktype_percpu_entry = { + .sysfs_ops = &sq_sysfs_ops, + .default_attrs = sq_sysfs_attrs, +}; -#ifdef CONFIG_PROC_FS -static int sq_mapping_read_proc(char *buf, char **start, off_t off, - int len, int *eof, void *data) +static int __devinit sq_sysdev_add(struct sys_device *sysdev) { - struct list_head *pos; - char *p = buf; + unsigned int cpu = sysdev->id; + struct kobject *kobj; - list_for_each_prev(pos, &sq_mapping_list) { - struct sq_mapping *entry; + sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL); + if (unlikely(!sq_kobject[cpu])) + return -ENOMEM; - entry = list_entry(pos, typeof(*entry), list); + kobj = sq_kobject[cpu]; + kobj->parent = &sysdev->kobj; + kobject_set_name(kobj, "%s", "sq"); + kobj->ktype = &ktype_percpu_entry; - p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n", entry->sq_addr, - entry->sq_addr + entry->size - 1, entry->addr, - entry->name); - } - - return p - buf; + return kobject_register(kobj); } -#endif -static struct file_operations sq_fops 
= { - .owner = THIS_MODULE, - .mmap = sq_mmap, -}; +static int __devexit sq_sysdev_remove(struct sys_device *sysdev) +{ + unsigned int cpu = sysdev->id; + struct kobject *kobj = sq_kobject[cpu]; -static struct miscdevice sq_dev = { - .minor = STORE_QUEUE_MINOR, - .name = "sq", - .fops = &sq_fops, + kobject_unregister(kobj); + return 0; +} + +static struct sysdev_driver sq_sysdev_driver = { + .add = sq_sysdev_add, + .remove = __devexit_p(sq_sysdev_remove), }; static int __init sq_api_init(void) { - int ret; + unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT; + unsigned int size = (nr_pages + (BITS_PER_LONG - 1)) / BITS_PER_LONG; + int ret = -ENOMEM; + printk(KERN_NOTICE "sq: Registering store queue API.\n"); - create_proc_read_entry("sq_mapping", 0, 0, sq_mapping_read_proc, 0); + sq_cache = kmem_cache_create("store_queue_cache", + sizeof(struct sq_mapping), 0, 0, + NULL, NULL); + if (unlikely(!sq_cache)) + return ret; - ret = misc_register(&sq_dev); - if (ret) - remove_proc_entry("sq_mapping", NULL); + sq_bitmap = kzalloc(size, GFP_KERNEL); + if (unlikely(!sq_bitmap)) + goto out; + + ret = sysdev_driver_register(&cpu_sysdev_class, &sq_sysdev_driver); + if (unlikely(ret != 0)) + goto out; + + return 0; + +out: + kfree(sq_bitmap); + kmem_cache_destroy(sq_cache); return ret; } static void __exit sq_api_exit(void) { - misc_deregister(&sq_dev); - remove_proc_entry("sq_mapping", NULL); + sysdev_driver_unregister(&cpu_sysdev_class, &sq_sysdev_driver); + kfree(sq_bitmap); + kmem_cache_destroy(sq_cache); } module_init(sq_api_init); @@ -445,11 +402,7 @@ module_exit(sq_api_exit); MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>"); MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(STORE_QUEUE_MINOR); EXPORT_SYMBOL(sq_remap); EXPORT_SYMBOL(sq_unmap); -EXPORT_SYMBOL(sq_clear); -EXPORT_SYMBOL(sq_flush); EXPORT_SYMBOL(sq_flush_range); - |
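
Note on the store queue rework in sq.c above: sq_remap() now takes a protection-flags argument and returns the mapped store queue address (with a negative errno encoded in the unsigned long on failure), and sq_unmap() takes that returned address rather than a struct sq_mapping pointer. A minimal caller sketch against the new signatures follows; the region and symbol names (FB_PHYS, FB_SIZE, fb_sq, fb_sq_init) are hypothetical placeholders and not part of this patch.

    #include <linux/init.h>
    #include <linux/err.h>
    #include <asm/page.h>
    #include <asm/pgtable.h>
    #include <asm/cpu/sq.h>

    /* Hypothetical physical region to burst-write through the store queues. */
    #define FB_PHYS 0x10000000UL
    #define FB_SIZE (512 * 1024)

    static unsigned long fb_sq;

    static int __init fb_sq_init(void)
    {
            /* New API: returns the SQ virtual address, or a negative errno. */
            fb_sq = sq_remap(FB_PHYS, FB_SIZE, "framebuffer",
                             pgprot_val(PAGE_SHARED));
            if (IS_ERR_VALUE(fb_sq))
                    return (int)fb_sq;

            /* ... fill the mapping, then flush the queued writes ... */
            sq_flush_range(fb_sq, FB_SIZE);
            return 0;
    }

    static void __exit fb_sq_exit(void)
    {
            /* New API: unmap by the address sq_remap() returned. */
            sq_unmap(fb_sq);
    }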