Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/Kconfig | 4
-rw-r--r--  arch/alpha/defconfig | 2
-rw-r--r--  arch/alpha/mm/numa.c | 16
-rw-r--r--  arch/arm/Kconfig | 4
-rw-r--r--  arch/arm/boot/install.sh | 4
-rw-r--r--  arch/arm/configs/s3c2410_defconfig | 27
-rw-r--r--  arch/arm/mach-ixp2000/core.c | 85
-rw-r--r--  arch/arm/mach-ixp4xx/common.c | 8
-rw-r--r--  arch/arm/mach-s3c2410/mach-bast.c | 49
-rw-r--r--  arch/arm/mach-s3c2410/mach-vr1000.c | 77
-rw-r--r--  arch/arm/mm/proc-v6.S | 6
-rw-r--r--  arch/arm/nwfpe/softfloat-macros | 22
-rw-r--r--  arch/arm/nwfpe/softfloat.c | 12
-rw-r--r--  arch/arm26/Kconfig | 2
-rw-r--r--  arch/arm26/boot/install.sh | 4
-rw-r--r--  arch/cris/Kconfig | 2
-rw-r--r--  arch/frv/Kconfig | 2
-rw-r--r--  arch/h8300/Kconfig.cpu | 3
-rw-r--r--  arch/h8300/platform/h8300h/ptrace_h8300h.c | 4
-rw-r--r--  arch/i386/Kconfig | 40
-rw-r--r--  arch/i386/Makefile | 7
-rw-r--r--  arch/i386/boot/install.sh | 4
-rw-r--r--  arch/i386/kernel/apic.c | 2
-rw-r--r--  arch/i386/kernel/cpu/common.c | 2
-rw-r--r--  arch/i386/kernel/cpu/mtrr/main.c | 23
-rw-r--r--  arch/i386/kernel/cpu/proc.c | 2
-rw-r--r--  arch/i386/kernel/i386_ksyms.c | 160
-rw-r--r--  arch/i386/kernel/i387.c | 3
-rw-r--r--  arch/i386/kernel/io_apic.c | 13
-rw-r--r--  arch/i386/kernel/kprobes.c | 176
-rw-r--r--  arch/i386/kernel/mpparse.c | 31
-rw-r--r--  arch/i386/kernel/nmi.c | 24
-rw-r--r--  arch/i386/kernel/pci-dma.c | 3
-rw-r--r--  arch/i386/kernel/process.c | 36
-rw-r--r--  arch/i386/kernel/ptrace.c | 2
-rw-r--r--  arch/i386/kernel/reboot.c | 5
-rw-r--r--  arch/i386/kernel/setup.c | 28
-rw-r--r--  arch/i386/kernel/signal.c | 31
-rw-r--r--  arch/i386/kernel/smp.c | 3
-rw-r--r--  arch/i386/kernel/smpboot.c | 14
-rw-r--r--  arch/i386/kernel/time.c | 6
-rw-r--r--  arch/i386/kernel/timers/common.c | 12
-rw-r--r--  arch/i386/kernel/timers/timer.c | 9
-rw-r--r--  arch/i386/kernel/timers/timer_hpet.c | 3
-rw-r--r--  arch/i386/kernel/timers/timer_pm.c | 1
-rw-r--r--  arch/i386/kernel/timers/timer_tsc.c | 8
-rw-r--r--  arch/i386/kernel/traps.c | 21
-rw-r--r--  arch/i386/lib/dec_and_lock.c | 2
-rw-r--r--  arch/i386/lib/delay.c | 6
-rw-r--r--  arch/i386/lib/mmx.c | 5
-rw-r--r--  arch/i386/lib/usercopy.c | 8
-rw-r--r--  arch/i386/mach-voyager/voyager_smp.c | 2
-rw-r--r--  arch/i386/mm/Makefile | 2
-rw-r--r--  arch/i386/mm/discontig.c | 127
-rw-r--r--  arch/i386/mm/highmem.c | 6
-rw-r--r--  arch/i386/mm/init.c | 21
-rw-r--r--  arch/i386/mm/ioremap.c | 5
-rw-r--r--  arch/i386/mm/pgtable.c | 10
-rw-r--r--  arch/i386/oprofile/backtrace.c | 2
-rw-r--r--  arch/i386/pci/irq.c | 22
-rw-r--r--  arch/i386/pci/pcbios.c | 4
-rw-r--r--  arch/i386/power/cpu.c | 14
-rw-r--r--  arch/ia64/Kconfig | 6
-rw-r--r--  arch/ia64/Kconfig.debug | 11
-rw-r--r--  arch/ia64/configs/sn2_defconfig | 2
-rw-r--r--  arch/ia64/defconfig | 2
-rw-r--r--  arch/ia64/ia32/ia32priv.h | 2
-rw-r--r--  arch/ia64/kernel/Makefile | 1
-rw-r--r--  arch/ia64/kernel/jprobes.S | 61
-rw-r--r--  arch/ia64/kernel/kprobes.c | 601
-rw-r--r--  arch/ia64/kernel/traps.c | 33
-rw-r--r--  arch/ia64/mm/discontig.c | 9
-rw-r--r--  arch/ia64/mm/fault.c | 8
-rw-r--r--  arch/m32r/Kconfig | 4
-rw-r--r--  arch/m32r/mm/init.c | 4
-rw-r--r--  arch/m68k/Kconfig | 2
-rw-r--r--  arch/m68knommu/Kconfig | 2
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/mips/configs/ip27_defconfig | 2
-rw-r--r--  arch/mips/sgi-ip27/ip27-memory.c | 5
-rw-r--r--  arch/parisc/Kconfig | 4
-rw-r--r--  arch/parisc/mm/init.c | 2
-rw-r--r--  arch/ppc/Kconfig | 2
-rw-r--r--  arch/ppc/boot/simple/misc.c | 2
-rw-r--r--  arch/ppc/boot/simple/mpc10x_memory.c | 2
-rw-r--r--  arch/ppc64/Kconfig | 63
-rw-r--r--  arch/ppc64/Makefile | 2
-rw-r--r--  arch/ppc64/boot/install.sh | 4
-rw-r--r--  arch/ppc64/configs/pSeries_defconfig | 2
-rw-r--r--  arch/ppc64/defconfig | 2
-rw-r--r--  arch/ppc64/kernel/Makefile | 11
-rw-r--r--  arch/ppc64/kernel/bpa_iic.c | 270
-rw-r--r--  arch/ppc64/kernel/bpa_iic.h | 62
-rw-r--r--  arch/ppc64/kernel/bpa_iommu.c | 377
-rw-r--r--  arch/ppc64/kernel/bpa_iommu.h | 65
-rw-r--r--  arch/ppc64/kernel/bpa_nvram.c | 118
-rw-r--r--  arch/ppc64/kernel/bpa_setup.c | 140
-rw-r--r--  arch/ppc64/kernel/cpu_setup_power4.S | 16
-rw-r--r--  arch/ppc64/kernel/cputable.c | 11
-rw-r--r--  arch/ppc64/kernel/iSeries_setup.c | 5
-rw-r--r--  arch/ppc64/kernel/irq.c | 3
-rw-r--r--  arch/ppc64/kernel/kprobes.c | 61
-rw-r--r--  arch/ppc64/kernel/maple_setup.c | 64
-rw-r--r--  arch/ppc64/kernel/maple_time.c | 51
-rw-r--r--  arch/ppc64/kernel/mpic.h | 3
-rw-r--r--  arch/ppc64/kernel/pSeries_pci.c | 497
-rw-r--r--  arch/ppc64/kernel/pSeries_setup.c | 184
-rw-r--r--  arch/ppc64/kernel/pSeries_smp.c | 69
-rw-r--r--  arch/ppc64/kernel/pci.c | 3
-rw-r--r--  arch/ppc64/kernel/pci.h | 6
-rw-r--r--  arch/ppc64/kernel/pmac_time.c | 8
-rw-r--r--  arch/ppc64/kernel/proc_ppc64.c | 2
-rw-r--r--  arch/ppc64/kernel/prom_init.c | 4
-rw-r--r--  arch/ppc64/kernel/ptrace.c | 4
-rw-r--r--  arch/ppc64/kernel/rtas-proc.c | 4
-rw-r--r--  arch/ppc64/kernel/rtas.c | 121
-rw-r--r--  arch/ppc64/kernel/rtas_pci.c | 495
-rw-r--r--  arch/ppc64/kernel/rtc.c | 6
-rw-r--r--  arch/ppc64/kernel/setup.c | 34
-rw-r--r--  arch/ppc64/kernel/smp.c | 4
-rw-r--r--  arch/ppc64/kernel/spider-pic.c | 191
-rw-r--r--  arch/ppc64/kernel/time.c | 63
-rw-r--r--  arch/ppc64/kernel/traps.c | 4
-rw-r--r--  arch/ppc64/mm/Makefile | 2
-rw-r--r--  arch/ppc64/mm/init.c | 22
-rw-r--r--  arch/ppc64/mm/numa.c | 3
-rw-r--r--  arch/s390/Kconfig | 2
-rw-r--r--  arch/s390/boot/install.sh | 4
-rw-r--r--  arch/s390/kernel/compat_linux.h | 2
-rw-r--r--  arch/sh/Kconfig | 4
-rw-r--r--  arch/sh64/Kconfig | 2
-rw-r--r--  arch/sparc/Kconfig | 18
-rw-r--r--  arch/sparc64/Kconfig | 2
-rw-r--r--  arch/sparc64/kernel/kprobes.c | 83
-rw-r--r--  arch/sparc64/kernel/signal32.c | 2
-rw-r--r--  arch/um/Kconfig | 1
-rw-r--r--  arch/um/kernel/process_kern.c | 8
-rw-r--r--  arch/v850/Kconfig | 2
-rw-r--r--  arch/x86_64/Kconfig | 25
-rw-r--r--  arch/x86_64/boot/install.sh | 4
-rw-r--r--  arch/x86_64/ia32/ia32_signal.c | 15
-rw-r--r--  arch/x86_64/kernel/aperture.c | 4
-rw-r--r--  arch/x86_64/kernel/early_printk.c | 13
-rw-r--r--  arch/x86_64/kernel/head64.c | 2
-rw-r--r--  arch/x86_64/kernel/i8259.c | 15
-rw-r--r--  arch/x86_64/kernel/kprobes.c | 190
-rw-r--r--  arch/x86_64/kernel/mpparse.c | 25
-rw-r--r--  arch/x86_64/kernel/process.c | 16
-rw-r--r--  arch/x86_64/kernel/setup.c | 33
-rw-r--r--  arch/x86_64/kernel/signal.c | 30
-rw-r--r--  arch/x86_64/kernel/time.c | 42
-rw-r--r--  arch/x86_64/kernel/traps.c | 22
-rw-r--r--  arch/x86_64/lib/delay.c | 7
-rw-r--r--  arch/x86_64/mm/Makefile | 2
-rw-r--r--  arch/x86_64/mm/fault.c | 4
-rw-r--r--  arch/x86_64/mm/init.c | 9
-rw-r--r--  arch/x86_64/mm/ioremap.c | 2
-rw-r--r--  arch/x86_64/mm/numa.c | 8
-rw-r--r--  arch/x86_64/pci/k8-bus.c | 16
-rw-r--r--  arch/xtensa/Kconfig | 258
-rw-r--r--  arch/xtensa/Kconfig.debug | 7
-rw-r--r--  arch/xtensa/Makefile | 102
-rw-r--r--  arch/xtensa/boot/Makefile | 37
-rw-r--r--  arch/xtensa/boot/boot-elf/Makefile | 52
-rw-r--r--  arch/xtensa/boot/boot-elf/boot.ld | 71
-rw-r--r--  arch/xtensa/boot/boot-elf/bootstrap.S | 37
-rw-r--r--  arch/xtensa/boot/boot-redboot/Makefile | 35
-rw-r--r--  arch/xtensa/boot/boot-redboot/boot.ld | 66
-rw-r--r--  arch/xtensa/boot/boot-redboot/bootstrap.S | 246
-rw-r--r--  arch/xtensa/boot/include/zlib.h | 433
-rw-r--r--  arch/xtensa/boot/lib/Makefile | 6
-rw-r--r--  arch/xtensa/boot/lib/memcpy.S | 36
-rw-r--r--  arch/xtensa/boot/lib/zlib.c | 2150
-rw-r--r--  arch/xtensa/boot/lib/zmem.c | 87
-rw-r--r--  arch/xtensa/boot/ramdisk/Makefile | 23
-rw-r--r--  arch/xtensa/configs/common_defconfig | 662
-rw-r--r--  arch/xtensa/configs/iss_defconfig | 531
-rw-r--r--  arch/xtensa/kernel/Makefile | 18
-rw-r--r--  arch/xtensa/kernel/align.S | 459
-rw-r--r--  arch/xtensa/kernel/asm-offsets.c | 94
-rw-r--r--  arch/xtensa/kernel/coprocessor.S | 201
-rw-r--r--  arch/xtensa/kernel/entry.S | 1996
-rw-r--r--  arch/xtensa/kernel/head.S | 237
-rw-r--r--  arch/xtensa/kernel/irq.c | 192
-rw-r--r--  arch/xtensa/kernel/module.c | 78
-rw-r--r--  arch/xtensa/kernel/pci-dma.c | 73
-rw-r--r--  arch/xtensa/kernel/pci.c | 563
-rw-r--r--  arch/xtensa/kernel/platform.c | 49
-rw-r--r--  arch/xtensa/kernel/process.c | 482
-rw-r--r--  arch/xtensa/kernel/ptrace.c | 407
-rw-r--r--  arch/xtensa/kernel/semaphore.c | 226
-rw-r--r--  arch/xtensa/kernel/setup.c | 520
-rw-r--r--  arch/xtensa/kernel/signal.c | 713
-rw-r--r--  arch/xtensa/kernel/syscalls.c | 418
-rw-r--r--  arch/xtensa/kernel/syscalls.h | 248
-rw-r--r--  arch/xtensa/kernel/time.c | 227
-rw-r--r--  arch/xtensa/kernel/traps.c | 498
-rw-r--r--  arch/xtensa/kernel/vectors.S | 464
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S | 341
-rw-r--r--  arch/xtensa/kernel/xtensa_ksyms.c | 123
-rw-r--r--  arch/xtensa/lib/Makefile | 7
-rw-r--r--  arch/xtensa/lib/checksum.S | 410
-rw-r--r--  arch/xtensa/lib/memcopy.S | 315
-rw-r--r--  arch/xtensa/lib/memset.S | 160
-rw-r--r--  arch/xtensa/lib/pci-auto.c | 352
-rw-r--r--  arch/xtensa/lib/strcasecmp.c | 32
-rw-r--r--  arch/xtensa/lib/strncpy_user.S | 224
-rw-r--r--  arch/xtensa/lib/strnlen_user.S | 147
-rw-r--r--  arch/xtensa/lib/usercopy.S | 321
-rw-r--r--  arch/xtensa/mm/Makefile | 13
-rw-r--r--  arch/xtensa/mm/fault.c | 241
-rw-r--r--  arch/xtensa/mm/init.c | 551
-rw-r--r--  arch/xtensa/mm/misc.S | 374
-rw-r--r--  arch/xtensa/mm/pgtable.c | 76
-rw-r--r--  arch/xtensa/mm/tlb.c | 545
-rw-r--r--  arch/xtensa/platform-iss/Makefile | 13
-rw-r--r--  arch/xtensa/platform-iss/console.c | 303
-rw-r--r--  arch/xtensa/platform-iss/io.c | 32
-rw-r--r--  arch/xtensa/platform-iss/network.c | 855
-rw-r--r--  arch/xtensa/platform-iss/setup.c | 112
220 files changed, 22883 insertions(+), 1343 deletions(-)
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index f7c96635d3b..c5739d6309d 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -509,7 +509,7 @@ config NR_CPUS
depends on SMP
default "64"
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
bool "Discontiguous Memory Support (EXPERIMENTAL)"
depends on EXPERIMENTAL
help
@@ -518,6 +518,8 @@ config DISCONTIGMEM
or have huge holes in the physical address space for other reasons.
See <file:Documentation/vm/numa> for more.
+source "mm/Kconfig"
+
config NUMA
bool "NUMA Support (EXPERIMENTAL)"
depends on DISCONTIGMEM
diff --git a/arch/alpha/defconfig b/arch/alpha/defconfig
index 5e39b7a7c8f..6da9c3dbde4 100644
--- a/arch/alpha/defconfig
+++ b/arch/alpha/defconfig
@@ -96,7 +96,7 @@ CONFIG_ALPHA_CORE_AGP=y
CONFIG_ALPHA_BROKEN_IRQ_MASK=y
CONFIG_EISA=y
# CONFIG_SMP is not set
-# CONFIG_DISCONTIGMEM is not set
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
CONFIG_VERBOSE_MCHECK=y
CONFIG_VERBOSE_MCHECK_ON=1
CONFIG_PCI_LEGACY_PROC=y
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index ba81c4422aa..c7481d59b6d 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -327,8 +327,6 @@ void __init mem_init(void)
extern char _text, _etext, _data, _edata;
extern char __init_begin, __init_end;
unsigned long nid, i;
- struct page * lmem_map;
-
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
reservedpages = 0;
@@ -338,10 +336,10 @@ void __init mem_init(void)
*/
totalram_pages += free_all_bootmem_node(NODE_DATA(nid));
- lmem_map = node_mem_map(nid);
pfn = NODE_DATA(nid)->node_start_pfn;
for (i = 0; i < node_spanned_pages(nid); i++, pfn++)
- if (page_is_ram(pfn) && PageReserved(lmem_map+i))
+ if (page_is_ram(pfn) &&
+ PageReserved(nid_page_nr(nid, i)))
reservedpages++;
}
@@ -373,18 +371,18 @@ show_mem(void)
show_free_areas();
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
for_each_online_node(nid) {
- struct page * lmem_map = node_mem_map(nid);
i = node_spanned_pages(nid);
while (i-- > 0) {
+ struct page *page = nid_page_nr(nid, i);
total++;
- if (PageReserved(lmem_map+i))
+ if (PageReserved(page))
reserved++;
- else if (PageSwapCache(lmem_map+i))
+ else if (PageSwapCache(page))
cached++;
- else if (!page_count(lmem_map+i))
+ else if (!page_count(page))
free++;
else
- shared += page_count(lmem_map + i) - 1;
+ shared += page_count(page) - 1;
}
}
printk("%ld pages of RAM\n",total);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ee8a9ad7bbd..07ba77c19f6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -346,7 +346,7 @@ config PREEMPT
Say Y here if you are building a kernel for a desktop, embedded
or real-time system. Say N if you are unsure.
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
bool
default (ARCH_LH7A40X && !LH7A40X_CONTIGMEM)
help
@@ -355,6 +355,8 @@ config DISCONTIGMEM
or have huge holes in the physical address space for other reasons.
See <file:Documentation/vm/numa> for more.
+source "mm/Kconfig"
+
config LEDS
bool "Timer and CPU usage LEDs"
depends on ARCH_CDB89712 || ARCH_CO285 || ARCH_EBSA110 || \
diff --git a/arch/arm/boot/install.sh b/arch/arm/boot/install.sh
index 935bb27369e..9f9bed20734 100644
--- a/arch/arm/boot/install.sh
+++ b/arch/arm/boot/install.sh
@@ -21,8 +21,8 @@
#
# User may have a custom install script
-if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi
-if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi
+if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
+if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
if [ "$(basename $2)" = "zImage" ]; then
# Compressed install
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 2a63fb27719..98b72ff3883 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -1,14 +1,13 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.12-rc1-bk2
-# Sun Mar 27 17:47:45 2005
+# Linux kernel version: 2.6.12-git4
+# Wed Jun 22 15:56:42 2005
#
CONFIG_ARM=y
CONFIG_MMU=y
CONFIG_UID16=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_GENERIC_IOMAP=y
#
# Code maturity level options
@@ -17,6 +16,7 @@ CONFIG_EXPERIMENTAL=y
# CONFIG_CLEAN_COMPILE is not set
CONFIG_BROKEN=y
CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
#
# General setup
@@ -35,6 +35,8 @@ CONFIG_KOBJECT_UEVENT=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
@@ -81,6 +83,7 @@ CONFIG_ARCH_S3C2410=y
# CONFIG_ARCH_VERSATILE is not set
# CONFIG_ARCH_IMX is not set
# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_AAEC2000 is not set
#
# S3C24XX Implementations
@@ -134,6 +137,7 @@ CONFIG_CPU_TLB_V4WBI=y
#
# Bus support
#
+CONFIG_ISA_DMA_API=y
#
# PCCARD (PCMCIA/CardBus) support
@@ -143,7 +147,9 @@ CONFIG_CPU_TLB_V4WBI=y
#
# Kernel Features
#
+# CONFIG_SMP is not set
# CONFIG_PREEMPT is not set
+# CONFIG_DISCONTIGMEM is not set
CONFIG_ALIGNMENT_TRAP=y
#
@@ -297,7 +303,6 @@ CONFIG_PARPORT_1284=y
#
# Block devices
#
-# CONFIG_BLK_DEV_FD is not set
# CONFIG_PARIDE is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
@@ -359,6 +364,7 @@ CONFIG_BLK_DEV_IDE_BAST=y
#
# Fusion MPT device support
#
+# CONFIG_FUSION is not set
#
# IEEE 1394 (FireWire) support
@@ -378,10 +384,11 @@ CONFIG_NET=y
# Networking options
#
# CONFIG_PACKET is not set
-# CONFIG_NETLINK_DEV is not set
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
# CONFIG_IP_MULTICAST is not set
# CONFIG_IP_ADVANCED_ROUTER is not set
CONFIG_IP_PNP=y
@@ -443,8 +450,9 @@ CONFIG_NETDEVICES=y
# Ethernet (10 or 100Mbit)
#
CONFIG_NET_ETHERNET=y
-# CONFIG_MII is not set
+CONFIG_MII=m
# CONFIG_SMC91X is not set
+CONFIG_DM9000=m
#
# Ethernet (1000 Mbit)
@@ -521,7 +529,6 @@ CONFIG_SERIO_SERPORT=y
CONFIG_SERIO_LIBPS2=y
# CONFIG_SERIO_RAW is not set
# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
#
# Character devices
@@ -605,7 +612,6 @@ CONFIG_S3C2410_RTC=y
#
# TPM devices
#
-# CONFIG_TCG_TPM is not set
#
# I2C support
@@ -654,6 +660,7 @@ CONFIG_SENSORS_LM78=m
CONFIG_SENSORS_LM85=m
# CONFIG_SENSORS_LM87 is not set
# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
# CONFIG_SENSORS_MAX1619 is not set
# CONFIG_SENSORS_PC87360 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
@@ -665,6 +672,7 @@ CONFIG_SENSORS_LM85=m
#
# Other I2C Chip support
#
+# CONFIG_SENSORS_DS1337 is not set
CONFIG_SENSORS_EEPROM=m
# CONFIG_SENSORS_PCF8574 is not set
# CONFIG_SENSORS_PCF8591 is not set
@@ -696,8 +704,10 @@ CONFIG_FB=y
# CONFIG_FB_CFB_COPYAREA is not set
# CONFIG_FB_CFB_IMAGEBLIT is not set
# CONFIG_FB_SOFT_CURSOR is not set
+# CONFIG_FB_MACMODES is not set
CONFIG_FB_MODE_HELPERS=y
# CONFIG_FB_TILEBLITTING is not set
+# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_VIRTUAL is not set
#
@@ -782,7 +792,6 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
#
CONFIG_PROC_FS=y
CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
# CONFIG_DEVPTS_FS_XATTR is not set
# CONFIG_TMPFS is not set
# CONFIG_HUGETLBFS is not set
diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c
index fc0555596d6..0ee34acb8d7 100644
--- a/arch/arm/mach-ixp2000/core.c
+++ b/arch/arm/mach-ixp2000/core.c
@@ -40,6 +40,8 @@
#include <asm/mach/time.h>
#include <asm/mach/irq.h>
+#include <asm/arch/gpio.h>
+
static DEFINE_SPINLOCK(ixp2000_slowport_lock);
static unsigned long ixp2000_slowport_irq_flags;
@@ -179,7 +181,7 @@ static int ixp2000_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/* clear timer 1 */
ixp2000_reg_write(IXP2000_T1_CLR, 1);
-
+
while ((next_jiffy_time - *missing_jiffy_timer_csr) > ticks_per_jiffy) {
timer_tick(regs);
next_jiffy_time -= ticks_per_jiffy;
@@ -238,35 +240,40 @@ void __init ixp2000_init_time(unsigned long tick_rate)
/*************************************************************************
* GPIO helpers
*************************************************************************/
-static unsigned long GPIO_IRQ_rising_edge;
static unsigned long GPIO_IRQ_falling_edge;
+static unsigned long GPIO_IRQ_rising_edge;
static unsigned long GPIO_IRQ_level_low;
static unsigned long GPIO_IRQ_level_high;
-void gpio_line_config(int line, int style)
+static void update_gpio_int_csrs(void)
+{
+ ixp2000_reg_write(IXP2000_GPIO_FEDR, GPIO_IRQ_falling_edge);
+ ixp2000_reg_write(IXP2000_GPIO_REDR, GPIO_IRQ_rising_edge);
+ ixp2000_reg_write(IXP2000_GPIO_LSLR, GPIO_IRQ_level_low);
+ ixp2000_reg_write(IXP2000_GPIO_LSHR, GPIO_IRQ_level_high);
+}
+
+void gpio_line_config(int line, int direction)
{
unsigned long flags;
local_irq_save(flags);
+ if (direction == GPIO_OUT) {
+ irq_desc[line + IRQ_IXP2000_GPIO0].valid = 0;
- if(style == GPIO_OUT) {
/* if it's an output, it ain't an interrupt anymore */
- ixp2000_reg_write(IXP2000_GPIO_PDSR, (1 << line));
GPIO_IRQ_falling_edge &= ~(1 << line);
GPIO_IRQ_rising_edge &= ~(1 << line);
GPIO_IRQ_level_low &= ~(1 << line);
GPIO_IRQ_level_high &= ~(1 << line);
- ixp2000_reg_write(IXP2000_GPIO_FEDR, GPIO_IRQ_falling_edge);
- ixp2000_reg_write(IXP2000_GPIO_REDR, GPIO_IRQ_rising_edge);
- ixp2000_reg_write(IXP2000_GPIO_LSHR, GPIO_IRQ_level_high);
- ixp2000_reg_write(IXP2000_GPIO_LSLR, GPIO_IRQ_level_low);
- irq_desc[line+IRQ_IXP2000_GPIO0].valid = 0;
- } else if(style == GPIO_IN) {
- ixp2000_reg_write(IXP2000_GPIO_PDCR, (1 << line));
+ update_gpio_int_csrs();
+
+ ixp2000_reg_write(IXP2000_GPIO_PDSR, 1 << line);
+ } else if (direction == GPIO_IN) {
+ ixp2000_reg_write(IXP2000_GPIO_PDCR, 1 << line);
}
-
local_irq_restore(flags);
-}
+}
/*************************************************************************
@@ -285,9 +292,50 @@ static void ixp2000_GPIO_irq_handler(unsigned int irq, struct irqdesc *desc, str
}
}
+static int ixp2000_GPIO_irq_type(unsigned int irq, unsigned int type)
+{
+ int line = irq - IRQ_IXP2000_GPIO0;
+
+ /*
+ * First, configure this GPIO line as an input.
+ */
+ ixp2000_reg_write(IXP2000_GPIO_PDCR, 1 << line);
+
+ /*
+ * Then, set the proper trigger type.
+ */
+ if (type & IRQT_FALLING)
+ GPIO_IRQ_falling_edge |= 1 << line;
+ else
+ GPIO_IRQ_falling_edge &= ~(1 << line);
+ if (type & IRQT_RISING)
+ GPIO_IRQ_rising_edge |= 1 << line;
+ else
+ GPIO_IRQ_rising_edge &= ~(1 << line);
+ if (type & IRQT_LOW)
+ GPIO_IRQ_level_low |= 1 << line;
+ else
+ GPIO_IRQ_level_low &= ~(1 << line);
+ if (type & IRQT_HIGH)
+ GPIO_IRQ_level_high |= 1 << line;
+ else
+ GPIO_IRQ_level_high &= ~(1 << line);
+ update_gpio_int_csrs();
+
+ /*
+ * Finally, mark the corresponding IRQ as valid.
+ */
+ irq_desc[irq].valid = 1;
+
+ return 0;
+}
+
static void ixp2000_GPIO_irq_mask_ack(unsigned int irq)
{
ixp2000_reg_write(IXP2000_GPIO_INCR, (1 << (irq - IRQ_IXP2000_GPIO0)));
+
+ ixp2000_reg_write(IXP2000_GPIO_EDSR, (1 << (irq - IRQ_IXP2000_GPIO0)));
+ ixp2000_reg_write(IXP2000_GPIO_LDSR, (1 << (irq - IRQ_IXP2000_GPIO0)));
ixp2000_reg_write(IXP2000_GPIO_INST, (1 << (irq - IRQ_IXP2000_GPIO0)));
}
@@ -302,6 +350,7 @@ static void ixp2000_GPIO_irq_unmask(unsigned int irq)
}
static struct irqchip ixp2000_GPIO_irq_chip = {
+ .type = ixp2000_GPIO_irq_type,
.ack = ixp2000_GPIO_irq_mask_ack,
.mask = ixp2000_GPIO_irq_mask,
.unmask = ixp2000_GPIO_irq_unmask
@@ -338,7 +387,7 @@ static void ixp2000_irq_mask(unsigned int irq)
static void ixp2000_irq_unmask(unsigned int irq)
{
- ixp2000_reg_write(IXP2000_IRQ_ENABLE_SET, (1 << irq));
+ ixp2000_reg_write(IXP2000_IRQ_ENABLE_SET, (1 << irq));
}
static struct irqchip ixp2000_irq_chip = {
@@ -375,16 +424,16 @@ void __init ixp2000_init_irq(void)
* our mask/unmask code much simpler.
*/
for (irq = IRQ_IXP2000_SOFT_INT; irq <= IRQ_IXP2000_THDB3; irq++) {
- if((1 << irq) & IXP2000_VALID_IRQ_MASK) {
+ if ((1 << irq) & IXP2000_VALID_IRQ_MASK) {
set_irq_chip(irq, &ixp2000_irq_chip);
set_irq_handler(irq, do_level_IRQ);
set_irq_flags(irq, IRQF_VALID);
} else set_irq_flags(irq, 0);
}
-
+
/*
* GPIO IRQs are invalid until someone sets the interrupt mode
- * by calling gpio_line_set();
+ * by calling set_irq_type().
*/
for (irq = IRQ_IXP2000_GPIO0; irq <= IRQ_IXP2000_GPIO7; irq++) {
set_irq_chip(irq, &ixp2000_GPIO_irq_chip);
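With the new .type hook wired up, board code selects a GPIO trigger through set_irq_type() before claiming the line. A hedged usage sketch against the 2.6-era ARM IRQ API (the handler body and device name are invented for illustration):

    #include <linux/interrupt.h>
    #include <asm/irq.h>

    static irqreturn_t my_gpio_isr(int irq, void *dev_id, struct pt_regs *regs)
    {
            /* handle the board-specific event */
            return IRQ_HANDLED;
    }

    static int __init my_board_irq_init(void)
    {
            /* Configures the pin as an input, programs the edge CSRs and
             * marks the IRQ valid via ixp2000_GPIO_irq_type(). */
            set_irq_type(IRQ_IXP2000_GPIO0, IRQT_RISING);
            return request_irq(IRQ_IXP2000_GPIO0, my_gpio_isr, 0,
                               "my-device", NULL);
    }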
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 267ba02d77d..f39e8408488 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -141,7 +141,15 @@ static struct map_desc ixp4xx_io_desc[] __initdata = {
.physical = IXP4XX_PCI_CFG_BASE_PHYS,
.length = IXP4XX_PCI_CFG_REGION_SIZE,
.type = MT_DEVICE
+ },
+#ifdef CONFIG_DEBUG_LL
+ { /* Debug UART mapping */
+ .virtual = IXP4XX_DEBUG_UART_BASE_VIRT,
+ .physical = IXP4XX_DEBUG_UART_BASE_PHYS,
+ .length = IXP4XX_DEBUG_UART_REGION_SIZE,
+ .type = MT_DEVICE
}
+#endif
};
void __init ixp4xx_map_io(void)
diff --git a/arch/arm/mach-s3c2410/mach-bast.c b/arch/arm/mach-s3c2410/mach-bast.c
index 3bb97eb6e69..f3e970039b6 100644
--- a/arch/arm/mach-s3c2410/mach-bast.c
+++ b/arch/arm/mach-s3c2410/mach-bast.c
@@ -26,6 +26,7 @@
* 03-Mar-2005 BJD Ensured that bast-cpld.h is included
* 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA
* 14-Mar-2005 BJD Updated for __iomem changes
+ * 22-Jun-2005 BJD Added DM9000 platform information
*/
#include <linux/kernel.h>
@@ -35,6 +36,7 @@
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/dm9000.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -53,6 +55,7 @@
#include <asm/arch/regs-serial.h>
#include <asm/arch/regs-gpio.h>
#include <asm/arch/regs-mem.h>
+#include <asm/arch/regs-lcd.h>
#include <asm/arch/nand.h>
#include <linux/mtd/mtd.h>
@@ -112,7 +115,6 @@ static struct map_desc bast_iodesc[] __initdata = {
{ VA_C2(BAST_VA_ISAMEM), PA_CS2(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE },
{ VA_C2(BAST_VA_ASIXNET), PA_CS3(BAST_PA_ASIXNET), SZ_1M, MT_DEVICE },
{ VA_C2(BAST_VA_SUPERIO), PA_CS2(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE },
- { VA_C2(BAST_VA_DM9000), PA_CS2(BAST_PA_DM9000), SZ_1M, MT_DEVICE },
{ VA_C2(BAST_VA_IDEPRI), PA_CS3(BAST_PA_IDEPRI), SZ_1M, MT_DEVICE },
{ VA_C2(BAST_VA_IDESEC), PA_CS3(BAST_PA_IDESEC), SZ_1M, MT_DEVICE },
{ VA_C2(BAST_VA_IDEPRIAUX), PA_CS3(BAST_PA_IDEPRIAUX), SZ_1M, MT_DEVICE },
@@ -123,7 +125,6 @@ static struct map_desc bast_iodesc[] __initdata = {
{ VA_C3(BAST_VA_ISAMEM), PA_CS3(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE },
{ VA_C3(BAST_VA_ASIXNET), PA_CS3(BAST_PA_ASIXNET), SZ_1M, MT_DEVICE },
{ VA_C3(BAST_VA_SUPERIO), PA_CS3(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE },
- { VA_C3(BAST_VA_DM9000), PA_CS3(BAST_PA_DM9000), SZ_1M, MT_DEVICE },
{ VA_C3(BAST_VA_IDEPRI), PA_CS3(BAST_PA_IDEPRI), SZ_1M, MT_DEVICE },
{ VA_C3(BAST_VA_IDESEC), PA_CS3(BAST_PA_IDESEC), SZ_1M, MT_DEVICE },
{ VA_C3(BAST_VA_IDEPRIAUX), PA_CS3(BAST_PA_IDEPRIAUX), SZ_1M, MT_DEVICE },
@@ -134,7 +135,6 @@ static struct map_desc bast_iodesc[] __initdata = {
{ VA_C4(BAST_VA_ISAMEM), PA_CS4(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE },
{ VA_C4(BAST_VA_ASIXNET), PA_CS5(BAST_PA_ASIXNET), SZ_1M, MT_DEVICE },
{ VA_C4(BAST_VA_SUPERIO), PA_CS4(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE },
- { VA_C4(BAST_VA_DM9000), PA_CS4(BAST_PA_DM9000), SZ_1M, MT_DEVICE },
{ VA_C4(BAST_VA_IDEPRI), PA_CS5(BAST_PA_IDEPRI), SZ_1M, MT_DEVICE },
{ VA_C4(BAST_VA_IDESEC), PA_CS5(BAST_PA_IDESEC), SZ_1M, MT_DEVICE },
{ VA_C4(BAST_VA_IDEPRIAUX), PA_CS5(BAST_PA_IDEPRIAUX), SZ_1M, MT_DEVICE },
@@ -145,7 +145,6 @@ static struct map_desc bast_iodesc[] __initdata = {
{ VA_C5(BAST_VA_ISAMEM), PA_CS5(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE },
{ VA_C5(BAST_VA_ASIXNET), PA_CS5(BAST_PA_ASIXNET), SZ_1M, MT_DEVICE },
{ VA_C5(BAST_VA_SUPERIO), PA_CS5(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE },
- { VA_C5(BAST_VA_DM9000), PA_CS5(BAST_PA_DM9000), SZ_1M, MT_DEVICE },
{ VA_C5(BAST_VA_IDEPRI), PA_CS5(BAST_PA_IDEPRI), SZ_1M, MT_DEVICE },
{ VA_C5(BAST_VA_IDESEC), PA_CS5(BAST_PA_IDESEC), SZ_1M, MT_DEVICE },
{ VA_C5(BAST_VA_IDEPRIAUX), PA_CS5(BAST_PA_IDEPRIAUX), SZ_1M, MT_DEVICE },
@@ -313,6 +312,45 @@ static struct s3c2410_platform_nand bast_nand_info = {
.select_chip = bast_nand_select,
};
+/* DM9000 */
+
+static struct resource bast_dm9k_resource[] = {
+ [0] = {
+ .start = S3C2410_CS5 + BAST_PA_DM9000,
+ .end = S3C2410_CS5 + BAST_PA_DM9000 + 3,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = S3C2410_CS5 + BAST_PA_DM9000 + 0x40,
+ .end = S3C2410_CS5 + BAST_PA_DM9000 + 0x40 + 0x3f,
+ .flags = IORESOURCE_MEM
+ },
+ [2] = {
+ .start = IRQ_DM9000,
+ .end = IRQ_DM9000,
+ .flags = IORESOURCE_IRQ
+ }
+
+};
+
+/* for the moment we limit ourselves to 16bit IO until some
+ * better IO routines can be written and tested
+*/
+
+struct dm9000_plat_data bast_dm9k_platdata = {
+ .flags = DM9000_PLATF_16BITONLY
+};
+
+static struct platform_device bast_device_dm9k = {
+ .name = "dm9000",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(bast_dm9k_resource),
+ .resource = bast_dm9k_resource,
+ .dev = {
+ .platform_data = &bast_dm9k_platdata,
+ }
+};
+
/* Standard BAST devices */
@@ -324,7 +362,8 @@ static struct platform_device *bast_devices[] __initdata = {
&s3c_device_iis,
&s3c_device_rtc,
&s3c_device_nand,
- &bast_device_nor
+ &bast_device_nor,
+ &bast_device_dm9k,
};
static struct clk *bast_clocks[] = {
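On the driver side, the DM9000 probe is expected to pull the two MEM resources (the address and data windows declared above), the IRQ, and the dm9000_plat_data flags back out of the platform device. A rough sketch of that lookup, assuming the generic platform_get_resource()/platform_get_irq() helpers and a platform_device-based probe (names are illustrative, error handling abbreviated):

    static int my_dm9000_probe(struct platform_device *pdev)
    {
            struct resource *addr = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            struct resource *data = platform_get_resource(pdev, IORESOURCE_MEM, 1);
            int irq = platform_get_irq(pdev, 0);
            struct dm9000_plat_data *pdata = pdev->dev.platform_data;

            if (!addr || !data || irq < 0)
                    return -ENODEV;
            if (pdata && (pdata->flags & DM9000_PLATF_16BITONLY)) {
                    /* board only wires 16 data lines: use 16-bit accessors */
            }
            return 0;
    }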
diff --git a/arch/arm/mach-s3c2410/mach-vr1000.c b/arch/arm/mach-s3c2410/mach-vr1000.c
index 5512146b1ce..76be074944a 100644
--- a/arch/arm/mach-s3c2410/mach-vr1000.c
+++ b/arch/arm/mach-s3c2410/mach-vr1000.c
@@ -27,6 +27,7 @@
* 10-Feb-2005 BJD Added power-off capability
* 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA
* 14-Mar-2005 BJD void __iomem fixes
+ * 22-Jun-2005 BJD Added DM9000 platform information
*/
#include <linux/kernel.h>
@@ -35,6 +36,7 @@
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
+#include <linux/dm9000.h>
#include <linux/serial.h>
#include <linux/tty.h>
@@ -98,28 +100,24 @@ static struct map_desc vr1000_iodesc[] __initdata = {
* are only 8bit */
/* slow, byte */
- { VA_C2(VR1000_VA_DM9000), PA_CS2(VR1000_PA_DM9000), SZ_1M, MT_DEVICE },
{ VA_C2(VR1000_VA_IDEPRI), PA_CS3(VR1000_PA_IDEPRI), SZ_1M, MT_DEVICE },
{ VA_C2(VR1000_VA_IDESEC), PA_CS3(VR1000_PA_IDESEC), SZ_1M, MT_DEVICE },
{ VA_C2(VR1000_VA_IDEPRIAUX), PA_CS3(VR1000_PA_IDEPRIAUX), SZ_1M, MT_DEVICE },
{ VA_C2(VR1000_VA_IDESECAUX), PA_CS3(VR1000_PA_IDESECAUX), SZ_1M, MT_DEVICE },
/* slow, word */
- { VA_C3(VR1000_VA_DM9000), PA_CS3(VR1000_PA_DM9000), SZ_1M, MT_DEVICE },
{ VA_C3(VR1000_VA_IDEPRI), PA_CS3(VR1000_PA_IDEPRI), SZ_1M, MT_DEVICE },
{ VA_C3(VR1000_VA_IDESEC), PA_CS3(VR1000_PA_IDESEC), SZ_1M, MT_DEVICE },
{ VA_C3(VR1000_VA_IDEPRIAUX), PA_CS3(VR1000_PA_IDEPRIAUX), SZ_1M, MT_DEVICE },
{ VA_C3(VR1000_VA_IDESECAUX), PA_CS3(VR1000_PA_IDESECAUX), SZ_1M, MT_DEVICE },
/* fast, byte */
- { VA_C4(VR1000_VA_DM9000), PA_CS4(VR1000_PA_DM9000), SZ_1M, MT_DEVICE },
{ VA_C4(VR1000_VA_IDEPRI), PA_CS5(VR1000_PA_IDEPRI), SZ_1M, MT_DEVICE },
{ VA_C4(VR1000_VA_IDESEC), PA_CS5(VR1000_PA_IDESEC), SZ_1M, MT_DEVICE },
{ VA_C4(VR1000_VA_IDEPRIAUX), PA_CS5(VR1000_PA_IDEPRIAUX), SZ_1M, MT_DEVICE },
{ VA_C4(VR1000_VA_IDESECAUX), PA_CS5(VR1000_PA_IDESECAUX), SZ_1M, MT_DEVICE },
/* fast, word */
- { VA_C5(VR1000_VA_DM9000), PA_CS5(VR1000_PA_DM9000), SZ_1M, MT_DEVICE },
{ VA_C5(VR1000_VA_IDEPRI), PA_CS5(VR1000_PA_IDEPRI), SZ_1M, MT_DEVICE },
{ VA_C5(VR1000_VA_IDESEC), PA_CS5(VR1000_PA_IDESEC), SZ_1M, MT_DEVICE },
{ VA_C5(VR1000_VA_IDEPRIAUX), PA_CS5(VR1000_PA_IDEPRIAUX), SZ_1M, MT_DEVICE },
@@ -246,6 +244,74 @@ static struct platform_device vr1000_nor = {
.resource = vr1000_nor_resource,
};
+/* DM9000 ethernet devices */
+
+static struct resource vr1000_dm9k0_resource[] = {
+ [0] = {
+ .start = S3C2410_CS5 + VR1000_PA_DM9000,
+ .end = S3C2410_CS5 + VR1000_PA_DM9000 + 3,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = S3C2410_CS5 + VR1000_PA_DM9000 + 0x40,
+ .end = S3C2410_CS5 + VR1000_PA_DM9000 + 0x7f,
+ .flags = IORESOURCE_MEM
+ },
+ [2] = {
+ .start = IRQ_VR1000_DM9000A,
+ .end = IRQ_VR1000_DM9000A,
+ .flags = IORESOURCE_IRQ
+ }
+
+};
+
+static struct resource vr1000_dm9k1_resource[] = {
+ [0] = {
+ .start = S3C2410_CS5 + VR1000_PA_DM9000 + 0x80,
+ .end = S3C2410_CS5 + VR1000_PA_DM9000 + 0x83,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = S3C2410_CS5 + VR1000_PA_DM9000 + 0xC0,
+ .end = S3C2410_CS5 + VR1000_PA_DM9000 + 0xFF,
+ .flags = IORESOURCE_MEM
+ },
+ [2] = {
+ .start = IRQ_VR1000_DM9000N,
+ .end = IRQ_VR1000_DM9000N,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+/* for the moment we limit ourselves to 16bit IO until some
+ * better IO routines can be written and tested
+*/
+
+struct dm9000_plat_data vr1000_dm9k_platdata = {
+ .flags = DM9000_PLATF_16BITONLY,
+};
+
+static struct platform_device vr1000_dm9k0 = {
+ .name = "dm9000",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(vr1000_dm9k0_resource),
+ .resource = vr1000_dm9k0_resource,
+ .dev = {
+ .platform_data = &vr1000_dm9k_platdata,
+ }
+};
+
+static struct platform_device vr1000_dm9k1 = {
+ .name = "dm9000",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(vr1000_dm9k1_resource),
+ .resource = vr1000_dm9k1_resource,
+ .dev = {
+ .platform_data = &vr1000_dm9k_platdata,
+ }
+};
+
+/* devices for this board */
static struct platform_device *vr1000_devices[] __initdata = {
&s3c_device_usb,
@@ -253,8 +319,11 @@ static struct platform_device *vr1000_devices[] __initdata = {
&s3c_device_wdt,
&s3c_device_i2c,
&s3c_device_iis,
+ &s3c_device_adc,
&serial_device,
&vr1000_nor,
+ &vr1000_dm9k0,
+ &vr1000_dm9k1
};
static struct clk *vr1000_clocks[] = {
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 0aa73d41478..e3d8510f434 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -132,8 +132,8 @@ ENTRY(cpu_v6_switch_mm)
* 100x 1 0 1 r/o no acc
* 10x0 1 0 1 r/o no acc
* 1011 0 0 1 r/w no acc
- * 110x 1 1 0 r/o r/o
- * 11x0 1 1 0 r/o r/o
+ * 110x 0 1 0 r/w r/o
+ * 11x0 0 1 0 r/w r/o
* 1111 0 1 1 r/w r/w
*/
ENTRY(cpu_v6_set_pte)
@@ -150,7 +150,7 @@ ENTRY(cpu_v6_set_pte)
tst r1, #L_PTE_USER
orrne r2, r2, #AP1 | nG
tstne r2, #APX
- eorne r2, r2, #AP0
+ bicne r2, r2, #APX | AP0
tst r1, #L_PTE_YOUNG
biceq r2, r2, #APX | AP1 | AP0
diff --git a/arch/arm/nwfpe/softfloat-macros b/arch/arm/nwfpe/softfloat-macros
index 5469989f2c5..5a060f95a58 100644
--- a/arch/arm/nwfpe/softfloat-macros
+++ b/arch/arm/nwfpe/softfloat-macros
@@ -563,8 +563,14 @@ static bits64 estimateDiv128To64( bits64 a0, bits64 a1, bits64 b )
bits64 rem0, rem1, term0, term1;
bits64 z;
if ( b <= a0 ) return LIT64( 0xFFFFFFFFFFFFFFFF );
- b0 = b>>32;
- z = ( b0<<32 <= a0 ) ? LIT64( 0xFFFFFFFF00000000 ) : ( a0 / b0 )<<32;
+ b0 = b>>32; /* hence b0 is 32 bits wide now */
+ if ( b0<<32 <= a0 ) {
+ z = LIT64( 0xFFFFFFFF00000000 );
+ } else {
+ z = a0;
+ do_div( z, b0 );
+ z <<= 32;
+ }
mul64To128( b, z, &term0, &term1 );
sub128( a0, a1, term0, term1, &rem0, &rem1 );
while ( ( (sbits64) rem0 ) < 0 ) {
@@ -573,7 +579,12 @@ static bits64 estimateDiv128To64( bits64 a0, bits64 a1, bits64 b )
add128( rem0, rem1, b0, b1, &rem0, &rem1 );
}
rem0 = ( rem0<<32 ) | ( rem1>>32 );
- z |= ( b0<<32 <= rem0 ) ? 0xFFFFFFFF : rem0 / b0;
+ if ( b0<<32 <= rem0 ) {
+ z |= 0xFFFFFFFF;
+ } else {
+ do_div( rem0, b0 );
+ z |= rem0;
+ }
return z;
}
@@ -601,6 +612,7 @@ static bits32 estimateSqrt32( int16 aExp, bits32 a )
};
int8 index;
bits32 z;
+ bits64 A;
index = ( a>>27 ) & 15;
if ( aExp & 1 ) {
@@ -614,7 +626,9 @@ static bits32 estimateSqrt32( int16 aExp, bits32 a )
z = ( 0x20000 <= z ) ? 0xFFFF8000 : ( z<<15 );
if ( z <= a ) return (bits32) ( ( (sbits32) a )>>1 );
}
- return ( (bits32) ( ( ( (bits64) a )<<31 ) / z ) ) + ( z>>1 );
+ A = ( (bits64) a )<<31;
+ do_div( A, z );
+ return ( (bits32) A ) + ( z>>1 );
}
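The rewrites above all follow one pattern: kernel code cannot use a plain `/` on a 64-bit dividend, because that pulls in libgcc's 64-by-32 division helper, which is unavailable in ARM kernel builds. do_div(n, base) from <asm/div64.h> divides the 64-bit lvalue n in place and evaluates to the 32-bit remainder. A small self-contained illustration (the values are arbitrary):

    #include <asm/div64.h>

    static void do_div_example(void)
    {
            u64 bytes = 10000000000ULL;     /* 64-bit dividend */
            u32 rem;

            rem = do_div(bytes, 4096);      /* bytes becomes the quotient */
            /* now bytes == 2441406 and rem == 1024 */
    }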
diff --git a/arch/arm/nwfpe/softfloat.c b/arch/arm/nwfpe/softfloat.c
index 9d743ae2906..e038dd3be9b 100644
--- a/arch/arm/nwfpe/softfloat.c
+++ b/arch/arm/nwfpe/softfloat.c
@@ -28,6 +28,8 @@ this code that are retained.
===============================================================================
*/
+#include <asm/div64.h>
+
#include "fpa11.h"
//#include "milieu.h"
//#include "softfloat.h"
@@ -1331,7 +1333,11 @@ float32 float32_div( float32 a, float32 b )
aSig >>= 1;
++zExp;
}
- zSig = ( ( (bits64) aSig )<<32 ) / bSig;
+ {
+ bits64 tmp = ( (bits64) aSig )<<32;
+ do_div( tmp, bSig );
+ zSig = tmp;
+ }
if ( ( zSig & 0x3F ) == 0 ) {
zSig |= ( ( (bits64) bSig ) * zSig != ( (bits64) aSig )<<32 );
}
@@ -1397,7 +1403,9 @@ float32 float32_rem( float32 a, float32 b )
q = ( bSig <= aSig );
if ( q ) aSig -= bSig;
if ( 0 < expDiff ) {
- q = ( ( (bits64) aSig )<<32 ) / bSig;
+ bits64 tmp = ( (bits64) aSig )<<32;
+ do_div( tmp, bSig );
+ q = tmp;
q >>= 32 - expDiff;
bSig >>= 2;
aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q;
diff --git a/arch/arm26/Kconfig b/arch/arm26/Kconfig
index 6caed90661f..dc0c1936969 100644
--- a/arch/arm26/Kconfig
+++ b/arch/arm26/Kconfig
@@ -179,6 +179,8 @@ config CMDLINE
time by entering them here. As a minimum, you should specify the
memory size and the root device (e.g., mem=64M root=/dev/nfs).
+source "mm/Kconfig"
+
endmenu
source "drivers/base/Kconfig"
diff --git a/arch/arm26/boot/install.sh b/arch/arm26/boot/install.sh
index c628328dd9e..8a8399b26cf 100644
--- a/arch/arm26/boot/install.sh
+++ b/arch/arm26/boot/install.sh
@@ -23,8 +23,8 @@
# User may have a custom install script
-if [ -x /sbin/installkernel ]; then
- exec /sbin/installkernel "$@"
+if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then
+ exec /sbin/${CROSS_COMPILE}installkernel "$@"
fi
if [ "$2" = "zImage" ]; then
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 4332ca348d5..f848e376149 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -74,6 +74,8 @@ config PREEMPT
Say Y here if you are building a kernel for a desktop, embedded
or real-time system. Say N if you are unsure.
+source mm/Kconfig
+
endmenu
menu "Hardware setup"
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 2b19372767e..c93f95146cc 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -74,6 +74,8 @@ config HIGHPTE
with a lot of RAM, this can be wasteful of precious low memory.
Setting this option will put user-space page tables in high memory.
+source "mm/Kconfig"
+
choice
prompt "uClinux kernel load address"
depends on !MMU
diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu
index d9dd62a565a..a380167a13c 100644
--- a/arch/h8300/Kconfig.cpu
+++ b/arch/h8300/Kconfig.cpu
@@ -180,4 +180,7 @@ config CPU_H8S
config PREEMPT
bool "Preemptible Kernel"
default n
+
+source "mm/Kconfig"
+
endmenu
diff --git a/arch/h8300/platform/h8300h/ptrace_h8300h.c b/arch/h8300/platform/h8300h/ptrace_h8300h.c
index 18e51a7167d..6ac93c05a1a 100644
--- a/arch/h8300/platform/h8300h/ptrace_h8300h.c
+++ b/arch/h8300/platform/h8300h/ptrace_h8300h.c
@@ -245,12 +245,12 @@ static unsigned short *getnextpc(struct task_struct *child, unsigned short *pc)
addr = h8300_get_reg(child, regno-1+PT_ER1);
return (unsigned short *)addr;
case relb:
- if ((inst = 0x55) || isbranch(child,inst & 0x0f))
+ if (inst == 0x55 || isbranch(child,inst & 0x0f))
pc = (unsigned short *)((unsigned long)pc +
((signed char)(*fetch_p)));
return pc+1; /* skip myself */
case relw:
- if ((inst = 0x5c) || isbranch(child,(*fetch_p & 0xf0) >> 4))
+ if (inst == 0x5c || isbranch(child,(*fetch_p & 0xf0) >> 4))
pc = (unsigned short *)((unsigned long)pc +
((signed short)(*(pc+1))));
return pc+2; /* skip myself */
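The bug being fixed here is the classic assignment-in-condition typo: `inst = 0x55` stores 0x55 into inst and evaluates to that non-zero value, so the branch displacement was applied for every relb/relw instruction instead of only for the opcodes being tested. A minimal illustration of the pitfall:

    static int is_branch_opcode(unsigned char inst)
    {
            /* Buggy: '=' assigns 0x55, and the condition is always true:
             *     if (inst = 0x55) ...
             */

            /* Fixed: '==' actually compares against the opcode. */
            return inst == 0x55;
    }

Most compilers flag the buggy form with -Wparentheses, which is why intentional assignments in conditions are conventionally written with an extra set of parentheses.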
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index dfd904f6883..d4ae5f9ceae 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -68,7 +68,6 @@ config X86_VOYAGER
config X86_NUMAQ
bool "NUMAQ (IBM/Sequent)"
- select DISCONTIGMEM
select NUMA
help
This option is used for getting Linux to run on a (IBM/Sequent) NUMA
@@ -783,25 +782,48 @@ comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI)
-config DISCONTIGMEM
- bool
- depends on NUMA
- default y
-
config HAVE_ARCH_BOOTMEM_NODE
bool
depends on NUMA
default y
-config HAVE_MEMORY_PRESENT
+config ARCH_HAVE_MEMORY_PRESENT
bool
depends on DISCONTIGMEM
default y
config NEED_NODE_MEMMAP_SIZE
bool
- depends on DISCONTIGMEM
+ depends on DISCONTIGMEM || SPARSEMEM
+ default y
+
+config HAVE_ARCH_ALLOC_REMAP
+ bool
+ depends on NUMA
+ default y
+
+config ARCH_DISCONTIGMEM_ENABLE
+ def_bool y
+ depends on NUMA
+
+config ARCH_DISCONTIGMEM_DEFAULT
+ def_bool y
+ depends on NUMA
+
+config ARCH_SPARSEMEM_ENABLE
+ def_bool y
+ depends on NUMA
+
+config ARCH_SELECT_MEMORY_MODEL
+ def_bool y
+ depends on ARCH_SPARSEMEM_ENABLE
+
+source "mm/Kconfig"
+
+config HAVE_ARCH_EARLY_PFN_TO_NID
+ bool
default y
+ depends on NUMA
config HIGHPTE
bool "Allocate 3rd-level pagetables from highmem"
@@ -939,6 +961,8 @@ config SECCOMP
If unsure, say Y. Only embedded should say N here.
+source kernel/Kconfig.hz
+
endmenu
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 1c36ca332a9..bf7c9ba709f 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -17,6 +17,13 @@
# 20050320 Kianusch Sayah Karadji <kianusch@sk-tech.net>
# Added support for GEODE CPU
+HAS_BIARCH := $(call cc-option-yn, -m32)
+ifeq ($(HAS_BIARCH),y)
+AS := $(AS) --32
+LD := $(LD) -m elf_i386
+CC := $(CC) -m32
+endif
+
LDFLAGS := -m elf_i386
OBJCOPYFLAGS := -O binary -R .note -R .comment -S
LDFLAGS_vmlinux :=
diff --git a/arch/i386/boot/install.sh b/arch/i386/boot/install.sh
index 90f2452b3b9..f17b40dfc0f 100644
--- a/arch/i386/boot/install.sh
+++ b/arch/i386/boot/install.sh
@@ -21,8 +21,8 @@
# User may have a custom install script
-if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi
-if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi
+if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
+if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
# Default install - same as make zlilo
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index d509836b70c..8d993fa7175 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -1133,7 +1133,7 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
}
#ifdef CONFIG_SMP
- update_process_times(user_mode(regs));
+ update_process_times(user_mode_vm(regs));
#endif
}
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index d199e525680..b9954248d0a 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -635,7 +635,7 @@ void __init cpu_init (void)
/* Clear all 6 debug registers: */
-#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );
+#define CD(register) set_debugreg(0, register)
CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index e1c2042b9b7..d66b09e0c82 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -375,6 +375,19 @@ int mtrr_add_page(unsigned long base, unsigned long size,
return error;
}
+static int mtrr_check(unsigned long base, unsigned long size)
+{
+ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
+ printk(KERN_WARNING
+ "mtrr: size and base must be multiples of 4 kiB\n");
+ printk(KERN_DEBUG
+ "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
+ dump_stack();
+ return -1;
+ }
+ return 0;
+}
+
/**
* mtrr_add - Add a memory type region
* @base: Physical base address of region
@@ -415,11 +428,8 @@ int
mtrr_add(unsigned long base, unsigned long size, unsigned int type,
char increment)
{
- if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
- printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
- printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
+ if (mtrr_check(base, size))
return -EINVAL;
- }
return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
increment);
}
@@ -511,11 +521,8 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
int
mtrr_del(int reg, unsigned long base, unsigned long size)
{
- if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
- printk(KERN_INFO "mtrr: size and base must be multiples of 4 kiB\n");
- printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
+ if (mtrr_check(base, size))
return -EINVAL;
- }
return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}
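Both entry points now funnel their argument validation through mtrr_check(), which rejects any base or size that is not a multiple of 4 KiB and dumps a stack trace to identify the offending caller. Typical driver usage of the checked interface, sketched with an invented framebuffer address:

    #include <asm/mtrr.h>

    static void wc_map_example(void)
    {
            /* Map 4 MiB of framebuffer write-combining; mtrr_add() returns
             * a region handle (>= 0) or a negative errno. */
            int reg = mtrr_add(0xf8000000UL, 0x400000UL, MTRR_TYPE_WRCOMB, 1);

            if (reg < 0)
                    return;         /* unaligned arguments fail mtrr_check() */
            /* ... use the write-combined mapping ... */
            mtrr_del(reg, 0xf8000000UL, 0x400000UL);
    }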
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 7323c19f354..8bd77d948a8 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -86,7 +86,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "stepping\t: unknown\n");
if ( cpu_has(c, X86_FEATURE_TSC) ) {
- seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n",
+ seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
cpu_khz / 1000, (cpu_khz % 1000));
}
diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c
index 903190a4b3f..180f070d03c 100644
--- a/arch/i386/kernel/i386_ksyms.c
+++ b/arch/i386/kernel/i386_ksyms.c
@@ -1,97 +1,17 @@
#include <linux/config.h>
#include <linux/module.h>
-#include <linux/smp.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/mca.h>
-#include <linux/sched.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-#include <linux/smp_lock.h>
-#include <linux/pm.h>
-#include <linux/pci.h>
-#include <linux/apm_bios.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-#include <linux/highmem.h>
-#include <linux/time.h>
-
-#include <asm/semaphore.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/uaccess.h>
#include <asm/checksum.h>
-#include <asm/io.h>
-#include <asm/delay.h>
-#include <asm/irq.h>
-#include <asm/mmx.h>
#include <asm/desc.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include <asm/nmi.h>
-#include <asm/ist.h>
-#include <asm/kdebug.h>
-
-extern void dump_thread(struct pt_regs *, struct user *);
-extern spinlock_t rtc_lock;
/* This is definitely a GPL-only symbol */
EXPORT_SYMBOL_GPL(cpu_gdt_table);
-#if defined(CONFIG_APM_MODULE)
-extern void machine_real_restart(unsigned char *, int);
-EXPORT_SYMBOL(machine_real_restart);
-extern void default_idle(void);
-EXPORT_SYMBOL(default_idle);
-#endif
-
-#ifdef CONFIG_SMP
-extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
-extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
-#endif
-
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
-extern struct drive_info_struct drive_info;
-EXPORT_SYMBOL(drive_info);
-#endif
-
-extern unsigned long cpu_khz;
-extern unsigned long get_cmos_time(void);
-
-/* platform dependent support */
-EXPORT_SYMBOL(boot_cpu_data);
-#ifdef CONFIG_DISCONTIGMEM
-EXPORT_SYMBOL(node_data);
-EXPORT_SYMBOL(physnode_map);
-#endif
-#ifdef CONFIG_X86_NUMAQ
-EXPORT_SYMBOL(xquad_portio);
-#endif
-EXPORT_SYMBOL(dump_thread);
-EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(ioremap_nocache);
-EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(pm_idle);
-EXPORT_SYMBOL(pm_power_off);
-EXPORT_SYMBOL(get_cmos_time);
-EXPORT_SYMBOL(cpu_khz);
-EXPORT_SYMBOL(apm_info);
-
EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_failed_interruptible);
EXPORT_SYMBOL(__down_failed_trylock);
EXPORT_SYMBOL(__up_wakeup);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy_generic);
-/* Delay loops */
-EXPORT_SYMBOL(__ndelay);
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
@@ -105,87 +25,11 @@ EXPORT_SYMBOL(__put_user_8);
EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strstr);
-EXPORT_SYMBOL(strncpy_from_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(clear_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__copy_from_user_ll);
-EXPORT_SYMBOL(__copy_to_user_ll);
-EXPORT_SYMBOL(strnlen_user);
-
-EXPORT_SYMBOL(dma_alloc_coherent);
-EXPORT_SYMBOL(dma_free_coherent);
-
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pci_mem_start);
-#endif
-
-#ifdef CONFIG_PCI_BIOS
-EXPORT_SYMBOL(pcibios_set_irq_routing);
-EXPORT_SYMBOL(pcibios_get_irq_routing_table);
-#endif
-
-#ifdef CONFIG_X86_USE_3DNOW
-EXPORT_SYMBOL(_mmx_memcpy);
-EXPORT_SYMBOL(mmx_clear_page);
-EXPORT_SYMBOL(mmx_copy_page);
-#endif
-
-#ifdef CONFIG_X86_HT
-EXPORT_SYMBOL(smp_num_siblings);
-EXPORT_SYMBOL(cpu_sibling_map);
-#endif
-
#ifdef CONFIG_SMP
-EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_callout_map);
+extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
+extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
EXPORT_SYMBOL(__write_lock_failed);
EXPORT_SYMBOL(__read_lock_failed);
-
-/* Global SMP stuff */
-EXPORT_SYMBOL(smp_call_function);
-
-/* TLB flushing */
-EXPORT_SYMBOL(flush_tlb_page);
-#endif
-
-#ifdef CONFIG_X86_IO_APIC
-EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
-#endif
-
-#ifdef CONFIG_MCA
-EXPORT_SYMBOL(machine_id);
-#endif
-
-#ifdef CONFIG_VT
-EXPORT_SYMBOL(screen_info);
-#endif
-
-EXPORT_SYMBOL(get_wchan);
-
-EXPORT_SYMBOL(rtc_lock);
-
-EXPORT_SYMBOL_GPL(set_nmi_callback);
-EXPORT_SYMBOL_GPL(unset_nmi_callback);
-
-EXPORT_SYMBOL(register_die_notifier);
-#ifdef CONFIG_HAVE_DEC_LOCK
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
-
-EXPORT_SYMBOL(__PAGE_KERNEL);
-
-#ifdef CONFIG_HIGHMEM
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic);
-EXPORT_SYMBOL(kmap_atomic_to_page);
-#endif
-
-#if defined(CONFIG_X86_SPEEDSTEP_SMI) || defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
-EXPORT_SYMBOL(ist_info);
#endif
EXPORT_SYMBOL(csum_partial);
diff --git a/arch/i386/kernel/i387.c b/arch/i386/kernel/i387.c
index c55e037f08f..b817168d9c6 100644
--- a/arch/i386/kernel/i387.c
+++ b/arch/i386/kernel/i387.c
@@ -10,6 +10,7 @@
#include <linux/config.h>
#include <linux/sched.h>
+#include <linux/module.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/math_emu.h>
@@ -79,6 +80,7 @@ void kernel_fpu_begin(void)
}
clts();
}
+EXPORT_SYMBOL_GPL(kernel_fpu_begin);
void restore_fpu( struct task_struct *tsk )
{
@@ -526,6 +528,7 @@ int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu )
return fpvalid;
}
+EXPORT_SYMBOL(dump_fpu);
int dump_task_fpu(struct task_struct *tsk, struct user_i387_struct *fpu)
{
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 7a324e8b86f..08540bc4ba3 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -31,7 +31,7 @@
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
-
+#include <linux/module.h>
#include <linux/sysdev.h>
#include <asm/io.h>
#include <asm/smp.h>
@@ -812,6 +812,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
}
return best_guess;
}
+EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
/*
* This function currently is only a helper for the i386 smp boot process where
@@ -1659,6 +1660,12 @@ static void __init setup_ioapic_ids_from_mpc(void)
unsigned long flags;
/*
+ * Don't check I/O APIC IDs for xAPIC systems. They have
+ * no meaning without the serial APIC bus.
+ */
+ if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 < 15))
+ return;
+ /*
* This is broken; anything with a real cpu count has to
* circumvent this idiocy regardless.
*/
@@ -1684,10 +1691,6 @@ static void __init setup_ioapic_ids_from_mpc(void)
mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
}
- /* Don't check I/O APIC IDs for some xAPIC systems. They have
- * no meaning without the serial APIC bus. */
- if (NO_IOAPIC_CHECK)
- continue;
/*
* Sanity check, is the ID really free? Every APIC in a
* system must have a unique ID or we get lots of nice
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 59ff9b45506..3762f6b35ab 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -23,6 +23,9 @@
* Rusty Russell).
* 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
* interface to access function arguments.
+ * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
+ * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+ * <prasanna@in.ibm.com> added function-return probes.
*/
#include <linux/config.h>
@@ -30,15 +33,14 @@
#include <linux/ptrace.h>
#include <linux/spinlock.h>
#include <linux/preempt.h>
+#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
-/* kprobe_status settings */
-#define KPROBE_HIT_ACTIVE 0x00000001
-#define KPROBE_HIT_SS 0x00000002
-
static struct kprobe *current_kprobe;
static unsigned long kprobe_status, kprobe_old_eflags, kprobe_saved_eflags;
+static struct kprobe *kprobe_prev;
+static unsigned long kprobe_status_prev, kprobe_old_eflags_prev, kprobe_saved_eflags_prev;
static struct pt_regs jprobe_saved_regs;
static long *jprobe_saved_esp;
/* copy of the kernel stack at the probe fire time */
@@ -68,16 +70,50 @@ int arch_prepare_kprobe(struct kprobe *p)
void arch_copy_kprobe(struct kprobe *p)
{
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+ p->opcode = *p->addr;
}
-void arch_remove_kprobe(struct kprobe *p)
+void arch_arm_kprobe(struct kprobe *p)
{
+ *p->addr = BREAKPOINT_INSTRUCTION;
+ flush_icache_range((unsigned long) p->addr,
+ (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
-static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
+void arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
- regs->eip = (unsigned long)p->addr;
+ flush_icache_range((unsigned long) p->addr,
+ (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+}
+
+void arch_remove_kprobe(struct kprobe *p)
+{
+}
+
+static inline void save_previous_kprobe(void)
+{
+ kprobe_prev = current_kprobe;
+ kprobe_status_prev = kprobe_status;
+ kprobe_old_eflags_prev = kprobe_old_eflags;
+ kprobe_saved_eflags_prev = kprobe_saved_eflags;
+}
+
+static inline void restore_previous_kprobe(void)
+{
+ current_kprobe = kprobe_prev;
+ kprobe_status = kprobe_status_prev;
+ kprobe_old_eflags = kprobe_old_eflags_prev;
+ kprobe_saved_eflags = kprobe_saved_eflags_prev;
+}
+
+static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
+{
+ current_kprobe = p;
+ kprobe_saved_eflags = kprobe_old_eflags
+ = (regs->eflags & (TF_MASK | IF_MASK));
+ if (is_IF_modifier(p->opcode))
+ kprobe_saved_eflags &= ~IF_MASK;
}
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -91,6 +127,50 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
regs->eip = (unsigned long)&p->ainsn.insn;
}
+struct task_struct *arch_get_kprobe_task(void *ptr)
+{
+ return ((struct thread_info *) (((unsigned long) ptr) &
+ (~(THREAD_SIZE -1))))->task;
+}
+
+void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+{
+ unsigned long *sara = (unsigned long *)&regs->esp;
+ struct kretprobe_instance *ri;
+ static void *orig_ret_addr;
+
+ /*
+ * Save the return address when the return probe hits
+ * the first time, and use it to populate the (krprobe
+ * instance)->ret_addr for subsequent return probes at
+ * the same addrress since stack address would have
+ * the kretprobe_trampoline by then.
+ */
+ if (((void*) *sara) != kretprobe_trampoline)
+ orig_ret_addr = (void*) *sara;
+
+ if ((ri = get_free_rp_inst(rp)) != NULL) {
+ ri->rp = rp;
+ ri->stack_addr = sara;
+ ri->ret_addr = orig_ret_addr;
+ add_rp_inst(ri);
+ /* Replace the return addr with trampoline addr */
+ *sara = (unsigned long) &kretprobe_trampoline;
+ } else {
+ rp->nmissed++;
+ }
+}
+
+void arch_kprobe_flush_task(struct task_struct *tk)
+{
+ struct kretprobe_instance *ri;
+ while ((ri = get_rp_inst_tsk(tk)) != NULL) {
+ *((unsigned long *)(ri->stack_addr)) =
+ (unsigned long) ri->ret_addr;
+ recycle_rp_inst(ri);
+ }
+}
+
/*
* Interrupts are disabled on entry as trap3 is an interrupt gate and they
* remain disabled throughout this function.
@@ -127,8 +207,18 @@ static int kprobe_handler(struct pt_regs *regs)
unlock_kprobes();
goto no_kprobe;
}
- disarm_kprobe(p, regs);
- ret = 1;
+ /* We have reentered the kprobe_handler(), since
+ * another probe was hit while within the handler.
+ * We here save the original kprobes variables and
+ * just single step on the instruction of the new probe
+ * without calling any user handlers.
+ */
+ save_previous_kprobe();
+ set_current_kprobe(p, regs);
+ p->nmissed++;
+ prepare_singlestep(p, regs);
+ kprobe_status = KPROBE_REENTER;
+ return 1;
} else {
p = current_kprobe;
if (p->break_handler && p->break_handler(p, regs)) {
@@ -163,11 +253,7 @@ static int kprobe_handler(struct pt_regs *regs)
}
kprobe_status = KPROBE_HIT_ACTIVE;
- current_kprobe = p;
- kprobe_saved_eflags = kprobe_old_eflags
- = (regs->eflags & (TF_MASK | IF_MASK));
- if (is_IF_modifier(p->opcode))
- kprobe_saved_eflags &= ~IF_MASK;
+ set_current_kprobe(p, regs);
if (p->pre_handler && p->pre_handler(p, regs))
/* handler has already set things up, so skip ss setup */
@@ -184,6 +270,55 @@ no_kprobe:
}
/*
+ * For function-return probes, init_kprobes() establishes a probepoint
+ * here. When a retprobed function returns, this probe is hit and
+ * trampoline_probe_handler() runs, calling the kretprobe's handler.
+ */
+ void kretprobe_trampoline_holder(void)
+ {
+ asm volatile ( ".global kretprobe_trampoline\n"
+ "kretprobe_trampoline: \n"
+ "nop\n");
+ }
+
+/*
+ * Called when we hit the probe point at kretprobe_trampoline
+ */
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct task_struct *tsk;
+ struct kretprobe_instance *ri;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ unsigned long *sara = ((unsigned long *) &regs->esp) - 1;
+
+ tsk = arch_get_kprobe_task(sara);
+ head = kretprobe_inst_table_head(tsk);
+
+ hlist_for_each_entry(ri, node, head, hlist) {
+ if (ri->stack_addr == sara && ri->rp) {
+ if (ri->rp->handler)
+ ri->rp->handler(ri, regs);
+ }
+ }
+ return 0;
+}
+
+void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+ struct kretprobe_instance *ri;
+ /* RA already popped */
+ unsigned long *sara = ((unsigned long *)&regs->esp) - 1;
+
+ while ((ri = get_rp_inst(sara))) {
+ regs->eip = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri);
+ }
+ regs->eflags &= ~TF_MASK;
+}
+
+/*
* Called after single-stepping. p->addr is the address of the
* instruction whose first byte has been replaced by the "int 3"
* instruction. To avoid the SMP problems that can occur when we
@@ -263,13 +398,22 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
if (!kprobe_running())
return 0;
- if (current_kprobe->post_handler)
+ if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
+ kprobe_status = KPROBE_HIT_SSDONE;
current_kprobe->post_handler(current_kprobe, regs, 0);
+ }
- resume_execution(current_kprobe, regs);
+ if (current_kprobe->post_handler != trampoline_post_handler)
+ resume_execution(current_kprobe, regs);
regs->eflags |= kprobe_saved_eflags;
+ /*Restore back the original saved kprobes variables and continue. */
+ if (kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe();
+ goto out;
+ }
unlock_kprobes();
+out:
preempt_enable_no_resched();
/*
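The trampoline machinery is what makes the new function-return probes usable from modules: a client hangs a handler off a kretprobe, and the handler fires each time the probed function returns. A hedged registration sketch (the probed function name is invented; the structure fields match the kretprobe API introduced alongside this patch series):

    #include <linux/kprobes.h>

    static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
    {
            /* on i386, regs->eax carries the probed function's return value */
            return 0;
    }

    static struct kretprobe my_rp = {
            .handler   = my_ret_handler,
            .maxactive = 4,         /* concurrent activations to track */
    };

    static int __init my_rp_init(void)
    {
            my_rp.kp.addr = (kprobe_opcode_t *) some_traced_function;
            return register_kretprobe(&my_rp);
    }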
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 1347ab4939e..383a11600d2 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -914,7 +914,10 @@ void __init mp_register_ioapic (
mp_ioapics[idx].mpc_apicaddr = address;
set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
- mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15))
+ mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
+ else
+ mp_ioapics[idx].mpc_apicid = id;
mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
/*
@@ -1055,11 +1058,20 @@ void __init mp_config_acpi_legacy_irqs (void)
}
}
+#define MAX_GSI_NUM 4096
+
int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
{
int ioapic = -1;
int ioapic_pin = 0;
int idx, bit = 0;
+ static int pci_irq = 16;
+ /*
+ * Mapping between Global System Interrupts, which
+ * represent all possible interrupts, and IRQs
+ * assigned to actual devices.
+ */
+ static int gsi_to_irq[MAX_GSI_NUM];
#ifdef CONFIG_ACPI_BUS
/* Don't set up the ACPI SCI because it's already set up */
@@ -1094,11 +1106,26 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
- return gsi;
+ return gsi_to_irq[gsi];
}
mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
+ if (edge_level) {
+ /*
+ * For PCI devices assign IRQs in order, avoiding gaps
+ * due to unused I/O APIC pins.
+ */
+ int irq = gsi;
+ if (gsi < MAX_GSI_NUM) {
+ gsi = pci_irq++;
+ gsi_to_irq[irq] = gsi;
+ } else {
+ printk(KERN_ERR "GSI %u is too high\n", gsi);
+ return gsi;
+ }
+ }
+
io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
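
To see what the edge-triggered branch above does, here is a hedged user-space model of the remapping policy (not kernel code): sparse PCI GSIs collapse onto consecutive IRQs starting at 16, with the original GSI recorded as the index into gsi_to_irq[].

#include <stdio.h>

#define MAX_GSI_NUM 4096

static int pci_irq = 16;
static int gsi_to_irq[MAX_GSI_NUM];

/* model of the edge_level branch in mp_register_gsi() */
static int register_gsi(int gsi)
{
	int irq = gsi;

	if (gsi >= MAX_GSI_NUM)	/* mirrors the "GSI too high" bail-out */
		return gsi;
	gsi = pci_irq++;	/* next sequential IRQ, skipping unused pins */
	gsi_to_irq[irq] = gsi;
	return gsi;
}

int main(void)
{
	/* three sparse GSIs collapse onto IRQs 16, 17, 18 */
	printf("%d %d %d\n",
	       register_gsi(20), register_gsi(25), register_gsi(30));
	return 0;
}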
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 2c0ee9c2d02..da6c46d667c 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -28,8 +28,7 @@
#include <linux/sysctl.h>
#include <asm/smp.h>
-#include <asm/mtrr.h>
-#include <asm/mpspec.h>
+#include <asm/div64.h>
#include <asm/nmi.h>
#include "mach_traps.h"
@@ -324,6 +323,16 @@ static void clear_msr_range(unsigned int base, unsigned int n)
wrmsr(base+i, 0, 0);
}
+static inline void write_watchdog_counter(const char *descr)
+{
+ u64 count = (u64)cpu_khz * 1000;
+
+ do_div(count, nmi_hz);
+	if (descr)
+ Dprintk("setting %s to -0x%08Lx\n", descr, count);
+ wrmsrl(nmi_perfctr_msr, 0 - count);
+}
+
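
The helper above centralizes the four open-coded -(cpu_khz/nmi_hz*1000) computations and, by widening to 64 bits and multiplying before dividing, keeps sub-kHz precision (and leaves headroom for clocks at or above roughly 4.3 GHz, where a 32-bit cpu_khz*1000 would wrap). A quick user-space check of the arithmetic, with an assumed 2.333417 GHz part and nmi_hz of 1000:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cpu_khz = 2333417;	/* assumed 2.333417 GHz part */
	unsigned int nmi_hz = 1000;

	/* old expression: divide first, losing sub-kHz precision */
	uint32_t old = cpu_khz / nmi_hz * 1000;

	/* new expression: widen to 64 bits, multiply, then divide */
	uint64_t count = (uint64_t)cpu_khz * 1000 / nmi_hz;

	printf("old=%u new=%llu\n", old, (unsigned long long)count);
	/* prints: old=2333000 new=2333417 */
	return 0;
}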
static void setup_k7_watchdog(void)
{
unsigned int evntsel;
@@ -339,8 +348,7 @@ static void setup_k7_watchdog(void)
| K7_NMI_EVENT;
wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
- Dprintk("setting K7_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000));
- wrmsr(MSR_K7_PERFCTR0, -(cpu_khz/nmi_hz*1000), -1);
+ write_watchdog_counter("K7_PERFCTR0");
apic_write(APIC_LVTPC, APIC_DM_NMI);
evntsel |= K7_EVNTSEL_ENABLE;
wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
@@ -361,8 +369,7 @@ static void setup_p6_watchdog(void)
| P6_NMI_EVENT;
wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
- Dprintk("setting P6_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000));
- wrmsr(MSR_P6_PERFCTR0, -(cpu_khz/nmi_hz*1000), 0);
+ write_watchdog_counter("P6_PERFCTR0");
apic_write(APIC_LVTPC, APIC_DM_NMI);
evntsel |= P6_EVNTSEL0_ENABLE;
wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
@@ -402,8 +409,7 @@ static int setup_p4_watchdog(void)
wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
- Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz/nmi_hz*1000));
- wrmsr(MSR_P4_IQ_COUNTER0, -(cpu_khz/nmi_hz*1000), -1);
+ write_watchdog_counter("P4_IQ_COUNTER0");
apic_write(APIC_LVTPC, APIC_DM_NMI);
wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
return 1;
@@ -518,7 +524,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
* other P6 variant */
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
- wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
+ write_watchdog_counter(NULL);
}
}
diff --git a/arch/i386/kernel/pci-dma.c b/arch/i386/kernel/pci-dma.c
index 4de2e03c7b4..1e51427cc9e 100644
--- a/arch/i386/kernel/pci-dma.c
+++ b/arch/i386/kernel/pci-dma.c
@@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
+#include <linux/module.h>
#include <asm/io.h>
struct dma_coherent_mem {
@@ -54,6 +55,7 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
}
return ret;
}
+EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
@@ -68,6 +70,7 @@ void dma_free_coherent(struct device *dev, size_t size,
} else
free_pages((unsigned long)vaddr, order);
}
+EXPORT_SYMBOL(dma_free_coherent);
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dma_addr_t device_addr, size_t size, int flags)
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 96e3ea6b17c..aea2ce1145d 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -37,6 +37,7 @@
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
+#include <linux/kprobes.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -73,6 +74,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
* Powermanagement idle function, if any..
*/
void (*pm_idle)(void);
+EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
void disable_hlt(void)
@@ -105,6 +107,9 @@ void default_idle(void)
cpu_relax();
}
}
+#ifdef CONFIG_APM_MODULE
+EXPORT_SYMBOL(default_idle);
+#endif
/*
* On SMP it's slightly faster (but much more power-consuming!)
@@ -262,7 +267,7 @@ void show_regs(struct pt_regs * regs)
printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
print_symbol("EIP is at %s\n", regs->eip);
- if (regs->xcs & 3)
+ if (user_mode(regs))
printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
printk(" EFLAGS: %08lx %s (%s)\n",
regs->eflags, print_tainted(), system_utsname.release);
@@ -325,6 +330,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
/* Ok, create the new process.. */
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
+EXPORT_SYMBOL(kernel_thread);
/*
* Free current thread data structures etc..
@@ -334,6 +340,13 @@ void exit_thread(void)
struct task_struct *tsk = current;
struct thread_struct *t = &tsk->thread;
+ /*
+ * Remove function-return probe instances associated with this task
+ * and put them back on the free list. Do not insert an exit probe for
+	 * this function; it will be disabled by kprobe_flush_task if you do.
+ */
+ kprobe_flush_task(tsk);
+
/* The process may have allocated an io port bitmap... nuke it. */
if (unlikely(NULL != t->io_bitmap_ptr)) {
int cpu = get_cpu();
@@ -357,6 +370,13 @@ void flush_thread(void)
{
struct task_struct *tsk = current;
+ /*
+ * Remove function-return probe instances associated with this task
+ * and put them back on the free list. Do not insert an exit probe for
+	 * this function; it will be disabled by kprobe_flush_task if you do.
+ */
+ kprobe_flush_task(tsk);
+
memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
/*
@@ -508,6 +528,7 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
dump->u_fpvalid = dump_fpu (regs, &dump->i387);
}
+EXPORT_SYMBOL(dump_thread);
/*
* Capture the user space registers if the task is not running (in user space)
@@ -627,13 +648,13 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
* Now maybe reload the debug registers
*/
if (unlikely(next->debugreg[7])) {
- loaddebug(next, 0);
- loaddebug(next, 1);
- loaddebug(next, 2);
- loaddebug(next, 3);
+ set_debugreg(current->thread.debugreg[0], 0);
+ set_debugreg(current->thread.debugreg[1], 1);
+ set_debugreg(current->thread.debugreg[2], 2);
+ set_debugreg(current->thread.debugreg[3], 3);
/* no 4 and 5 */
- loaddebug(next, 6);
- loaddebug(next, 7);
+ set_debugreg(current->thread.debugreg[6], 6);
+ set_debugreg(current->thread.debugreg[7], 7);
}
if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
@@ -731,6 +752,7 @@ unsigned long get_wchan(struct task_struct *p)
} while (count++ < 16);
return 0;
}
+EXPORT_SYMBOL(get_wchan);
/*
* sys_alloc_thread_area: get a yet unused TLS descriptor index.
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index e34f651fa13..0da59b42843 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -668,7 +668,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
info.si_code = TRAP_BRKPT;
/* User-mode eip? */
- info.si_addr = user_mode(regs) ? (void __user *) regs->eip : NULL;
+ info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL;
/* Send us the fakey SIGTRAP */
force_sig_info(SIGTRAP, &info, tsk);
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c
index 6dc27eb70ee..db912209a8d 100644
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -2,6 +2,7 @@
* linux/arch/i386/kernel/reboot.c
*/
+#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/delay.h>
@@ -19,6 +20,7 @@
* Power off function, if any
*/
void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
static int reboot_mode;
static int reboot_thru_bios;
@@ -295,6 +297,9 @@ void machine_real_restart(unsigned char *code, int length)
:
: "i" ((void *) (0x1000 - sizeof (real_mode_switch) - 100)));
}
+#ifdef CONFIG_APM_MODULE
+EXPORT_SYMBOL(machine_real_restart);
+#endif
void machine_restart(char * __unused)
{
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 2bfbddebdbf..30406fd0b64 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -23,8 +23,10 @@
* This file handles the architecture-dependent parts of initialization
*/
+#include <linux/config.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/mmzone.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
@@ -73,6 +75,7 @@ EXPORT_SYMBOL(efi_enabled);
struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
+EXPORT_SYMBOL(boot_cpu_data);
unsigned long mmu_cr4_features;
@@ -90,12 +93,18 @@ extern acpi_interrupt_flags acpi_sci_flags;
/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
+#ifdef CONFIG_MCA
+EXPORT_SYMBOL(machine_id);
+#endif
unsigned int machine_submodel_id;
unsigned int BIOS_revision;
unsigned int mca_pentium_flag;
/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0x10000000;
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pci_mem_start);
+#endif
/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;
@@ -107,14 +116,26 @@ static unsigned int highmem_pages = -1;
* Setup options
*/
struct drive_info_struct { char dummy[32]; } drive_info;
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \
+ defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
+EXPORT_SYMBOL(drive_info);
+#endif
struct screen_info screen_info;
+#ifdef CONFIG_VT
+EXPORT_SYMBOL(screen_info);
+#endif
struct apm_info apm_info;
+EXPORT_SYMBOL(apm_info);
struct sys_desc_table_struct {
unsigned short length;
unsigned char table[0];
};
struct edid_info edid_info;
struct ist_info ist_info;
+#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
+ defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
+EXPORT_SYMBOL(ist_info);
+#endif
struct e820map e820;
extern void early_cpu_init(void);
@@ -1022,7 +1043,7 @@ static void __init reserve_ebda_region(void)
reserve_bootmem(addr, PAGE_SIZE);
}
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init setup_bootmem_allocator(void);
static unsigned long __init setup_memory(void)
{
@@ -1072,9 +1093,9 @@ void __init zone_sizes_init(void)
free_area_init(zones_size);
}
#else
-extern unsigned long setup_memory(void);
+extern unsigned long __init setup_memory(void);
extern void zone_sizes_init(void);
-#endif /* !CONFIG_DISCONTIGMEM */
+#endif /* !CONFIG_NEED_MULTIPLE_NODES */
void __init setup_bootmem_allocator(void)
{
@@ -1475,6 +1496,7 @@ void __init setup_arch(char **cmdline_p)
#endif
paging_init();
remapped_pgdat_init();
+ sparse_init();
zone_sizes_init();
/*
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index ea46d028af0..b9b8f4e20fa 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -346,8 +346,8 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
extern void __user __kernel_sigreturn;
extern void __user __kernel_rt_sigreturn;
-static void setup_frame(int sig, struct k_sigaction *ka,
- sigset_t *set, struct pt_regs * regs)
+static int setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs * regs)
{
void __user *restorer;
struct sigframe __user *frame;
@@ -429,13 +429,14 @@ static void setup_frame(int sig, struct k_sigaction *ka,
current->comm, current->pid, frame, regs->eip, frame->pretcode);
#endif
- return;
+ return 1;
give_sigsegv:
force_sigsegv(sig, current);
+ return 0;
}
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs * regs)
{
void __user *restorer;
@@ -522,20 +523,23 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
current->comm, current->pid, frame, regs->eip, frame->pretcode);
#endif
- return;
+ return 1;
give_sigsegv:
force_sigsegv(sig, current);
+ return 0;
}
/*
* OK, we're invoking a handler
*/
-static void
+static int
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
sigset_t *oldset, struct pt_regs * regs)
{
+ int ret;
+
/* Are we from a system call? */
if (regs->orig_eax >= 0) {
/* If so, check system call restarting.. */
@@ -569,17 +573,19 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
/* Set up the stack frame */
if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame(sig, ka, info, oldset, regs);
+ ret = setup_rt_frame(sig, ka, info, oldset, regs);
else
- setup_frame(sig, ka, oldset, regs);
+ ret = setup_frame(sig, ka, oldset, regs);
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ if (ret && !(ka->sa.sa_flags & SA_NODEFER)) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
+
+ return ret;
}
/*
@@ -599,7 +605,7 @@ int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
* kernel mode. Just return without doing anything
* if so.
*/
- if ((regs->xcs & 3) != 3)
+ if (!user_mode(regs))
return 1;
if (current->flags & PF_FREEZE) {
@@ -618,12 +624,11 @@ int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
* inside the kernel.
*/
if (unlikely(current->thread.debugreg[7])) {
- loaddebug(&current->thread, 7);
+ set_debugreg(current->thread.debugreg[7], 7);
}
/* Whee! Actually deliver the signal. */
- handle_signal(signr, &info, &ka, oldset, regs);
- return 1;
+ return handle_signal(signr, &info, &ka, oldset, regs);
}
no_signal:
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 6223c33ac91..68be7d0c723 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -19,6 +19,7 @@
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
@@ -452,6 +453,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
preempt_enable();
}
+EXPORT_SYMBOL(flush_tlb_page);
static void do_flush_tlb_all(void* info)
{
@@ -547,6 +549,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
return 0;
}
+EXPORT_SYMBOL(smp_call_function);
static void stop_this_cpu (void * dummy)
{
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index bc1bb6919e6..c20d96d5c15 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -60,6 +60,9 @@ static int __initdata smp_b_stepping;
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
+#ifdef CONFIG_X86_HT
+EXPORT_SYMBOL(smp_num_siblings);
+#endif
int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
EXPORT_SYMBOL(phys_proc_id);
int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
@@ -67,13 +70,16 @@ EXPORT_SYMBOL(cpu_core_id);
/* bitmap of online cpus */
cpumask_t cpu_online_map;
+EXPORT_SYMBOL(cpu_online_map);
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
+EXPORT_SYMBOL(cpu_callout_map);
static cpumask_t smp_commenced_mask;
/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_data);
u8 x86_cpu_to_apicid[NR_CPUS] =
{ [0 ... NR_CPUS-1] = 0xff };
@@ -199,7 +205,7 @@ static void __init synchronize_tsc_bp (void)
unsigned long long t0;
unsigned long long sum, avg;
long long delta;
- unsigned long one_usec;
+ unsigned int one_usec;
int buggy = 0;
printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());
@@ -885,8 +891,14 @@ static void smp_tune_scheduling (void)
static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;
+#ifdef CONFIG_X86_NUMAQ
+EXPORT_SYMBOL(xquad_portio);
+#endif
cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+#ifdef CONFIG_X86_HT
+EXPORT_SYMBOL(cpu_sibling_map);
+#endif
cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index a0dcb7c87c3..e68d9fdb075 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -77,11 +77,13 @@ u64 jiffies_64 = INITIAL_JIFFIES;
EXPORT_SYMBOL(jiffies_64);
-unsigned long cpu_khz; /* Detected as we calibrate the TSC */
+unsigned int cpu_khz; /* Detected as we calibrate the TSC */
+EXPORT_SYMBOL(cpu_khz);
extern unsigned long wall_jiffies;
DEFINE_SPINLOCK(rtc_lock);
+EXPORT_SYMBOL(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
@@ -324,6 +326,8 @@ unsigned long get_cmos_time(void)
return retval;
}
+EXPORT_SYMBOL(get_cmos_time);
+
static void sync_cmos_clock(unsigned long dummy);
static struct timer_list sync_cmos_timer =
diff --git a/arch/i386/kernel/timers/common.c b/arch/i386/kernel/timers/common.c
index 8e201219f52..37353bd3180 100644
--- a/arch/i386/kernel/timers/common.c
+++ b/arch/i386/kernel/timers/common.c
@@ -139,6 +139,15 @@ bad_calibration:
}
#endif
+
+unsigned long read_timer_tsc(void)
+{
+ unsigned long retval;
+ rdtscl(retval);
+ return retval;
+}
+
+
/* calculate cpu_khz */
void init_cpu_khz(void)
{
@@ -154,7 +163,8 @@ void init_cpu_khz(void)
:"=a" (cpu_khz), "=d" (edx)
:"r" (tsc_quotient),
"0" (eax), "1" (edx));
- printk("Detected %lu.%03lu MHz processor.\n", cpu_khz / 1000, cpu_khz % 1000);
+ printk("Detected %u.%03u MHz processor.\n",
+ cpu_khz / 1000, cpu_khz % 1000);
}
}
}
diff --git a/arch/i386/kernel/timers/timer.c b/arch/i386/kernel/timers/timer.c
index a3d6a288088..7e39ed8e33f 100644
--- a/arch/i386/kernel/timers/timer.c
+++ b/arch/i386/kernel/timers/timer.c
@@ -64,3 +64,12 @@ struct timer_opts* __init select_timer(void)
panic("select_timer: Cannot find a suitable timer\n");
return NULL;
}
+
+int read_current_timer(unsigned long *timer_val)
+{
+ if (cur_timer->read_timer) {
+ *timer_val = cur_timer->read_timer();
+ return 0;
+ }
+ return -1;
+}
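
A hedged user-space model of the pattern above: read_timer is an optional hook on the selected timer_opts, so callers probe for it and fall back (e.g., to jiffies-based calibration) when it is absent. All names below are stand-ins.

#include <stdio.h>

/* model of struct timer_opts with an optional read_timer hook */
struct timer_opts_model {
	unsigned long (*read_timer)(void);
};

static unsigned long fake_tsc(void)
{
	static unsigned long t;
	return t += 100;	/* pretend the counter advances */
}

static struct timer_opts_model cur = { .read_timer = fake_tsc };

static int read_current_timer_model(unsigned long *val)
{
	if (cur.read_timer) {
		*val = cur.read_timer();
		return 0;
	}
	return -1;	/* caller falls back to another calibration method */
}

int main(void)
{
	unsigned long t;

	if (read_current_timer_model(&t) == 0)
		printf("tick=%lu\n", t);
	return 0;
}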
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c
index f778f471a09..d766e0963ac 100644
--- a/arch/i386/kernel/timers/timer_hpet.c
+++ b/arch/i386/kernel/timers/timer_hpet.c
@@ -158,7 +158,7 @@ static int __init init_hpet(char* override)
{ unsigned long eax=0, edx=1000;
ASM_DIV64_REG(cpu_khz, edx, tsc_quotient,
eax, edx);
- printk("Detected %lu.%03lu MHz processor.\n",
+ printk("Detected %u.%03u MHz processor.\n",
cpu_khz / 1000, cpu_khz % 1000);
}
set_cyc2ns_scale(cpu_khz/1000);
@@ -186,6 +186,7 @@ static struct timer_opts timer_hpet = {
.get_offset = get_offset_hpet,
.monotonic_clock = monotonic_clock_hpet,
.delay = delay_hpet,
+ .read_timer = read_timer_tsc,
};
struct init_timer_opts __initdata timer_hpet_init = {
diff --git a/arch/i386/kernel/timers/timer_pm.c b/arch/i386/kernel/timers/timer_pm.c
index d77f22030fe..4ef20e66349 100644
--- a/arch/i386/kernel/timers/timer_pm.c
+++ b/arch/i386/kernel/timers/timer_pm.c
@@ -246,6 +246,7 @@ static struct timer_opts timer_pmtmr = {
.get_offset = get_offset_pmtmr,
.monotonic_clock = monotonic_clock_pmtmr,
.delay = delay_pmtmr,
+ .read_timer = read_timer_tsc,
};
struct init_timer_opts __initdata timer_pmtmr_init = {
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index 180444d8782..54c36b18202 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -256,7 +256,7 @@ static unsigned long loops_per_jiffy_ref = 0;
#ifndef CONFIG_SMP
static unsigned long fast_gettimeoffset_ref = 0;
-static unsigned long cpu_khz_ref = 0;
+static unsigned int cpu_khz_ref = 0;
#endif
static int
@@ -323,7 +323,7 @@ static inline void cpufreq_delayed_get(void) { return; }
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
- unsigned long cpu_khz_old = cpu_khz;
+ unsigned int cpu_khz_old = cpu_khz;
if (cpu_has_tsc) {
init_cpu_khz();
@@ -534,7 +534,8 @@ static int __init init_tsc(char* override)
:"=a" (cpu_khz), "=d" (edx)
:"r" (tsc_quotient),
"0" (eax), "1" (edx));
- printk("Detected %lu.%03lu MHz processor.\n", cpu_khz / 1000, cpu_khz % 1000);
+ printk("Detected %u.%03u MHz processor.\n",
+ cpu_khz / 1000, cpu_khz % 1000);
}
set_cyc2ns_scale(cpu_khz/1000);
return 0;
@@ -572,6 +573,7 @@ static struct timer_opts timer_tsc = {
.get_offset = get_offset_tsc,
.monotonic_clock = monotonic_clock_tsc,
.delay = delay_tsc,
+ .read_timer = read_timer_tsc,
};
struct init_timer_opts __initdata timer_tsc_init = {
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 83c579e82a8..e4d4e2162c7 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -104,6 +104,7 @@ int register_die_notifier(struct notifier_block *nb)
spin_unlock_irqrestore(&die_notifier_lock, flags);
return err;
}
+EXPORT_SYMBOL(register_die_notifier);
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
@@ -209,7 +210,7 @@ void show_registers(struct pt_regs *regs)
esp = (unsigned long) (&regs->esp);
ss = __KERNEL_DS;
- if (regs->xcs & 3) {
+ if (user_mode(regs)) {
in_kernel = 0;
esp = regs->esp;
ss = regs->xss & 0xffff;
@@ -265,7 +266,7 @@ static void handle_BUG(struct pt_regs *regs)
char c;
unsigned long eip;
- if (regs->xcs & 3)
+ if (user_mode(regs))
goto no_bug; /* Not in kernel */
eip = regs->eip;
@@ -353,7 +354,7 @@ void die(const char * str, struct pt_regs * regs, long err)
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
- if (!(regs->eflags & VM_MASK) && !(3 & regs->xcs))
+ if (!user_mode_vm(regs))
die(str, regs, err);
}
@@ -366,7 +367,7 @@ static void do_trap(int trapnr, int signr, char *str, int vm86,
goto trap_signal;
}
- if (!(regs->xcs & 3))
+ if (!user_mode(regs))
goto kernel_trap;
trap_signal: {
@@ -488,7 +489,7 @@ fastcall void do_general_protection(struct pt_regs * regs, long error_code)
if (regs->eflags & VM_MASK)
goto gp_in_vm86;
- if (!(regs->xcs & 3))
+ if (!user_mode(regs))
goto gp_in_kernel;
current->thread.error_code = error_code;
@@ -636,11 +637,13 @@ void set_nmi_callback(nmi_callback_t callback)
{
nmi_callback = callback;
}
+EXPORT_SYMBOL_GPL(set_nmi_callback);
void unset_nmi_callback(void)
{
nmi_callback = dummy_nmi_callback;
}
+EXPORT_SYMBOL_GPL(unset_nmi_callback);
#ifdef CONFIG_KPROBES
fastcall void do_int3(struct pt_regs *regs, long error_code)
@@ -682,7 +685,7 @@ fastcall void do_debug(struct pt_regs * regs, long error_code)
unsigned int condition;
struct task_struct *tsk = current;
- __asm__ __volatile__("movl %%db6,%0" : "=r" (condition));
+ get_debugreg(condition, 6);
if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
SIGTRAP) == NOTIFY_STOP)
@@ -713,7 +716,7 @@ fastcall void do_debug(struct pt_regs * regs, long error_code)
* check for kernel mode by just checking the CPL
* of CS.
*/
- if ((regs->xcs & 3) == 0)
+ if (!user_mode(regs))
goto clear_TF_reenable;
}
@@ -724,9 +727,7 @@ fastcall void do_debug(struct pt_regs * regs, long error_code)
* the signal is delivered.
*/
clear_dr7:
- __asm__("movl %0,%%db7"
- : /* no output */
- : "r" (0));
+ set_debugreg(0, 7);
return;
debug_vm86:
diff --git a/arch/i386/lib/dec_and_lock.c b/arch/i386/lib/dec_and_lock.c
index ab43394dc77..8b81b2524fa 100644
--- a/arch/i386/lib/dec_and_lock.c
+++ b/arch/i386/lib/dec_and_lock.c
@@ -8,6 +8,7 @@
*/
#include <linux/spinlock.h>
+#include <linux/module.h>
#include <asm/atomic.h>
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
@@ -38,3 +39,4 @@ slow_path:
spin_unlock(lock);
return 0;
}
+EXPORT_SYMBOL(_atomic_dec_and_lock);
diff --git a/arch/i386/lib/delay.c b/arch/i386/lib/delay.c
index eb0cdfe9280..c49a6acbee5 100644
--- a/arch/i386/lib/delay.c
+++ b/arch/i386/lib/delay.c
@@ -13,6 +13,7 @@
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/timer.h>
@@ -47,3 +48,8 @@ void __ndelay(unsigned long nsecs)
{
__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
+
+EXPORT_SYMBOL(__delay);
+EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__ndelay);
diff --git a/arch/i386/lib/mmx.c b/arch/i386/lib/mmx.c
index 01f8b1a2cc8..2afda94dffd 100644
--- a/arch/i386/lib/mmx.c
+++ b/arch/i386/lib/mmx.c
@@ -3,6 +3,7 @@
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
+#include <linux/module.h>
#include <asm/i387.h>
@@ -397,3 +398,7 @@ void mmx_copy_page(void *to, void *from)
else
fast_copy_page(to, from);
}
+
+EXPORT_SYMBOL(_mmx_memcpy);
+EXPORT_SYMBOL(mmx_clear_page);
+EXPORT_SYMBOL(mmx_copy_page);
diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c
index 51aa2bbb026..4cf981d70f4 100644
--- a/arch/i386/lib/usercopy.c
+++ b/arch/i386/lib/usercopy.c
@@ -84,6 +84,7 @@ __strncpy_from_user(char *dst, const char __user *src, long count)
__do_strncpy_from_user(dst, src, count, res);
return res;
}
+EXPORT_SYMBOL(__strncpy_from_user);
/**
* strncpy_from_user: - Copy a NUL terminated string from userspace.
@@ -111,7 +112,7 @@ strncpy_from_user(char *dst, const char __user *src, long count)
__do_strncpy_from_user(dst, src, count, res);
return res;
}
-
+EXPORT_SYMBOL(strncpy_from_user);
/*
* Zero Userspace
@@ -157,6 +158,7 @@ clear_user(void __user *to, unsigned long n)
__do_clear_user(to, n);
return n;
}
+EXPORT_SYMBOL(clear_user);
/**
* __clear_user: - Zero a block of memory in user space, with less checking.
@@ -175,6 +177,7 @@ __clear_user(void __user *to, unsigned long n)
__do_clear_user(to, n);
return n;
}
+EXPORT_SYMBOL(__clear_user);
/**
* strlen_user: - Get the size of a string in user space.
@@ -218,6 +221,7 @@ long strnlen_user(const char __user *s, long n)
:"cc");
return res & mask;
}
+EXPORT_SYMBOL(strnlen_user);
#ifdef CONFIG_X86_INTEL_USERCOPY
static unsigned long
@@ -570,6 +574,7 @@ survive:
n = __copy_user_intel(to, from, n);
return n;
}
+EXPORT_SYMBOL(__copy_to_user_ll);
unsigned long
__copy_from_user_ll(void *to, const void __user *from, unsigned long n)
@@ -581,6 +586,7 @@ __copy_from_user_ll(void *to, const void __user *from, unsigned long n)
n = __copy_user_zeroing_intel(to, from, n);
return n;
}
+EXPORT_SYMBOL(__copy_from_user_ll);
/**
* copy_to_user: - Copy a block of data into user space.
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index a6e0ddd65bd..8c8527593da 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -1288,7 +1288,7 @@ smp_local_timer_interrupt(struct pt_regs * regs)
per_cpu(prof_counter, cpu);
}
- update_process_times(user_mode(regs));
+ update_process_times(user_mode_vm(regs));
}
if( ((1<<cpu) & voyager_extended_vic_processors) == 0)
diff --git a/arch/i386/mm/Makefile b/arch/i386/mm/Makefile
index fc327250684..80908b5aa60 100644
--- a/arch/i386/mm/Makefile
+++ b/arch/i386/mm/Makefile
@@ -4,7 +4,7 @@
obj-y := init.o pgtable.o fault.o ioremap.o extable.o pageattr.o mmap.o
-obj-$(CONFIG_DISCONTIGMEM) += discontig.o
+obj-$(CONFIG_NUMA) += discontig.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index 1726b4096b1..f429c871e84 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -29,12 +29,14 @@
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/nodemask.h>
+#include <linux/module.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/mmzone.h>
#include <bios_ebda.h>
struct pglist_data *node_data[MAX_NUMNODES];
+EXPORT_SYMBOL(node_data);
bootmem_data_t node0_bdata;
/*
@@ -42,12 +44,16 @@ bootmem_data_t node0_bdata;
* populated the following initialisation.
*
* 1) node_online_map - the map of all nodes configured (online) in the system
- * 2) physnode_map - the mapping between a pfn and owning node
- * 3) node_start_pfn - the starting page frame number for a node
+ * 2) node_start_pfn - the starting page frame number for a node
 * 3) node_end_pfn - the ending page frame number for a node
*/
+unsigned long node_start_pfn[MAX_NUMNODES];
+unsigned long node_end_pfn[MAX_NUMNODES];
+
+#ifdef CONFIG_DISCONTIGMEM
/*
+ * 4) physnode_map - the mapping between a pfn and owning node
* physnode_map keeps track of the physical memory layout of a generic
* numa node on a 256Mb break (each element of the array will
* represent 256Mb of memory and will be marked by the node id. so,
@@ -59,6 +65,7 @@ bootmem_data_t node0_bdata;
* physnode_map[8- ] = -1;
*/
s8 physnode_map[MAX_ELEMENTS] = { [0 ... (MAX_ELEMENTS - 1)] = -1};
+EXPORT_SYMBOL(physnode_map);
void memory_present(int nid, unsigned long start, unsigned long end)
{
@@ -85,9 +92,7 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
return (nr_pages + 1) * sizeof(struct page);
}
-
-unsigned long node_start_pfn[MAX_NUMNODES];
-unsigned long node_end_pfn[MAX_NUMNODES];
+#endif
extern unsigned long find_max_low_pfn(void);
extern void find_max_pfn(void);
@@ -108,6 +113,9 @@ unsigned long node_remap_offset[MAX_NUMNODES];
void *node_remap_start_vaddr[MAX_NUMNODES];
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
+void *node_remap_end_vaddr[MAX_NUMNODES];
+void *node_remap_alloc_vaddr[MAX_NUMNODES];
+
/*
* FLAT - support for basic PC memory model with discontig enabled, essentially
* a single node with all available processors in it with a flat
@@ -146,6 +154,21 @@ static void __init find_max_pfn_node(int nid)
BUG();
}
+/* Find the owning node for a pfn. */
+int early_pfn_to_nid(unsigned long pfn)
+{
+ int nid;
+
+ for_each_node(nid) {
+ if (node_end_pfn[nid] == 0)
+ break;
+ if (node_start_pfn[nid] <= pfn && node_end_pfn[nid] >= pfn)
+ return nid;
+ }
+
+ return 0;
+}
+
/*
* Allocate memory for the pg_data_t for this node via a crude pre-bootmem
* method. For node zero take this from the bottom of memory, for
@@ -163,6 +186,21 @@ static void __init allocate_pgdat(int nid)
}
}
+void *alloc_remap(int nid, unsigned long size)
+{
+ void *allocation = node_remap_alloc_vaddr[nid];
+
+ size = ALIGN(size, L1_CACHE_BYTES);
+
+ if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
+ return 0;
+
+ node_remap_alloc_vaddr[nid] += size;
+ memset(allocation, 0, size);
+
+ return allocation;
+}
+
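
alloc_remap() above is a simple bump allocator over each node's remapped KVA window. A hedged user-space model of the same pattern, with a fixed arena standing in for the per-node window and 64 standing in for L1_CACHE_BYTES:

#include <stdio.h>
#include <string.h>

#define ARENA_SIZE 4096
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static char arena[ARENA_SIZE];
static char *cursor = arena;

static void *bump_alloc(unsigned long size)
{
	void *p = cursor;

	size = ALIGN_UP(size, 64);	/* stand-in for L1_CACHE_BYTES */
	if (cursor + size >= arena + ARENA_SIZE)
		return NULL;		/* window exhausted */
	cursor += size;
	memset(p, 0, size);		/* zeroed, like alloc_remap() */
	return p;
}

int main(void)
{
	printf("%p %p\n", bump_alloc(100), bump_alloc(100));
	return 0;
}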
void __init remap_numa_kva(void)
{
void *vaddr;
@@ -170,8 +208,6 @@ void __init remap_numa_kva(void)
int node;
for_each_online_node(node) {
- if (node == 0)
- continue;
for (pfn=0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) {
vaddr = node_remap_start_vaddr[node]+(pfn<<PAGE_SHIFT);
set_pmd_pfn((ulong) vaddr,
@@ -185,13 +221,9 @@ static unsigned long calculate_numa_remap_pages(void)
{
int nid;
unsigned long size, reserve_pages = 0;
+ unsigned long pfn;
for_each_online_node(nid) {
- if (nid == 0)
- continue;
- if (!node_remap_size[nid])
- continue;
-
/*
* The acpi/srat node info can show hot-add memroy zones
* where memory could be added but not currently present.
@@ -208,11 +240,24 @@ static unsigned long calculate_numa_remap_pages(void)
size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
/* now the roundup is correct, convert to PAGE_SIZE pages */
size = size * PTRS_PER_PTE;
+
+ /*
+		 * Validate that the region we are allocating contains
+		 * only valid pages.
+ */
+ for (pfn = node_end_pfn[nid] - size;
+ pfn < node_end_pfn[nid]; pfn++)
+ if (!page_is_ram(pfn))
+ break;
+
+ if (pfn != node_end_pfn[nid])
+ size = 0;
+
printk("Reserving %ld pages of KVA for lmem_map of node %d\n",
size, nid);
node_remap_size[nid] = size;
- reserve_pages += size;
node_remap_offset[nid] = reserve_pages;
+ reserve_pages += size;
printk("Shrinking node %d from %ld pages to %ld pages\n",
nid, node_end_pfn[nid], node_end_pfn[nid] - size);
node_end_pfn[nid] -= size;
@@ -265,12 +310,18 @@ unsigned long __init setup_memory(void)
(ulong) pfn_to_kaddr(max_low_pfn));
for_each_online_node(nid) {
node_remap_start_vaddr[nid] = pfn_to_kaddr(
- (highstart_pfn + reserve_pages) - node_remap_offset[nid]);
+ highstart_pfn + node_remap_offset[nid]);
+ /* Init the node remap allocator */
+ node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
+ (node_remap_size[nid] * PAGE_SIZE);
+ node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
+ ALIGN(sizeof(pg_data_t), PAGE_SIZE);
+
allocate_pgdat(nid);
printk ("node %d will remap to vaddr %08lx - %08lx\n", nid,
(ulong) node_remap_start_vaddr[nid],
- (ulong) pfn_to_kaddr(highstart_pfn + reserve_pages
- - node_remap_offset[nid] + node_remap_size[nid]));
+ (ulong) pfn_to_kaddr(highstart_pfn
+ + node_remap_offset[nid] + node_remap_size[nid]));
}
printk("High memory starts at vaddr %08lx\n",
(ulong) pfn_to_kaddr(highstart_pfn));
@@ -333,23 +384,9 @@ void __init zone_sizes_init(void)
}
zholes_size = get_zholes_size(nid);
- /*
- * We let the lmem_map for node 0 be allocated from the
- * normal bootmem allocator, but other nodes come from the
- * remapped KVA area - mbligh
- */
- if (!nid)
- free_area_init_node(nid, NODE_DATA(nid),
- zones_size, start, zholes_size);
- else {
- unsigned long lmem_map;
- lmem_map = (unsigned long)node_remap_start_vaddr[nid];
- lmem_map += sizeof(pg_data_t) + PAGE_SIZE - 1;
- lmem_map &= PAGE_MASK;
- NODE_DATA(nid)->node_mem_map = (struct page *)lmem_map;
- free_area_init_node(nid, NODE_DATA(nid), zones_size,
- start, zholes_size);
- }
+
+ free_area_init_node(nid, NODE_DATA(nid), zones_size, start,
+ zholes_size);
}
return;
}
@@ -358,24 +395,26 @@ void __init set_highmem_pages_init(int bad_ppro)
{
#ifdef CONFIG_HIGHMEM
struct zone *zone;
+ struct page *page;
for_each_zone(zone) {
- unsigned long node_pfn, node_high_size, zone_start_pfn;
- struct page * zone_mem_map;
-
+ unsigned long node_pfn, zone_start_pfn, zone_end_pfn;
+
if (!is_highmem(zone))
continue;
- printk("Initializing %s for node %d\n", zone->name,
- zone->zone_pgdat->node_id);
-
- node_high_size = zone->spanned_pages;
- zone_mem_map = zone->zone_mem_map;
zone_start_pfn = zone->zone_start_pfn;
+ zone_end_pfn = zone_start_pfn + zone->spanned_pages;
+
+ printk("Initializing %s for node %d (%08lx:%08lx)\n",
+ zone->name, zone->zone_pgdat->node_id,
+ zone_start_pfn, zone_end_pfn);
- for (node_pfn = 0; node_pfn < node_high_size; node_pfn++) {
- one_highpage_init((struct page *)(zone_mem_map + node_pfn),
- zone_start_pfn + node_pfn, bad_ppro);
+ for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) {
+ if (!pfn_valid(node_pfn))
+ continue;
+ page = pfn_to_page(node_pfn);
+ one_highpage_init(page, node_pfn, bad_ppro);
}
}
totalram_pages += totalhigh_pages;
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index fc4c4cad4e9..4b7aaf99d7e 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -1,4 +1,5 @@
#include <linux/highmem.h>
+#include <linux/module.h>
void *kmap(struct page *page)
{
@@ -87,3 +88,8 @@ struct page *kmap_atomic_to_page(void *ptr)
return pte_page(*pte);
}
+EXPORT_SYMBOL(kmap);
+EXPORT_SYMBOL(kunmap);
+EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_to_page);
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 8766c771bb4..3672e2ef51a 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -191,7 +191,7 @@ static inline int page_kills_ppro(unsigned long pagenr)
extern int is_available_memory(efi_memory_desc_t *);
-static inline int page_is_ram(unsigned long pagenr)
+int page_is_ram(unsigned long pagenr)
{
int i;
unsigned long addr, end;
@@ -276,7 +276,9 @@ void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
SetPageReserved(page);
}
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
+extern void set_highmem_pages_init(int);
+#else
static void __init set_highmem_pages_init(int bad_ppro)
{
int pfn;
@@ -284,9 +286,7 @@ static void __init set_highmem_pages_init(int bad_ppro)
one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
totalram_pages += totalhigh_pages;
}
-#else
-extern void set_highmem_pages_init(int);
-#endif /* !CONFIG_DISCONTIGMEM */
+#endif /* CONFIG_FLATMEM */
#else
#define kmap_init() do { } while (0)
@@ -295,12 +295,13 @@ extern void set_highmem_pages_init(int);
#endif /* CONFIG_HIGHMEM */
unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
+EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
-#ifndef CONFIG_DISCONTIGMEM
-#define remap_numa_kva() do {} while (0)
-#else
+#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
+#else
+#define remap_numa_kva() do {} while (0)
#endif
static void __init pagetable_init (void)
@@ -525,7 +526,7 @@ static void __init set_max_mapnr_init(void)
#else
num_physpages = max_low_pfn;
#endif
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
max_mapnr = num_physpages;
#endif
}
@@ -539,7 +540,7 @@ void __init mem_init(void)
int tmp;
int bad_ppro;
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
if (!mem_map)
BUG();
#endif
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index ab542792b27..d393eefc705 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -11,6 +11,7 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
@@ -165,7 +166,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
}
return (void __iomem *) (offset + (char __iomem *)addr);
}
-
+EXPORT_SYMBOL(__ioremap);
/**
* ioremap_nocache - map bus memory into CPU space
@@ -222,6 +223,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
return p;
}
+EXPORT_SYMBOL(ioremap_nocache);
void iounmap(volatile void __iomem *addr)
{
@@ -255,6 +257,7 @@ out_unlock:
write_unlock(&vmlist_lock);
kfree(p);
}
+EXPORT_SYMBOL(iounmap);
void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index dd81479ff88..270c59f099a 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -30,13 +30,14 @@ void show_mem(void)
struct page *page;
pg_data_t *pgdat;
unsigned long i;
+ struct page_state ps;
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
for_each_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; ++i) {
- page = pgdat->node_mem_map + i;
+ page = pgdat_page_nr(pgdat, i);
total++;
if (PageHighMem(page))
highmem++;
@@ -53,6 +54,13 @@ void show_mem(void)
printk("%d reserved pages\n",reserved);
printk("%d pages shared\n",shared);
printk("%d pages swap cached\n",cached);
+
+ get_page_state(&ps);
+ printk("%lu pages dirty\n", ps.nr_dirty);
+ printk("%lu pages writeback\n", ps.nr_writeback);
+ printk("%lu pages mapped\n", ps.nr_mapped);
+ printk("%lu pages slab\n", ps.nr_slab);
+ printk("%lu pages pagetables\n", ps.nr_page_table_pages);
}
/*
diff --git a/arch/i386/oprofile/backtrace.c b/arch/i386/oprofile/backtrace.c
index 52d72e074f7..65dfd2edb67 100644
--- a/arch/i386/oprofile/backtrace.c
+++ b/arch/i386/oprofile/backtrace.c
@@ -91,7 +91,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
head = (struct frame_head *)regs->ebp;
#endif
- if (!user_mode(regs)) {
+ if (!user_mode_vm(regs)) {
while (depth-- && valid_kernel_stack(head, regs))
head = dump_backtrace(head);
return;
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index da21b1d07c1..83458f81e66 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -227,6 +227,24 @@ static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
}
/*
+ * The VIA pirq rules are nibble-based, like ALI,
+ * but without the ugly irq number munging.
 * However, the 82C586 uses a different nibble map.
+ */
+static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
+{
+ static unsigned int pirqmap[4] = { 3, 2, 5, 1 };
+ return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
+}
+
+static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
+{
+ static unsigned int pirqmap[4] = { 3, 2, 5, 1 };
+ write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
+ return 1;
+}
+
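
For reference, a hedged user-space model of the nibble addressing these routines rely on (read_config_nybble()/write_config_nybble() are defined earlier in irq.c, outside this hunk): register 0x55 onward is treated as an array of 4-bit fields, even indexes in the low nibble, odd indexes in the high nibble.

#include <stdio.h>
#include <stdint.h>

static uint8_t cfg[256];	/* stand-in for PCI config space */

static unsigned read_config_nybble_model(unsigned base, unsigned index)
{
	uint8_t b = cfg[base + index / 2];

	return (index & 1) ? (b >> 4) : (b & 0x0f);
}

int main(void)
{
	cfg[0x55] = 0x4a;	/* nibbles 0 and 1 packed in one byte */
	printf("%u %u\n",
	       read_config_nybble_model(0x55, 0),	/* 10 (0xa) */
	       read_config_nybble_model(0x55, 1));	/* 4 */
	return 0;
}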
+/*
* ITE 8330G pirq rules are nibble-based
* FIXME: pirqmap may be { 1, 0, 3, 2 },
* 2+3 are both mapped to irq 9 on my system
@@ -512,6 +530,10 @@ static __init int via_router_probe(struct irq_router *r, struct pci_dev *router,
switch(device)
{
case PCI_DEVICE_ID_VIA_82C586_0:
+ r->name = "VIA";
+ r->get = pirq_via586_get;
+ r->set = pirq_via586_set;
+ return 1;
case PCI_DEVICE_ID_VIA_82C596:
case PCI_DEVICE_ID_VIA_82C686:
case PCI_DEVICE_ID_VIA_8231:
diff --git a/arch/i386/pci/pcbios.c b/arch/i386/pci/pcbios.c
index 141421b673b..b9d65f0bc2d 100644
--- a/arch/i386/pci/pcbios.c
+++ b/arch/i386/pci/pcbios.c
@@ -4,6 +4,7 @@
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/module.h>
#include "pci.h"
#include "pci-functions.h"
@@ -456,7 +457,7 @@ struct irq_routing_table * __devinit pcibios_get_irq_routing_table(void)
free_page(page);
return rt;
}
-
+EXPORT_SYMBOL(pcibios_get_irq_routing_table);
int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
{
@@ -473,6 +474,7 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
"S" (&pci_indirect));
return !(ret & 0xff00);
}
+EXPORT_SYMBOL(pcibios_set_irq_routing);
static int __init pci_pcbios_init(void)
{
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index cf337c673d9..6f521cf19a1 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -94,13 +94,13 @@ static void fix_processor_context(void)
* Now maybe reload the debug registers
*/
if (current->thread.debugreg[7]){
- loaddebug(&current->thread, 0);
- loaddebug(&current->thread, 1);
- loaddebug(&current->thread, 2);
- loaddebug(&current->thread, 3);
- /* no 4 and 5 */
- loaddebug(&current->thread, 6);
- loaddebug(&current->thread, 7);
+ set_debugreg(current->thread.debugreg[0], 0);
+ set_debugreg(current->thread.debugreg[1], 1);
+ set_debugreg(current->thread.debugreg[2], 2);
+ set_debugreg(current->thread.debugreg[3], 3);
+ /* no 4 and 5 */
+ set_debugreg(current->thread.debugreg[6], 6);
+ set_debugreg(current->thread.debugreg[7], 7);
}
}
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ce4dfa8b834..01b78e7f992 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -161,6 +161,8 @@ config IA64_PAGE_SIZE_64KB
endchoice
+source kernel/Kconfig.hz
+
config IA64_BRL_EMU
bool
depends on ITANIUM
@@ -197,7 +199,7 @@ config HOLES_IN_ZONE
bool
default y if VIRTUAL_MEM_MAP
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
bool "Discontiguous memory support"
depends on (IA64_DIG || IA64_SGI_SN2 || IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB) && NUMA && VIRTUAL_MEM_MAP
default y if (IA64_SGI_SN2 || IA64_GENERIC) && NUMA
@@ -300,6 +302,8 @@ config PREEMPT
Say Y here if you are building a kernel for a desktop, embedded
or real-time system. Say N if you are unsure.
+source "mm/Kconfig"
+
config HAVE_DEC_LOCK
bool
depends on (SMP || PREEMPT)
diff --git a/arch/ia64/Kconfig.debug b/arch/ia64/Kconfig.debug
index de9d507ba0f..fda67ac993d 100644
--- a/arch/ia64/Kconfig.debug
+++ b/arch/ia64/Kconfig.debug
@@ -2,6 +2,17 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
+config KPROBES
+ bool "Kprobes"
+ depends on DEBUG_KERNEL
+ help
+ Kprobes allows you to trap at almost any kernel address and
+ execute a callback function. register_kprobe() establishes
+ a probepoint and specifies the callback. Kprobes is useful
+ for kernel debugging, non-intrusive instrumentation and testing.
+ If in doubt, say "N".
+
+
choice
prompt "Physical memory granularity"
default IA64_GRANULE_64MB
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index a01bb02d074..487d2e36b0a 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -78,7 +78,7 @@ CONFIG_IA64_L1_CACHE_SHIFT=7
CONFIG_NUMA=y
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
-CONFIG_DISCONTIGMEM=y
+CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
# CONFIG_IA64_CYCLONE is not set
CONFIG_IOSAPIC=y
CONFIG_IA64_SGI_SN_SIM=y
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 7be8096e056..8444add7638 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -84,7 +84,7 @@ CONFIG_IA64_L1_CACHE_SHIFT=7
CONFIG_NUMA=y
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
-CONFIG_DISCONTIGMEM=y
+CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
CONFIG_IA64_CYCLONE=y
CONFIG_IOSAPIC=y
CONFIG_FORCE_MAX_ZONEORDER=18
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index b2de948bdae..e3e9290e3ff 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -241,7 +241,7 @@ typedef struct compat_siginfo {
/* POSIX.1b timers */
struct {
- timer_t _tid; /* timer id */
+ compat_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
char _pad[sizeof(unsigned int) - sizeof(int)];
compat_sigval_t _sigval; /* same as below */
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 4c73d8ba2e3..b2e2f6509eb 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o
obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
+obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
mca_recovery-y += mca_drv.o mca_drv_asm.o
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
new file mode 100644
index 00000000000..b7fa3ccd2b0
--- /dev/null
+++ b/arch/ia64/kernel/jprobes.S
@@ -0,0 +1,61 @@
+/*
+ * Jprobe specific operations
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) Intel Corporation, 2005
+ *
+ * 2005-May Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
+ * <anil.s.keshavamurthy@intel.com> initial implementation
+ *
+ * Jprobes (a.k.a. "jump probes", which are built on top of kprobes) allow a
+ * probe to be inserted into the beginning of a function call. The fundamental
+ * difference between a jprobe and a kprobe is the jprobe handler is executed
+ * in the same context as the target function, while the kprobe handlers
+ * are executed in interrupt context.
+ *
+ * For jprobes we initially gain control by placing a break point in the
+ * first instruction of the targeted function. When we catch that specific
+ * break, we:
+ * * set the return address to our jprobe_inst_return() function
+ * * jump to the jprobe handler function
+ *
+ * Since we fixed up the return address, the jprobe handler will return to our
+ * jprobe_inst_return() function, giving us control again. At this point we
+ * are back in the parent's frame marker, so we make yet another call to our
+ * jprobe_break() function to fix up the frame marker as it would normally
+ * exist in the target function.
+ *
+ * Our jprobe_return function then transfers control back to kprobes.c by
+ * executing a break instruction using one of our reserved numbers. When we
+ * catch that break in kprobes.c, we continue like we do for a normal kprobe
+ * by single stepping the emulated instruction, and then returning execution
+ * to the correct location.
+ */
+#include <asm/asmmacro.h>
+
+ /*
+ * void jprobe_break(void)
+ */
+ENTRY(jprobe_break)
+ break.m 0x80300
+END(jprobe_break)
+
+ /*
+ * void jprobe_inst_return(void)
+ */
+GLOBAL_ENTRY(jprobe_inst_return)
+ br.call.sptk.many b0=jprobe_break
+END(jprobe_inst_return)
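
A minimal jprobe consumer for the mechanism described in the header comment might look like the sketch below. It assumes the generic register_jprobe()/unregister_jprobe()/jprobe_return() interface from the rest of this series, uses the plain i386-style entry cast (ia64 additionally has to resolve function descriptors), and assumes do_fork's address is visible to the module; everything here is illustrative.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/sched.h>

/* Jprobe handler: runs with do_fork's arguments, in its context. */
static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
		     struct pt_regs *regs, unsigned long stack_size,
		     int __user *parent_tidptr, int __user *child_tidptr)
{
	printk(KERN_INFO "do_fork: clone_flags=0x%lx\n", clone_flags);
	jprobe_return();	/* mandatory: hands control back to kprobes */
	return 0;		/* never reached */
}

static struct jprobe my_jprobe = {
	.entry = (kprobe_opcode_t *)jdo_fork,	/* i386-style entry cast */
};

static int __init jp_init(void)
{
	/* illustrative: assumes do_fork's address is resolvable here */
	my_jprobe.kp.addr = (kprobe_opcode_t *)do_fork;
	return register_jprobe(&my_jprobe);
}

static void __exit jp_exit(void)
{
	unregister_jprobe(&my_jprobe);
}

module_init(jp_init);
module_exit(jp_exit);
MODULE_LICENSE("GPL");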
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
new file mode 100644
index 00000000000..5978823d5c6
--- /dev/null
+++ b/arch/ia64/kernel/kprobes.c
@@ -0,0 +1,601 @@
+/*
+ * Kernel Probes (KProbes)
+ * arch/ia64/kernel/kprobes.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ * Copyright (C) Intel Corporation, 2005
+ *
+ * 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
+ * <anil.s.keshavamurthy@intel.com> adapted from i386
+ */
+
+#include <linux/config.h>
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/preempt.h>
+#include <linux/moduleloader.h>
+
+#include <asm/pgtable.h>
+#include <asm/kdebug.h>
+
+extern void jprobe_inst_return(void);
+
+/* kprobe_status settings */
+#define KPROBE_HIT_ACTIVE 0x00000001
+#define KPROBE_HIT_SS 0x00000002
+
+static struct kprobe *current_kprobe, *kprobe_prev;
+static unsigned long kprobe_status, kprobe_status_prev;
+static struct pt_regs jprobe_saved_regs;
+
+enum instruction_type {A, I, M, F, B, L, X, u};
+static enum instruction_type bundle_encoding[32][3] = {
+ { M, I, I }, /* 00 */
+ { M, I, I }, /* 01 */
+ { M, I, I }, /* 02 */
+ { M, I, I }, /* 03 */
+ { M, L, X }, /* 04 */
+ { M, L, X }, /* 05 */
+ { u, u, u }, /* 06 */
+ { u, u, u }, /* 07 */
+ { M, M, I }, /* 08 */
+ { M, M, I }, /* 09 */
+ { M, M, I }, /* 0A */
+ { M, M, I }, /* 0B */
+ { M, F, I }, /* 0C */
+ { M, F, I }, /* 0D */
+ { M, M, F }, /* 0E */
+ { M, M, F }, /* 0F */
+ { M, I, B }, /* 10 */
+ { M, I, B }, /* 11 */
+ { M, B, B }, /* 12 */
+ { M, B, B }, /* 13 */
+ { u, u, u }, /* 14 */
+ { u, u, u }, /* 15 */
+ { B, B, B }, /* 16 */
+ { B, B, B }, /* 17 */
+ { M, M, B }, /* 18 */
+ { M, M, B }, /* 19 */
+ { u, u, u }, /* 1A */
+ { u, u, u }, /* 1B */
+ { M, F, B }, /* 1C */
+ { M, F, B }, /* 1D */
+ { u, u, u }, /* 1E */
+ { u, u, u }, /* 1F */
+};
+
+/*
+ * Check whether the instruction is an IP-relative instruction,
+ * and update the kprobe inst flag accordingly.
+ */
+static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode,
+ unsigned long kprobe_inst, struct kprobe *p)
+{
+ p->ainsn.inst_flag = 0;
+ p->ainsn.target_br_reg = 0;
+
+ if (bundle_encoding[template][slot] == B) {
+ switch (major_opcode) {
+ case INDIRECT_CALL_OPCODE:
+ p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
+ p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+ break;
+ case IP_RELATIVE_PREDICT_OPCODE:
+ case IP_RELATIVE_BRANCH_OPCODE:
+ p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
+ break;
+ case IP_RELATIVE_CALL_OPCODE:
+ p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
+ p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
+ p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+ break;
+ }
+ } else if (bundle_encoding[template][slot] == X) {
+ switch (major_opcode) {
+ case LONG_CALL_OPCODE:
+ p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
+ p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+ break;
+ }
+ }
+ return;
+}
+
+/*
+ * Check whether the instruction on which we are inserting
+ * the kprobe is supported.
+ * Returns 0 if supported
+ * Returns -EINVAL if unsupported
+ */
+static int unsupported_inst(uint template, uint slot, uint major_opcode,
+ unsigned long kprobe_inst, struct kprobe *p)
+{
+ unsigned long addr = (unsigned long)p->addr;
+
+ if (bundle_encoding[template][slot] == I) {
+ switch (major_opcode) {
+		case 0x0:	/* I_UNIT_MISC_OPCODE */
+ /*
+ * Check for Integer speculation instruction
+ * - Bit 33-35 to be equal to 0x1
+ */
+ if (((kprobe_inst >> 33) & 0x7) == 1) {
+ printk(KERN_WARNING
+ "Kprobes on speculation inst at <0x%lx> not supported\n",
+ addr);
+ return -EINVAL;
+ }
+
+ /*
+ * IP relative mov instruction
+ * - Bit 27-35 to be equal to 0x30
+ */
+ if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
+ printk(KERN_WARNING
+ "Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
+ addr);
+ return -EINVAL;
+
+ }
+ }
+ }
+ return 0;
+}
+
+
+/*
+ * Check whether the instruction on which we are inserting the kprobe,
+ *	(qp) cmpx.crel.ctype p1,p2=r2,r3
+ * is a cmp instruction with ctype unc.
+ */
+static uint is_cmp_ctype_unc_inst(uint template, uint slot, uint major_opcode,
+unsigned long kprobe_inst)
+{
+ cmp_inst_t cmp_inst;
+ uint ctype_unc = 0;
+
+ if (!((bundle_encoding[template][slot] == I) ||
+ (bundle_encoding[template][slot] == M)))
+ goto out;
+
+ if (!((major_opcode == 0xC) || (major_opcode == 0xD) ||
+ (major_opcode == 0xE)))
+ goto out;
+
+ cmp_inst.l = kprobe_inst;
+ if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
+		/* Integer compare - Register Register (A6 type) */
+ if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
+ &&(cmp_inst.f.c == 1))
+ ctype_unc = 1;
+ } else if ((cmp_inst.f.x2 == 2)||(cmp_inst.f.x2 == 3)) {
+		/* Integer compare - Immediate Register (A8 type) */
+ if ((cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1))
+ ctype_unc = 1;
+ }
+out:
+ return ctype_unc;
+}
+
+/*
+ * In this function we override the bundle with
+ * the break instruction at the given slot.
+ */
+static void prepare_break_inst(uint template, uint slot, uint major_opcode,
+ unsigned long kprobe_inst, struct kprobe *p)
+{
+ unsigned long break_inst = BREAK_INST;
+ bundle_t *bundle = &p->ainsn.insn.bundle;
+
+ /*
+	 * Copy the qualifying predicate (qp) of the original
+	 * kprobe_inst to the break instruction, unless it is a cmp
+	 * instruction with ctype unc: that special form must always
+	 * be executed regardless of qp.
+ */
+ if (!is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst))
+ break_inst |= (0x3f & kprobe_inst);
+
+ switch (slot) {
+ case 0:
+ bundle->quad0.slot0 = break_inst;
+ break;
+ case 1:
+ bundle->quad0.slot1_p0 = break_inst;
+ bundle->quad1.slot1_p1 = break_inst >> (64-46);
+ break;
+ case 2:
+ bundle->quad1.slot2 = break_inst;
+ break;
+ }
+
+ /*
+ * Update the instruction flag, so that we can
+ * emulate the instruction properly after we
+ * single step on original instruction
+ */
+ update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
+}
+
+static inline void get_kprobe_inst(bundle_t *bundle, uint slot,
+ unsigned long *kprobe_inst, uint *major_opcode)
+{
+ unsigned long kprobe_inst_p0, kprobe_inst_p1;
+ unsigned int template;
+
+ template = bundle->quad0.template;
+
+ switch (slot) {
+ case 0:
+ *major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
+ *kprobe_inst = bundle->quad0.slot0;
+ break;
+ case 1:
+ *major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
+ kprobe_inst_p0 = bundle->quad0.slot1_p0;
+ kprobe_inst_p1 = bundle->quad1.slot1_p1;
+ *kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46));
+ break;
+ case 2:
+ *major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
+ *kprobe_inst = bundle->quad1.slot2;
+ break;
+ }
+}
+
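
The slot-1 handling above stitches one 41-bit instruction back together from two bitfields, because slot 1 of a 128-bit IA-64 bundle straddles the two 64-bit words at bit 46. A hedged user-space model of the round trip:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t quad0 = 0, quad1 = 0;
	uint64_t inst = 0x123456789AULL & ((1ULL << 41) - 1);	/* 41 bits */

	/* split: the low 18 bits live at the top of quad0 (bits 46..63),
	 * the remaining 23 bits at the bottom of quad1 */
	quad0 |= (inst & ((1ULL << 18) - 1)) << 46;
	quad1 |= inst >> 18;

	/* reassemble the way get_kprobe_inst() does for slot 1 */
	uint64_t p0 = quad0 >> 46;
	uint64_t p1 = quad1 & ((1ULL << 23) - 1);
	uint64_t back = p0 | (p1 << (64 - 46));

	printf("%s\n", back == inst ? "match" : "mismatch");
	return 0;
}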
+static int valid_kprobe_addr(int template, int slot, unsigned long addr)
+{
+ if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
+ printk(KERN_WARNING "Attempting to insert unaligned kprobe at 0x%lx\n",
+ addr);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline void save_previous_kprobe(void)
+{
+ kprobe_prev = current_kprobe;
+ kprobe_status_prev = kprobe_status;
+}
+
+static inline void restore_previous_kprobe(void)
+{
+ current_kprobe = kprobe_prev;
+ kprobe_status = kprobe_status_prev;
+}
+
+static inline void set_current_kprobe(struct kprobe *p)
+{
+ current_kprobe = p;
+}
+
+int arch_prepare_kprobe(struct kprobe *p)
+{
+ unsigned long addr = (unsigned long) p->addr;
+ unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
+ unsigned long kprobe_inst = 0;
+ unsigned int slot = addr & 0xf, template, major_opcode = 0;
+ bundle_t *bundle = &p->ainsn.insn.bundle;
+
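+ /*
+ * Keep two copies of the probed bundle: p->opcode stays
+ * unmodified (used to disarm and to single step out of
+ * line), while p->ainsn receives the break instruction.
+ */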
+ memcpy(&p->opcode.bundle, kprobe_addr, sizeof(bundle_t));
+ memcpy(&p->ainsn.insn.bundle, kprobe_addr, sizeof(bundle_t));
+
+ template = bundle->quad0.template;
+
+ if (valid_kprobe_addr(template, slot, addr))
+ return -EINVAL;
+
+ /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
+ if (slot == 1 && bundle_encoding[template][1] == L)
+ slot++;
+
+ /* Get kprobe_inst and major_opcode from the bundle */
+ get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
+
+ if (unsupported_inst(template, slot, major_opcode, kprobe_inst, p))
+ return -EINVAL;
+
+ prepare_break_inst(template, slot, major_opcode, kprobe_inst, p);
+
+ return 0;
+}
+
+void arch_arm_kprobe(struct kprobe *p)
+{
+ unsigned long addr = (unsigned long)p->addr;
+ unsigned long arm_addr = addr & ~0xFULL;
+
+ memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
+ flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+}
+
+void arch_disarm_kprobe(struct kprobe *p)
+{
+ unsigned long addr = (unsigned long)p->addr;
+ unsigned long arm_addr = addr & ~0xFULL;
+
+ /* p->opcode contains the original unaltered bundle */
+ memcpy((char *) arm_addr, (char *) &p->opcode.bundle, sizeof(bundle_t));
+ flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+}
+
+void arch_remove_kprobe(struct kprobe *p)
+{
+}
+
+/*
+ * We are resuming execution after a single step fault, so the pt_regs
+ * structure reflects the register state after we executed the instruction
+ * located in the out-of-line copy (p->opcode.bundle). We still need to
+ * adjust the IP to point back to the original address, handling the cases
+ * where a relative IP and/or a branch register must be fixed up.
+ */
+static void resume_execution(struct kprobe *p, struct pt_regs *regs)
+{
+ unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL;
+ unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
+ unsigned long template;
+ int slot = ((unsigned long)p->addr & 0xf);
+
+ template = p->opcode.bundle.quad0.template;
+
+ if (slot == 1 && bundle_encoding[template][1] == L)
+ slot = 2;
+
+ if (p->ainsn.inst_flag) {
+ if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
+ /* Fix relative IP address */
+ regs->cr_iip = (regs->cr_iip - bundle_addr) + resume_addr;
+ }
+
+ if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
+ /*
+ * Fix the target branch register: the software
+ * convention is to use b0, b6 or b7, so only those
+ * registers are checked here.
+ */
+ switch (p->ainsn.target_br_reg) {
+ case 0:
+ if ((regs->b0 == bundle_addr) ||
+ (regs->b0 == bundle_addr + 0x10)) {
+ regs->b0 = (regs->b0 - bundle_addr) +
+ resume_addr;
+ }
+ break;
+ case 6:
+ if ((regs->b6 == bundle_addr) ||
+ (regs->b6 == bundle_addr + 0x10)) {
+ regs->b6 = (regs->b6 - bundle_addr) +
+ resume_addr;
+ }
+ break;
+ case 7:
+ if ((regs->b7 == bundle_addr) ||
+ (regs->b7 == bundle_addr + 0x10)) {
+ regs->b7 = (regs->b7 - bundle_addr) +
+ resume_addr;
+ }
+ break;
+ } /* end switch */
+ }
+ goto turn_ss_off;
+ }
+
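+ /*
+ * No fixup flags are set: just map the IP back from the
+ * out-of-line bundle to the original one. For slot 2 the
+ * IP has already advanced to the following bundle.
+ */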
+ if (slot == 2) {
+ if (regs->cr_iip == bundle_addr + 0x10) {
+ regs->cr_iip = resume_addr + 0x10;
+ }
+ } else {
+ if (regs->cr_iip == bundle_addr) {
+ regs->cr_iip = resume_addr;
+ }
+ }
+
+turn_ss_off:
+ /* Turn off Single Step bit */
+ ia64_psr(regs)->ss = 0;
+}
+
+static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
+{
+ unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
+ unsigned long slot = (unsigned long)p->addr & 0xf;
+
+ /* Update instruction pointer (IIP) and slot number (IPSR.ri) */
+ regs->cr_iip = bundle_addr & ~0xFULL;
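+ /* the step executes the saved original bundle at p->opcode, out of line */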
+
+ if (slot > 2)
+ slot = 0;
+
+ ia64_psr(regs)->ri = slot;
+
+ /* turn on single stepping */
+ ia64_psr(regs)->ss = 1;
+}
+
+static int pre_kprobes_handler(struct die_args *args)
+{
+ struct kprobe *p;
+ int ret = 0;
+ struct pt_regs *regs = args->regs;
+ kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
+
+ preempt_disable();
+
+ /* Handle recursion cases */
+ if (kprobe_running()) {
+ p = get_kprobe(addr);
+ if (p) {
+ if (kprobe_status == KPROBE_HIT_SS) {
+ unlock_kprobes();
+ goto no_kprobe;
+ }
+ /* We have reentered pre_kprobes_handler(), because
+ * another probe was hit while within the handler.
+ * Save the original kprobes variables and just single
+ * step on the instruction of the new probe without
+ * calling any user handlers.
+ */
+ save_previous_kprobe();
+ set_current_kprobe(p);
+ p->nmissed++;
+ prepare_ss(p, regs);
+ kprobe_status = KPROBE_REENTER;
+ return 1;
+ } else if (args->err == __IA64_BREAK_JPROBE) {
+ /*
+ * jprobe instrumented function just completed
+ */
+ p = current_kprobe;
+ if (p->break_handler && p->break_handler(p, regs)) {
+ goto ss_probe;
+ }
+ } else {
+ /* Not our break */
+ goto no_kprobe;
+ }
+ }
+
+ lock_kprobes();
+ p = get_kprobe(addr);
+ if (!p) {
+ unlock_kprobes();
+ goto no_kprobe;
+ }
+
+ kprobe_status = KPROBE_HIT_ACTIVE;
+ set_current_kprobe(p);
+
+ if (p->pre_handler && p->pre_handler(p, regs))
+ /*
+ * Our pre-handler is specifically requesting that we just
+ * do a return. This is handling the case where the
+ * pre-handler is really our special jprobe pre-handler.
+ */
+ return 1;
+
+ss_probe:
+ prepare_ss(p, regs);
+ kprobe_status = KPROBE_HIT_SS;
+ return 1;
+
+no_kprobe:
+ preempt_enable_no_resched();
+ return ret;
+}
+
+static int post_kprobes_handler(struct pt_regs *regs)
+{
+ if (!kprobe_running())
+ return 0;
+
+ if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
+ kprobe_status = KPROBE_HIT_SSDONE;
+ current_kprobe->post_handler(current_kprobe, regs, 0);
+ }
+
+ resume_execution(current_kprobe, regs);
+
+ /* Restore the original saved kprobes variables and continue. */
+ if (kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe();
+ goto out;
+ }
+
+ unlock_kprobes();
+
+out:
+ preempt_enable_no_resched();
+ return 1;
+}
+
+static int kprobes_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ if (!kprobe_running())
+ return 0;
+
+ if (current_kprobe->fault_handler &&
+ current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+ return 1;
+
+ if (kprobe_status & KPROBE_HIT_SS) {
+ resume_execution(current_kprobe, regs);
+ unlock_kprobes();
+ preempt_enable_no_resched();
+ }
+
+ return 0;
+}
+
+int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
+ void *data)
+{
+ struct die_args *args = (struct die_args *)data;
+ switch(val) {
+ case DIE_BREAK:
+ if (pre_kprobes_handler(args))
+ return NOTIFY_STOP;
+ break;
+ case DIE_SS:
+ if (post_kprobes_handler(args->regs))
+ return NOTIFY_STOP;
+ break;
+ case DIE_PAGE_FAULT:
+ if (kprobes_fault_handler(args->regs, args->trapnr))
+ return NOTIFY_STOP;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
+
+ /* save architectural state */
+ jprobe_saved_regs = *regs;
+
+ /* after rfi, execute the jprobe instrumented function */
+ regs->cr_iip = addr & ~0xFULL;
+ ia64_psr(regs)->ri = addr & 0xf;
+ regs->r1 = ((struct fnptr *)(jp->entry))->gp;
+
+ /*
+ * fix the return address to our jprobe_inst_return() function
+ * in the jprobes.S file
+ */
+ regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;
+
+ return 1;
+}
+
+int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ *regs = jprobe_saved_regs;
+ return 1;
+}
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 1861173bd4f..e7e520d90f0 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -21,12 +21,26 @@
#include <asm/intrinsics.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
+#include <asm/kdebug.h>
extern spinlock_t timerlist_lock;
fpswa_interface_t *fpswa_interface;
EXPORT_SYMBOL(fpswa_interface);
+struct notifier_block *ia64die_chain;
+static DEFINE_SPINLOCK(die_notifier_lock);
+
+int register_die_notifier(struct notifier_block *nb)
+{
+ int err = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&die_notifier_lock, flags);
+ err = notifier_chain_register(&ia64die_chain, nb);
+ spin_unlock_irqrestore(&die_notifier_lock, flags);
+ return err;
+}
+
void __init
trap_init (void)
{
@@ -137,6 +151,10 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
switch (break_num) {
case 0: /* unknown error (used by GCC for __builtin_abort()) */
+ if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
+ == NOTIFY_STOP) {
+ return;
+ }
die_if_kernel("bugcheck!", regs, break_num);
sig = SIGILL; code = ILL_ILLOPC;
break;
@@ -189,6 +207,15 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
sig = SIGILL; code = __ILL_BNDMOD;
break;
+ case 0x80200:
+ case 0x80300:
+ if (notify_die(DIE_BREAK, "kprobe", regs, break_num, TRAP_BRKPT, SIGTRAP)
+ == NOTIFY_STOP) {
+ return;
+ }
+ sig = SIGTRAP; code = TRAP_BRKPT;
+ break;
+
default:
if (break_num < 0x40000 || break_num > 0x100000)
die_if_kernel("Bad break", regs, break_num);
@@ -548,7 +575,11 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
#endif
break;
case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
- case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
+ case 36:
+ if (notify_die(DIE_SS, "ss", &regs, vector,
+ vector, SIGTRAP) == NOTIFY_STOP)
+ return;
+ siginfo.si_code = TRAP_TRACE; ifa = 0; break;
}
siginfo.si_signo = SIGTRAP;
siginfo.si_errno = 0;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index c0071092939..f3fd528ead3 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -560,14 +560,15 @@ void show_mem(void)
int shared = 0, cached = 0, reserved = 0;
printk("Node ID: %d\n", pgdat->node_id);
for(i = 0; i < pgdat->node_spanned_pages; i++) {
+ struct page *page = pgdat_page_nr(pgdat, i);
if (!ia64_pfn_valid(pgdat->node_start_pfn+i))
continue;
- if (PageReserved(pgdat->node_mem_map+i))
+ if (PageReserved(page))
reserved++;
- else if (PageSwapCache(pgdat->node_mem_map+i))
+ else if (PageSwapCache(page))
cached++;
- else if (page_count(pgdat->node_mem_map+i))
- shared += page_count(pgdat->node_mem_map+i)-1;
+ else if (page_count(page))
+ shared += page_count(page)-1;
}
total_present += present;
total_reserved += reserved;
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 4174ec999dd..ff62551eb3a 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -14,6 +14,7 @@
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
+#include <asm/kdebug.h>
extern void die (char *, struct pt_regs *, long);
@@ -102,6 +103,13 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
goto bad_area_no_up;
#endif
+ /*
+ * This is to handle the kprobes on user space access instructions
+ */
+ if (notify_die(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
+ SIGSEGV) == NOTIFY_STOP)
+ return;
+
down_read(&mm->mmap_sem);
vma = find_vma_prev(mm, address, &prev_vma);
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 64c133344af..42ca8a39798 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -172,11 +172,13 @@ config NOHIGHMEM
bool
default y
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
bool "Internal RAM Support"
depends on CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP
default y
+source "mm/Kconfig"
+
config IRAM_START
hex "Internal memory start address (hex)"
default "00f00000"
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index bc423d838fb..d9a40b1fe8b 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -49,7 +49,7 @@ void show_mem(void)
printk("Free swap: %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
for_each_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; ++i) {
- page = pgdat->node_mem_map + i;
+ page = pgdat_page_nr(pgdat, i);
total++;
if (PageHighMem(page))
highmem++;
@@ -152,7 +152,7 @@ int __init reservedpages_count(void)
reservedpages = 0;
for_each_online_node(nid)
for (i = 0 ; i < MAX_LOW_PFN(nid) - START_PFN(nid) ; i++)
- if (PageReserved(NODE_DATA(nid)->node_mem_map + i))
+ if (PageReserved(nid_page_nr(nid, i)))
reservedpages++;
return reservedpages;
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index d0713c7d9f0..691a2469ff3 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -357,6 +357,8 @@ config 060_WRITETHROUGH
is hardwired on. The 53c710 SCSI driver is known to suffer from
this problem.
+source "mm/Kconfig"
+
endmenu
menu "General setup"
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index e729bd28062..dbfcdc8e608 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -532,6 +532,8 @@ config ROMKERNEL
endchoice
+source "mm/Kconfig"
+
endmenu
config ISA_DMA_API
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ab9944693f1..94f5a8eb2c2 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -492,7 +492,7 @@ config SGI_SN0_N_MODE
which allows for more memory. Your system is most probably
running in M-Mode, so you should say N here.
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
bool
default y if SGI_IP27
help
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 13472292d0e..b5bab3a42fc 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -82,7 +82,7 @@ CONFIG_STOP_MACHINE=y
# CONFIG_SGI_IP22 is not set
CONFIG_SGI_IP27=y
# CONFIG_SGI_SN0_N_MODE is not set
-CONFIG_DISCONTIGMEM=y
+CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
CONFIG_NUMA=y
# CONFIG_MAPPED_KERNEL is not set
# CONFIG_REPLICATE_KTEXT is not set
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 0a44a98d7ad..a160d04f7db 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -549,9 +549,8 @@ void __init mem_init(void)
*/
numslots = node_getlastslot(node);
for (slot = 1; slot <= numslots; slot++) {
- p = NODE_DATA(node)->node_mem_map +
- (slot_getbasepfn(node, slot) -
- slot_getbasepfn(node, 0));
+ p = nid_page_nr(node, slot_getbasepfn(node, slot) -
+ slot_getbasepfn(node, 0));
/*
* Free valid memory in current slot.
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index e7e7c56fc21..ce327c799b4 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -148,7 +148,7 @@ config HOTPLUG_CPU
default y if SMP
select HOTPLUG
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
bool "Discontiguous memory support (EXPERIMENTAL)"
depends on EXPERIMENTAL
help
@@ -157,6 +157,8 @@ config DISCONTIGMEM
or have huge holes in the physical address space for other reasons.
See <file:Documentation/vm/numa> for more.
+source "mm/Kconfig"
+
config PREEMPT
bool
# bool "Preemptible Kernel"
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index cac37589e35..2886ad70db4 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -506,7 +506,7 @@ void show_mem(void)
for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
struct page *p;
- p = node_mem_map(i) + j - node_start_pfn(i);
+ p = nid_page_nr(i, j) - node_start_pfn(i);
total++;
if (PageReserved(p))
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 10162b187bc..848f43970a4 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -905,6 +905,8 @@ config PREEMPT
config HIGHMEM
bool "High memory support"
+source "mm/Kconfig"
+
source "fs/Kconfig.binfmt"
config PROC_DEVICETREE
diff --git a/arch/ppc/boot/simple/misc.c b/arch/ppc/boot/simple/misc.c
index ab0f9902cb6..e02de5b467a 100644
--- a/arch/ppc/boot/simple/misc.c
+++ b/arch/ppc/boot/simple/misc.c
@@ -222,7 +222,7 @@ decompress_kernel(unsigned long load_addr, int num_words, unsigned long cksum)
puts("\n");
puts("Uncompressing Linux...");
- gunzip(0x0, 0x400000, zimage_start, &zimage_size);
+ gunzip(NULL, 0x400000, zimage_start, &zimage_size);
puts("done.\n");
/* get the bi_rec address */
diff --git a/arch/ppc/boot/simple/mpc10x_memory.c b/arch/ppc/boot/simple/mpc10x_memory.c
index 977daedc14c..20d92a34ceb 100644
--- a/arch/ppc/boot/simple/mpc10x_memory.c
+++ b/arch/ppc/boot/simple/mpc10x_memory.c
@@ -33,7 +33,7 @@
#define MPC10X_PCI_OP(rw, size, type, op, mask) \
static void \
-mpc10x_##rw##_config_##size(unsigned int *cfg_addr, \
+mpc10x_##rw##_config_##size(unsigned int __iomem *cfg_addr, \
unsigned int *cfg_data, int devfn, int offset, \
type val) \
{ \
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index 0f1fa289744..cb27068bfcd 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -77,6 +77,10 @@ config PPC_PSERIES
bool " IBM pSeries & new iSeries"
default y
+config PPC_BPA
+ bool " Broadband Processor Architecture"
+ depends on PPC_MULTIPLATFORM
+
config PPC_PMAC
depends on PPC_MULTIPLATFORM
bool " Apple G5 based machines"
@@ -106,6 +110,21 @@ config PPC_OF
bool
default y
+config XICS
+ depends on PPC_PSERIES
+ bool
+ default y
+
+config MPIC
+ depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE
+ bool
+ default y
+
+config BPA_IIC
+ depends on PPC_BPA
+ bool
+ default y
+
# VMX is pSeries only for now until somebody writes the iSeries
# exception vectors for it
config ALTIVEC
@@ -198,13 +217,49 @@ config HMT
This option enables hardware multithreading on RS64 cpus.
pSeries systems p620 and p660 have such a cpu type.
-config DISCONTIGMEM
- bool "Discontiguous Memory Support"
+config ARCH_SELECT_MEMORY_MODEL
+ def_bool y
+
+config ARCH_FLATMEM_ENABLE
+ def_bool y
+ depends on !NUMA
+
+config ARCH_DISCONTIGMEM_ENABLE
+ def_bool y
depends on SMP && PPC_PSERIES
+config ARCH_DISCONTIGMEM_DEFAULT
+ def_bool y
+ depends on ARCH_DISCONTIGMEM_ENABLE
+
+config ARCH_SPARSEMEM_ENABLE
+ def_bool y
+ depends on ARCH_DISCONTIGMEM_ENABLE
+
+source "mm/Kconfig"
+
+config HAVE_ARCH_EARLY_PFN_TO_NID
+ def_bool y
+ depends on NEED_MULTIPLE_NODES
+
+# Some NUMA nodes have memory ranges that span
+# other nodes. Even though a pfn is valid and
+# between a node's start and end pfns, it may not
+# reside on that node.
+#
+# This is a relatively temporary hack that should
+# be able to go away when sparsemem is fully in
+# place
+config NODES_SPAN_OTHER_NODES
+ def_bool y
+ depends on NEED_MULTIPLE_NODES
+
config NUMA
bool "NUMA support"
- depends on DISCONTIGMEM
+ default y if DISCONTIGMEM || SPARSEMEM
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
@@ -256,7 +311,7 @@ config MSCHUNKS
config PPC_RTAS
bool
- depends on PPC_PSERIES
+ depends on PPC_PSERIES || PPC_BPA
default y
config RTAS_PROC
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index 33c752ceca4..731b8475833 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -90,12 +90,14 @@ boot := arch/ppc64/boot
boottarget-$(CONFIG_PPC_PSERIES) := zImage zImage.initrd
boottarget-$(CONFIG_PPC_MAPLE) := zImage zImage.initrd
boottarget-$(CONFIG_PPC_ISERIES) := vmlinux.sminitrd vmlinux.initrd vmlinux.sm
+boottarget-$(CONFIG_PPC_BPA) := zImage zImage.initrd
$(boottarget-y): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
bootimage-$(CONFIG_PPC_PSERIES) := $(boot)/zImage
bootimage-$(CONFIG_PPC_PMAC) := vmlinux
bootimage-$(CONFIG_PPC_MAPLE) := $(boot)/zImage
+bootimage-$(CONFIG_PPC_BPA) := zImage
bootimage-$(CONFIG_PPC_ISERIES) := vmlinux
BOOTIMAGE := $(bootimage-y)
install: vmlinux
diff --git a/arch/ppc64/boot/install.sh b/arch/ppc64/boot/install.sh
index 955c5681db6..cb2d6626b55 100644
--- a/arch/ppc64/boot/install.sh
+++ b/arch/ppc64/boot/install.sh
@@ -22,8 +22,8 @@
# User may have a custom install script
-if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi
-if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi
+if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
+if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
# Default install
diff --git a/arch/ppc64/configs/pSeries_defconfig b/arch/ppc64/configs/pSeries_defconfig
index 3eb5ef25d3a..d0db8b5966c 100644
--- a/arch/ppc64/configs/pSeries_defconfig
+++ b/arch/ppc64/configs/pSeries_defconfig
@@ -88,7 +88,7 @@ CONFIG_IBMVIO=y
CONFIG_IOMMU_VMERGE=y
CONFIG_SMP=y
CONFIG_NR_CPUS=128
-CONFIG_DISCONTIGMEM=y
+CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
CONFIG_NUMA=y
CONFIG_SCHED_SMT=y
# CONFIG_PREEMPT is not set
diff --git a/arch/ppc64/defconfig b/arch/ppc64/defconfig
index 2f31bf3046f..b8e2066dde7 100644
--- a/arch/ppc64/defconfig
+++ b/arch/ppc64/defconfig
@@ -89,7 +89,7 @@ CONFIG_BOOTX_TEXT=y
CONFIG_IOMMU_VMERGE=y
CONFIG_SMP=y
CONFIG_NR_CPUS=32
-CONFIG_DISCONTIGMEM=y
+CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
# CONFIG_NUMA is not set
# CONFIG_SCHED_SMT is not set
# CONFIG_PREEMPT is not set
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index b5e167cf1a0..dffbfb7ac8d 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -27,17 +27,21 @@ obj-$(CONFIG_PPC_ISERIES) += HvCall.o HvLpConfig.o LparData.o \
mf.o HvLpEvent.o iSeries_proc.o iSeries_htab.o \
iSeries_iommu.o
-obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o prom_init.o prom.o mpic.o
+obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o prom_init.o prom.o
obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \
pSeries_nvram.o rtasd.o ras.o pSeries_reconfig.o \
- xics.o rtas.o pSeries_setup.o pSeries_iommu.o
+ pSeries_setup.o pSeries_iommu.o
+
+obj-$(CONFIG_PPC_BPA) += bpa_setup.o bpa_iommu.o bpa_nvram.o \
+ bpa_iic.o spider-pic.o
obj-$(CONFIG_EEH) += eeh.o
obj-$(CONFIG_PROC_FS) += proc_ppc64.o
obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o
+obj-$(CONFIG_PPC_RTAS) += rtas.o rtas_pci.o
obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
obj-$(CONFIG_SCANLOG) += scanlog.o
obj-$(CONFIG_VIOPATH) += viopath.o
@@ -46,6 +50,8 @@ obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_HVCS) += hvcserver.o
obj-$(CONFIG_IBMVIO) += vio.o
+obj-$(CONFIG_XICS) += xics.o
+obj-$(CONFIG_MPIC) += mpic.o
obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \
pmac_time.o pmac_nvram.o pmac_low_i2c.o
@@ -58,6 +64,7 @@ ifdef CONFIG_SMP
obj-$(CONFIG_PPC_PMAC) += pmac_smp.o smp-tbsync.o
obj-$(CONFIG_PPC_ISERIES) += iSeries_smp.o
obj-$(CONFIG_PPC_PSERIES) += pSeries_smp.o
+obj-$(CONFIG_PPC_BPA) += pSeries_smp.o
obj-$(CONFIG_PPC_MAPLE) += smp-tbsync.o
endif
diff --git a/arch/ppc64/kernel/bpa_iic.c b/arch/ppc64/kernel/bpa_iic.c
new file mode 100644
index 00000000000..c8f3dc3fad7
--- /dev/null
+++ b/arch/ppc64/kernel/bpa_iic.c
@@ -0,0 +1,270 @@
+/*
+ * BPA Internal Interrupt Controller
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/config.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/ptrace.h>
+
+#include "bpa_iic.h"
+
+struct iic_pending_bits {
+ u32 data;
+ u8 flags;
+ u8 class;
+ u8 source;
+ u8 prio;
+};
+
+enum iic_pending_flags {
+ IIC_VALID = 0x80,
+ IIC_IPI = 0x40,
+};
+
+struct iic_regs {
+ struct iic_pending_bits pending;
+ struct iic_pending_bits pending_destr;
+ u64 generate;
+ u64 prio;
+};
+
+struct iic {
+ struct iic_regs __iomem *regs;
+};
+
+static DEFINE_PER_CPU(struct iic, iic);
+
+void iic_local_enable(void)
+{
+ out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
+}
+
+void iic_local_disable(void)
+{
+ out_be64(&__get_cpu_var(iic).regs->prio, 0x0);
+}
+
+static unsigned int iic_startup(unsigned int irq)
+{
+ return 0;
+}
+
+static void iic_enable(unsigned int irq)
+{
+ iic_local_enable();
+}
+
+static void iic_disable(unsigned int irq)
+{
+}
+
+static void iic_end(unsigned int irq)
+{
+ iic_local_enable();
+}
+
+static struct hw_interrupt_type iic_pic = {
+ .typename = " BPA-IIC ",
+ .startup = iic_startup,
+ .enable = iic_enable,
+ .disable = iic_disable,
+ .end = iic_end,
+};
+
+static int iic_external_get_irq(struct iic_pending_bits pending)
+{
+ int irq;
+ unsigned char node, unit;
+
+ node = pending.source >> 4;
+ unit = pending.source & 0xf;
+ irq = -1;
+
+ /*
+ * This mapping is specific to the Broadband
+ * Engine. We might need to get the numbers
+ * from the device tree to support future CPUs.
+ */
+ switch (unit) {
+ case 0x00:
+ case 0x0b:
+ /*
+ * One of these units can be connected
+ * to an external interrupt controller.
+ */
+ if (pending.prio > 0x3f ||
+ pending.class != 2)
+ break;
+ irq = IIC_EXT_OFFSET
+ + spider_get_irq(pending.prio + node * IIC_NODE_STRIDE)
+ + node * IIC_NODE_STRIDE;
+ break;
+ case 0x01 ... 0x04:
+ case 0x07 ... 0x0a:
+ /*
+ * These units are connected to the SPEs
+ */
+ if (pending.class > 2)
+ break;
+ irq = IIC_SPE_OFFSET
+ + pending.class * IIC_CLASS_STRIDE
+ + node * IIC_NODE_STRIDE
+ + unit;
+ break;
+ }
+ if (irq == -1)
+ printk(KERN_WARNING "Unexpected interrupt class %02x, "
+ "source %02x, prio %02x, cpu %02x\n", pending.class,
+ pending.source, pending.prio, smp_processor_id());
+ return irq;
+}
+
+/* Get an IRQ number from the pending state register of the IIC */
+int iic_get_irq(struct pt_regs *regs)
+{
+ struct iic *iic;
+ int irq;
+ struct iic_pending_bits pending;
+
+ iic = &__get_cpu_var(iic);
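+ /*
+ * Destructive read: fetching pending_destr atomically
+ * retrieves and acknowledges the pending interrupt bits.
+ */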
+ *(unsigned long *) &pending =
+ in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
+
+ irq = -1;
+ if (pending.flags & IIC_VALID) {
+ if (pending.flags & IIC_IPI) {
+ irq = IIC_IPI_OFFSET + (pending.prio >> 4);
+/*
+ if (irq > 0x80)
+ printk(KERN_WARNING "Unexpected IPI prio %02x"
+ "on CPU %02x\n", pending.prio,
+ smp_processor_id());
+*/
+ } else {
+ irq = iic_external_get_irq(pending);
+ }
+ }
+ return irq;
+}
+
+static struct iic_regs __iomem *find_iic(int cpu)
+{
+ struct device_node *np;
+ int nodeid = cpu / 2;
+ unsigned long regs;
+ struct iic_regs __iomem *iic_regs;
+
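+ /* each BE provides two hardware threads, so the node id is cpu / 2 */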
+ for (np = of_find_node_by_type(NULL, "cpu");
+ np;
+ np = of_find_node_by_type(np, "cpu")) {
+ if (nodeid == *(int *)get_property(np, "node-id", NULL))
+ break;
+ }
+
+ if (!np) {
+ printk(KERN_WARNING "IIC: CPU %d not found\n", cpu);
+ iic_regs = NULL;
+ } else {
+ regs = *(long *)get_property(np, "iic", NULL);
+
+ /* hack until we have decided on the devtree info */
+ regs += 0x400;
+ if (cpu & 1)
+ regs += 0x20;
+
+ printk(KERN_DEBUG "IIC for CPU %d at %lx\n", cpu, regs);
+ iic_regs = __ioremap(regs, sizeof(struct iic_regs),
+ _PAGE_NO_CACHE);
+ }
+ return iic_regs;
+}
+
+#ifdef CONFIG_SMP
+void iic_setup_cpu(void)
+{
+ out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
+}
+
+void iic_cause_IPI(int cpu, int mesg)
+{
+ out_be64(&per_cpu(iic, cpu).regs->generate, mesg);
+}
+
+static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
+{
+ smp_message_recv(irq - IIC_IPI_OFFSET, regs);
+ return IRQ_HANDLED;
+}
+
+static void iic_request_ipi(int irq, const char *name)
+{
+ /* IPIs are marked SA_INTERRUPT as they must run with irqs
+ * disabled */
+ get_irq_desc(irq)->handler = &iic_pic;
+ get_irq_desc(irq)->status |= IRQ_PER_CPU;
+ request_irq(irq, iic_ipi_action, SA_INTERRUPT, name, NULL);
+}
+
+void iic_request_IPIs(void)
+{
+ iic_request_ipi(IIC_IPI_OFFSET + PPC_MSG_CALL_FUNCTION, "IPI-call");
+ iic_request_ipi(IIC_IPI_OFFSET + PPC_MSG_RESCHEDULE, "IPI-resched");
+#ifdef CONFIG_DEBUGGER
+ iic_request_ipi(IIC_IPI_OFFSET + PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
+#endif /* CONFIG_DEBUGGER */
+}
+#endif /* CONFIG_SMP */
+
+static void iic_setup_spe_handlers(void)
+{
+ int be, isrc;
+
+ /* Assume two threads per BE are present */
+ for (be=0; be < num_present_cpus() / 2; be++) {
+ for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) {
+ int irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc;
+ get_irq_desc(irq)->handler = &iic_pic;
+ }
+ }
+}
+
+void iic_init_IRQ(void)
+{
+ int cpu, irq_offset;
+ struct iic *iic;
+
+ irq_offset = 0;
+ for_each_cpu(cpu) {
+ iic = &per_cpu(iic, cpu);
+ iic->regs = find_iic(cpu);
+ if (iic->regs)
+ out_be64(&iic->regs->prio, 0xff);
+ }
+ iic_setup_spe_handlers();
+}
diff --git a/arch/ppc64/kernel/bpa_iic.h b/arch/ppc64/kernel/bpa_iic.h
new file mode 100644
index 00000000000..6833c302216
--- /dev/null
+++ b/arch/ppc64/kernel/bpa_iic.h
@@ -0,0 +1,62 @@
+#ifndef ASM_BPA_IIC_H
+#define ASM_BPA_IIC_H
+#ifdef __KERNEL__
+/*
+ * Mapping of IIC pending bits into per-node
+ * interrupt numbers.
+ *
+ * IRQ FF CC SS PP FF CC SS PP Description
+ *
+ * 00-3f 80 02 +0 00 - 80 02 +0 3f South Bridge
+ * 00-3f 80 02 +b 00 - 80 02 +b 3f South Bridge
+ * 41-4a 80 00 +1 ** - 80 00 +a ** SPU Class 0
+ * 51-5a 80 01 +1 ** - 80 01 +a ** SPU Class 1
+ * 61-6a 80 02 +1 ** - 80 02 +a ** SPU Class 2
+ * 70-7f C0 ** ** 00 - C0 ** ** 0f IPI
+ *
+ * F flags
+ * C class
+ * S source
+ * P Priority
+ * + node number
+ * * don't care
+ *
+ * A node consists of a Broadband Engine and an optional
+ * south bridge device providing a maximum of 64 IRQs.
+ * The south bridge may be connected to either IOIF0
+ * or IOIF1.
+ * Each SPE is represented as three IRQ lines, one per
+ * interrupt class.
+ * 16 IRQ numbers are reserved for inter-processor
+ * interrupts, although these are only used in the
+ * range of the first node.
+ *
+ * This scheme needs 128 IRQ numbers per BIF node ID,
+ * which means that with the total of 512 lines
+ * available, we can have a maximum of four nodes.
+ */
+
+enum {
+ IIC_EXT_OFFSET = 0x00, /* Start of south bridge IRQs */
+ IIC_NUM_EXT = 0x40, /* Number of south bridge IRQs */
+ IIC_SPE_OFFSET = 0x40, /* Start of SPE interrupts */
+ IIC_CLASS_STRIDE = 0x10, /* SPE IRQs per class */
+ IIC_IPI_OFFSET = 0x70, /* Start of IPI IRQs */
+ IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */
+ IIC_NODE_STRIDE = 0x80, /* Total IRQs per node */
+};
+
+extern void iic_init_IRQ(void);
+extern int iic_get_irq(struct pt_regs *regs);
+extern void iic_cause_IPI(int cpu, int mesg);
+extern void iic_request_IPIs(void);
+extern void iic_setup_cpu(void);
+extern void iic_local_enable(void);
+extern void iic_local_disable(void);
+
+
+extern void spider_init_IRQ(void);
+extern int spider_get_irq(unsigned long int_pending);
+
+#endif
+#endif /* ASM_BPA_IIC_H */
diff --git a/arch/ppc64/kernel/bpa_iommu.c b/arch/ppc64/kernel/bpa_iommu.c
new file mode 100644
index 00000000000..f33a7bccb0d
--- /dev/null
+++ b/arch/ppc64/kernel/bpa_iommu.c
@@ -0,0 +1,377 @@
+/*
+ * IOMMU implementation for Broadband Processor Architecture
+ * We just establish a linear mapping at boot by setting all the
+ * IOPT cache entries in the CPU.
+ * The mapping functions should be identical to pci_direct_iommu,
+ * except for the handling of the high order bit that is required
+ * by the Spider bridge. These should be split into a separate
+ * file at the point where we get a different bridge chip.
+ *
+ * Copyright (C) 2005 IBM Deutschland Entwicklung GmbH,
+ * Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * Based on linear mapping
+ * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/sections.h>
+#include <asm/iommu.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/abs_addr.h>
+#include <asm/system.h>
+
+#include "pci.h"
+#include "bpa_iommu.h"
+
+static inline unsigned long
+get_iopt_entry(unsigned long real_address, unsigned long ioid,
+ unsigned long prot)
+{
+ return (prot & IOPT_PROT_MASK)
+ | (IOPT_COHERENT)
+ | (IOPT_ORDER_VC)
+ | (real_address & IOPT_RPN_MASK)
+ | (ioid & IOPT_IOID_MASK);
+}
+
+typedef struct {
+ unsigned long val;
+} ioste;
+
+static inline ioste
+mk_ioste(unsigned long val)
+{
+ ioste ioste = { .val = val, };
+ return ioste;
+}
+
+static inline ioste
+get_iost_entry(unsigned long iopt_base, unsigned long io_address, unsigned page_size)
+{
+ unsigned long ps;
+ unsigned long iostep;
+ unsigned long nnpt;
+ unsigned long shift;
+
+ switch (page_size) {
+ case 0x1000000:
+ ps = IOST_PS_16M;
+ nnpt = 0; /* one page per segment */
+ shift = 5; /* segment has 16 iopt entries */
+ break;
+
+ case 0x100000:
+ ps = IOST_PS_1M;
+ nnpt = 0; /* one page per segment */
+ shift = 1; /* segment has 256 iopt entries */
+ break;
+
+ case 0x10000:
+ ps = IOST_PS_64K;
+ nnpt = 0x07; /* 8 pages per io page table */
+ shift = 0; /* all entries are used */
+ break;
+
+ case 0x1000:
+ ps = IOST_PS_4K;
+ nnpt = 0x7f; /* 128 pages per io page table */
+ shift = 0; /* all entries are used */
+ break;
+
+ default: /* not a known compile time constant */
+ BUILD_BUG_ON(1);
+ break;
+ }
+
+ iostep = iopt_base +
+ /* need 8 bytes per iopte */
+ (((io_address / page_size * 8)
+ /* align io page tables on 4k page boundaries */
+ << shift)
+ /* nnpt+1 pages go into each iopt */
+ & ~(nnpt << 12));
+
+ nnpt++; /* this seems to work, but the documentation is not clear
+ about whether we put nnpt or nnpt-1 into the ioste bits.
+ In theory, this can't work for 4k pages. */
+ return mk_ioste(IOST_VALID_MASK
+ | (iostep & IOST_PT_BASE_MASK)
+ | ((nnpt << 5) & IOST_NNPT_MASK)
+ | (ps & IOST_PS_MASK));
+}
+
+/* compute the address of an io pte */
+static inline unsigned long
+get_ioptep(ioste iost_entry, unsigned long io_address)
+{
+ unsigned long iopt_base;
+ unsigned long page_size;
+ unsigned long page_number;
+ unsigned long iopt_offset;
+
+ iopt_base = iost_entry.val & IOST_PT_BASE_MASK;
+ page_size = iost_entry.val & IOST_PS_MASK;
+
+ /* decode page size to compute page number */
+ page_number = (io_address & 0x0fffffff) >> (10 + 2 * page_size);
+ /* page number is an offset into the io page table */
+ iopt_offset = (page_number << 3) & 0x7fff8ul;
+ return iopt_base + iopt_offset;
+}
+
+/* compute the tag field of the iopt cache entry */
+static inline unsigned long
+get_ioc_tag(ioste iost_entry, unsigned long io_address)
+{
+ unsigned long iopte = get_ioptep(iost_entry, io_address);
+
+ return IOPT_VALID_MASK
+ | ((iopte & 0x00000000000000ff8ul) >> 3)
+ | ((iopte & 0x0000003fffffc0000ul) >> 9);
+}
+
+/* compute the hashed 6 bit index for the 4-way associative pte cache */
+static inline unsigned long
+get_ioc_hash(ioste iost_entry, unsigned long io_address)
+{
+ unsigned long iopte = get_ioptep(iost_entry, io_address);
+
+ return ((iopte & 0x000000000000001f8ul) >> 3)
+ ^ ((iopte & 0x00000000000020000ul) >> 17)
+ ^ ((iopte & 0x00000000000010000ul) >> 15)
+ ^ ((iopte & 0x00000000000008000ul) >> 13)
+ ^ ((iopte & 0x00000000000004000ul) >> 11)
+ ^ ((iopte & 0x00000000000002000ul) >> 9)
+ ^ ((iopte & 0x00000000000001000ul) >> 7);
+}
+
+/* same as above, but pretend that we have a simpler 1-way associative
+ pte cache with an 8 bit index */
+static inline unsigned long
+get_ioc_hash_1way(ioste iost_entry, unsigned long io_address)
+{
+ unsigned long iopte = get_ioptep(iost_entry, io_address);
+
+ return ((iopte & 0x000000000000001f8ul) >> 3)
+ ^ ((iopte & 0x00000000000020000ul) >> 17)
+ ^ ((iopte & 0x00000000000010000ul) >> 15)
+ ^ ((iopte & 0x00000000000008000ul) >> 13)
+ ^ ((iopte & 0x00000000000004000ul) >> 11)
+ ^ ((iopte & 0x00000000000002000ul) >> 9)
+ ^ ((iopte & 0x00000000000001000ul) >> 7)
+ ^ ((iopte & 0x0000000000000c000ul) >> 8);
+}
+
+static inline ioste
+get_iost_cache(void __iomem *base, unsigned long index)
+{
+ unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR);
+ return mk_ioste(in_be64(&p[index]));
+}
+
+static inline void
+set_iost_cache(void __iomem *base, unsigned long index, ioste ste)
+{
+ unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR);
+ pr_debug("ioste %02lx was %016lx, store %016lx", index,
+ get_iost_cache(base, index).val, ste.val);
+ out_be64(&p[index], ste.val);
+ pr_debug(" now %016lx\n", get_iost_cache(base, index).val);
+}
+
+static inline unsigned long
+get_iopt_cache(void __iomem *base, unsigned long index, unsigned long *tag)
+{
+ unsigned long __iomem *tags = (void *)(base + IOC_PT_CACHE_DIR);
+ unsigned long __iomem *p = (void *)(base + IOC_PT_CACHE_REG);
+
+ *tag = tags[index];
+ rmb();
+ return *p;
+}
+
+static inline void
+set_iopt_cache(void __iomem *base, unsigned long index,
+ unsigned long tag, unsigned long val)
+{
+ unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR;
+ unsigned long __iomem *p = base + IOC_PT_CACHE_REG;
+ unsigned long oldtag;
+
+ pr_debug("iopt %02lx was v%016lx/t%016lx, store v%016lx/t%016lx\n",
+ index, get_iopt_cache(base, index, &oldtag), oldtag, val, tag);
+
+ out_be64(p, val);
+ out_be64(&tags[index], tag);
+}
+
+static inline void
+set_iost_origin(void __iomem *base)
+{
+ unsigned long __iomem *p = base + IOC_ST_ORIGIN;
+ unsigned long origin = IOSTO_ENABLE | IOSTO_SW;
+
+ pr_debug("iost_origin %016lx, now %016lx\n", in_be64(p), origin);
+ out_be64(p, origin);
+}
+
+static inline void
+set_iocmd_config(void __iomem *base)
+{
+ unsigned long __iomem *p = base + 0xc00;
+ unsigned long conf;
+
+ conf = in_be64(p);
+ pr_debug("iost_conf %016lx, now %016lx\n", conf, conf | IOCMD_CONF_TE);
+ out_be64(p, conf | IOCMD_CONF_TE);
+}
+
+/* FIXME: get these from the device tree */
+#define ioc_base 0x20000511000ull
+#define ioc_mmio_base 0x20000510000ull
+#define ioid 0x48a
+#define iopt_phys_offset (- 0x20000000) /* We have a 512MB offset from the SB */
+#define io_page_size 0x1000000
+
+static unsigned long map_iopt_entry(unsigned long address)
+{
+ switch (address >> 20) {
+ case 0x600:
+ address = 0x24020000000ull; /* spider i/o */
+ break;
+ default:
+ address += iopt_phys_offset;
+ break;
+ }
+
+ return get_iopt_entry(address, ioid, IOPT_PROT_RW);
+}
+
+static void iommu_bus_setup_null(struct pci_bus *b) { }
+static void iommu_dev_setup_null(struct pci_dev *d) { }
+
+/* initialize the iommu to support a simple linear mapping
+ * for each DMA window used by any device. For now, we
+ * happen to know that there is only one DMA window in use,
+ * starting at iopt_phys_offset. */
+static void bpa_map_iommu(void)
+{
+ unsigned long address;
+ void __iomem *base;
+ ioste ioste;
+ unsigned long index;
+
+ base = __ioremap(ioc_base, 0x1000, _PAGE_NO_CACHE);
+ pr_debug("%lx mapped to %p\n", ioc_base, base);
+ set_iocmd_config(base);
+ iounmap(base);
+
+ base = __ioremap(ioc_mmio_base, 0x1000, _PAGE_NO_CACHE);
+ pr_debug("%lx mapped to %p\n", ioc_mmio_base, base);
+
+ set_iost_origin(base);
+
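+ /* establish a linear mapping over the first 4GB of bus
+ addresses, using 16MB I/O pages */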
+ for (address = 0; address < 0x100000000ul; address += io_page_size) {
+ ioste = get_iost_entry(0x10000000000ul, address, io_page_size);
+ if ((address & 0xfffffff) == 0) /* segment start */
+ set_iost_cache(base, address >> 28, ioste);
+ index = get_ioc_hash_1way(ioste, address);
+ pr_debug("addr %08lx, index %02lx, ioste %016lx\n",
+ address, index, ioste.val);
+ set_iopt_cache(base,
+ get_ioc_hash_1way(ioste, address),
+ get_ioc_tag(ioste, address),
+ map_iopt_entry(address));
+ }
+ iounmap(base);
+}
+
+
+static void *bpa_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, unsigned int __nocast flag)
+{
+ void *ret;
+
+ ret = (void *)__get_free_pages(flag, get_order(size));
+ if (ret != NULL) {
+ memset(ret, 0, size);
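+ /* tag the bus address with the high bit the Spider bridge expects */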
+ *dma_handle = virt_to_abs(ret) | BPA_DMA_VALID;
+ }
+ return ret;
+}
+
+static void bpa_free_coherent(struct device *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+
+static dma_addr_t bpa_map_single(struct device *hwdev, void *ptr,
+ size_t size, enum dma_data_direction direction)
+{
+ return virt_to_abs(ptr) | BPA_DMA_VALID;
+}
+
+static void bpa_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction)
+{
+}
+
+static int bpa_map_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ int i;
+
+ for (i = 0; i < nents; i++, sg++) {
+ sg->dma_address = (page_to_phys(sg->page) + sg->offset)
+ | BPA_DMA_VALID;
+ sg->dma_length = sg->length;
+ }
+
+ return nents;
+}
+
+static void bpa_unmap_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+}
+
+static int bpa_dma_supported(struct device *dev, u64 mask)
+{
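+ /* only 32-bit masks can be satisfied: the top address bit
+ carries BPA_DMA_VALID */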
+ return mask < 0x100000000ull;
+}
+
+void bpa_init_iommu(void)
+{
+ bpa_map_iommu();
+
+ /* Direct I/O, IOMMU off */
+ ppc_md.iommu_dev_setup = iommu_dev_setup_null;
+ ppc_md.iommu_bus_setup = iommu_bus_setup_null;
+
+ pci_dma_ops.alloc_coherent = bpa_alloc_coherent;
+ pci_dma_ops.free_coherent = bpa_free_coherent;
+ pci_dma_ops.map_single = bpa_map_single;
+ pci_dma_ops.unmap_single = bpa_unmap_single;
+ pci_dma_ops.map_sg = bpa_map_sg;
+ pci_dma_ops.unmap_sg = bpa_unmap_sg;
+ pci_dma_ops.dma_supported = bpa_dma_supported;
+}
diff --git a/arch/ppc64/kernel/bpa_iommu.h b/arch/ppc64/kernel/bpa_iommu.h
new file mode 100644
index 00000000000..e547d77dfa0
--- /dev/null
+++ b/arch/ppc64/kernel/bpa_iommu.h
@@ -0,0 +1,65 @@
+#ifndef BPA_IOMMU_H
+#define BPA_IOMMU_H
+
+/* some constants */
+enum {
+ /* segment table entries */
+ IOST_VALID_MASK = 0x8000000000000000ul,
+ IOST_TAG_MASK = 0x3000000000000000ul,
+ IOST_PT_BASE_MASK = 0x000003fffffff000ul,
+ IOST_NNPT_MASK = 0x0000000000000fe0ul,
+ IOST_PS_MASK = 0x000000000000000ful,
+
+ IOST_PS_4K = 0x1,
+ IOST_PS_64K = 0x3,
+ IOST_PS_1M = 0x5,
+ IOST_PS_16M = 0x7,
+
+ /* iopt tag register */
+ IOPT_VALID_MASK = 0x0000000200000000ul,
+ IOPT_TAG_MASK = 0x00000001fffffffful,
+
+ /* iopt cache register */
+ IOPT_PROT_MASK = 0xc000000000000000ul,
+ IOPT_PROT_NONE = 0x0000000000000000ul,
+ IOPT_PROT_READ = 0x4000000000000000ul,
+ IOPT_PROT_WRITE = 0x8000000000000000ul,
+ IOPT_PROT_RW = 0xc000000000000000ul,
+ IOPT_COHERENT = 0x2000000000000000ul,
+
+ IOPT_ORDER_MASK = 0x1800000000000000ul,
+ /* order access to same IOID/VC on same address */
+ IOPT_ORDER_ADDR = 0x0800000000000000ul,
+ /* similar, but only after a write access */
+ IOPT_ORDER_WRITES = 0x1000000000000000ul,
+ /* Order all accesses to same IOID/VC */
+ IOPT_ORDER_VC = 0x1800000000000000ul,
+
+ IOPT_RPN_MASK = 0x000003fffffff000ul,
+ IOPT_HINT_MASK = 0x0000000000000800ul,
+ IOPT_IOID_MASK = 0x00000000000007fful,
+
+ IOSTO_ENABLE = 0x8000000000000000ul,
+ IOSTO_ORIGIN = 0x000003fffffff000ul,
+ IOSTO_HW = 0x0000000000000800ul,
+ IOSTO_SW = 0x0000000000000400ul,
+
+ IOCMD_CONF_TE = 0x0000800000000000ul,
+
+ /* memory mapped registers */
+ IOC_PT_CACHE_DIR = 0x000,
+ IOC_ST_CACHE_DIR = 0x800,
+ IOC_PT_CACHE_REG = 0x910,
+ IOC_ST_ORIGIN = 0x918,
+ IOC_CONF = 0x930,
+
+ /* The high bit needs to be set on every DMA address,
+ only 2GB are addressable */
+ BPA_DMA_VALID = 0x80000000,
+ BPA_DMA_MASK = 0x7fffffff,
+};
+
+
+void bpa_init_iommu(void);
+
+#endif
diff --git a/arch/ppc64/kernel/bpa_nvram.c b/arch/ppc64/kernel/bpa_nvram.c
new file mode 100644
index 00000000000..06a119cfceb
--- /dev/null
+++ b/arch/ppc64/kernel/bpa_nvram.c
@@ -0,0 +1,118 @@
+/*
+ * NVRAM for CPBW
+ *
+ * (C) Copyright IBM Corp. 2005
+ *
+ * Authors : Utz Bacher <utz.bacher@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/machdep.h>
+#include <asm/nvram.h>
+#include <asm/prom.h>
+
+static void __iomem *bpa_nvram_start;
+static long bpa_nvram_len;
+static spinlock_t bpa_nvram_lock = SPIN_LOCK_UNLOCKED;
+
+static ssize_t bpa_nvram_read(char *buf, size_t count, loff_t *index)
+{
+ unsigned long flags;
+
+ if (*index >= bpa_nvram_len)
+ return 0;
+ if (*index + count > bpa_nvram_len)
+ count = bpa_nvram_len - *index;
+
+ spin_lock_irqsave(&bpa_nvram_lock, flags);
+
+ memcpy_fromio(buf, bpa_nvram_start + *index, count);
+
+ spin_unlock_irqrestore(&bpa_nvram_lock, flags);
+
+ *index += count;
+ return count;
+}
+
+static ssize_t bpa_nvram_write(char *buf, size_t count, loff_t *index)
+{
+ unsigned long flags;
+
+ if (*index >= bpa_nvram_len)
+ return 0;
+ if (*index + count > bpa_nvram_len)
+ count = bpa_nvram_len - *index;
+
+ spin_lock_irqsave(&bpa_nvram_lock, flags);
+
+ memcpy_toio(bpa_nvram_start + *index, buf, count);
+
+ spin_unlock_irqrestore(&bpa_nvram_lock, flags);
+
+ *index += count;
+ return count;
+}
+
+static ssize_t bpa_nvram_get_size(void)
+{
+ return bpa_nvram_len;
+}
+
+int __init bpa_nvram_init(void)
+{
+ struct device_node *nvram_node;
+ unsigned long *buffer;
+ int proplen;
+ unsigned long nvram_addr;
+ int ret;
+
+ ret = -ENODEV;
+ nvram_node = of_find_node_by_type(NULL, "nvram");
+ if (!nvram_node)
+ goto out;
+
+ ret = -EIO;
+ buffer = (unsigned long *)get_property(nvram_node, "reg", &proplen);
+ if (proplen != 2*sizeof(unsigned long))
+ goto out;
+
+ ret = -ENODEV;
+ nvram_addr = buffer[0];
+ bpa_nvram_len = buffer[1];
+ if (!bpa_nvram_len || !nvram_addr)
+ goto out;
+
+ bpa_nvram_start = ioremap(nvram_addr, bpa_nvram_len);
+ if (!bpa_nvram_start)
+ goto out;
+
+ printk(KERN_INFO "BPA NVRAM, %luk mapped to %p\n",
+ bpa_nvram_len >> 10, bpa_nvram_start);
+
+ ppc_md.nvram_read = bpa_nvram_read;
+ ppc_md.nvram_write = bpa_nvram_write;
+ ppc_md.nvram_size = bpa_nvram_get_size;
+
+out:
+ of_node_put(nvram_node);
+ return ret;
+}
diff --git a/arch/ppc64/kernel/bpa_setup.c b/arch/ppc64/kernel/bpa_setup.c
new file mode 100644
index 00000000000..57b3db66f45
--- /dev/null
+++ b/arch/ppc64/kernel/bpa_setup.c
@@ -0,0 +1,140 @@
+/*
+ * linux/arch/ppc64/kernel/bpa_setup.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Adapted from 'alpha' version by Gary Thomas
+ * Modified by Cort Dougan (cort@cs.nmt.edu)
+ * Modified by PPC64 Team, IBM Corp
+ * Modified by BPA Team, IBM Deutschland Entwicklung GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#undef DEBUG
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/console.h>
+
+#include <asm/mmu.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/rtas.h>
+#include <asm/pci-bridge.h>
+#include <asm/iommu.h>
+#include <asm/dma.h>
+#include <asm/machdep.h>
+#include <asm/time.h>
+#include <asm/nvram.h>
+#include <asm/cputable.h>
+
+#include "pci.h"
+#include "bpa_iic.h"
+#include "bpa_iommu.h"
+
+#ifdef DEBUG
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+void bpa_get_cpuinfo(struct seq_file *m)
+{
+ struct device_node *root;
+ const char *model = "";
+
+ root = of_find_node_by_path("/");
+ if (root)
+ model = get_property(root, "model", NULL);
+ seq_printf(m, "machine\t\t: BPA %s\n", model);
+ of_node_put(root);
+}
+
+static void bpa_progress(char *s, unsigned short hex)
+{
+ printk("*** %04x : %s\n", hex, s ? s : "");
+}
+
+static void __init bpa_setup_arch(void)
+{
+ ppc_md.init_IRQ = iic_init_IRQ;
+ ppc_md.get_irq = iic_get_irq;
+
+#ifdef CONFIG_SMP
+ smp_init_pSeries();
+#endif
+
+ /* init to some ~sane value until calibrate_delay() runs */
+ loops_per_jiffy = 50000000;
+
+ if (ROOT_DEV == 0) {
+ printk("No ramdisk, default root is /dev/hda2\n");
+ ROOT_DEV = Root_HDA2;
+ }
+
+ /* Find and initialize PCI host bridges */
+ init_pci_config_tokens();
+ find_and_init_phbs();
+ spider_init_IRQ();
+#ifdef CONFIG_DUMMY_CONSOLE
+ conswitchp = &dummy_con;
+#endif
+
+ bpa_nvram_init();
+}
+
+/*
+ * Early initialization. Relocation is on but do not reference unbolted pages
+ */
+static void __init bpa_init_early(void)
+{
+ DBG(" -> bpa_init_early()\n");
+
+ hpte_init_native();
+
+ bpa_init_iommu();
+
+ ppc64_interrupt_controller = IC_BPA_IIC;
+
+ DBG(" <- bpa_init_early()\n");
+}
+
+
+static int __init bpa_probe(int platform)
+{
+ if (platform != PLATFORM_BPA)
+ return 0;
+
+ return 1;
+}
+
+struct machdep_calls __initdata bpa_md = {
+ .probe = bpa_probe,
+ .setup_arch = bpa_setup_arch,
+ .init_early = bpa_init_early,
+ .get_cpuinfo = bpa_get_cpuinfo,
+ .restart = rtas_restart,
+ .power_off = rtas_power_off,
+ .halt = rtas_halt,
+ .get_boot_time = rtas_get_boot_time,
+ .get_rtc_time = rtas_get_rtc_time,
+ .set_rtc_time = rtas_set_rtc_time,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = bpa_progress,
+};
diff --git a/arch/ppc64/kernel/cpu_setup_power4.S b/arch/ppc64/kernel/cpu_setup_power4.S
index 3bd95182085..42fc08cf87a 100644
--- a/arch/ppc64/kernel/cpu_setup_power4.S
+++ b/arch/ppc64/kernel/cpu_setup_power4.S
@@ -73,7 +73,21 @@ _GLOBAL(__970_cpu_preinit)
_GLOBAL(__setup_cpu_power4)
blr
-
+
+_GLOBAL(__setup_cpu_be)
+ /* Set large page sizes LP=0: 16MB, LP=1: 64KB */
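+ /* build ~(HID6_LB << 32), clear the LB field, then OR in 0x02000 << 32 */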
+ addi r3, 0, 0
+ ori r3, r3, HID6_LB
+ sldi r3, r3, 32
+ nor r3, r3, r3
+ mfspr r4, SPRN_HID6
+ and r4, r4, r3
+ addi r3, 0, 0x02000
+ sldi r3, r3, 32
+ or r4, r4, r3
+ mtspr SPRN_HID6, r4
+ blr
+
_GLOBAL(__setup_cpu_ppc970)
mfspr r0,SPRN_HID0
li r11,5 /* clear DOZE and SLEEP */
diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c
index 8644a864805..1d162c7c59d 100644
--- a/arch/ppc64/kernel/cputable.c
+++ b/arch/ppc64/kernel/cputable.c
@@ -34,6 +34,7 @@ EXPORT_SYMBOL(cur_cpu_spec);
extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
/* We only set the altivec features if the kernel was compiled with altivec
@@ -162,6 +163,16 @@ struct cpu_spec cpu_specs[] = {
__setup_cpu_power4,
COMMON_PPC64_FW
},
+ { /* BE DD1.x */
+ 0xffff0000, 0x00700000, "Broadband Engine",
+ CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
+ CPU_FTR_SMT,
+ COMMON_USER_PPC64 | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ 128, 128,
+ __setup_cpu_be,
+ COMMON_PPC64_FW
+ },
{ /* default match */
0x00000000, 0x00000000, "POWER4 (compatible)",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c
index b31962436fe..86966ce76b5 100644
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ b/arch/ppc64/kernel/iSeries_setup.c
@@ -671,9 +671,6 @@ static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
}
}
-extern unsigned long ppc_proc_freq;
-extern unsigned long ppc_tb_freq;
-
/*
* Document me.
*/
@@ -772,8 +769,6 @@ static void iSeries_halt(void)
mf_power_off();
}
-extern void setup_default_decr(void);
-
/*
* void __init iSeries_calibrate_decr()
*
diff --git a/arch/ppc64/kernel/irq.c b/arch/ppc64/kernel/irq.c
index d860467b8f0..3defc8c33ad 100644
--- a/arch/ppc64/kernel/irq.c
+++ b/arch/ppc64/kernel/irq.c
@@ -395,6 +395,9 @@ int virt_irq_create_mapping(unsigned int real_irq)
if (ppc64_interrupt_controller == IC_OPEN_PIC)
return real_irq; /* no mapping for openpic (for now) */
+ if (ppc64_interrupt_controller == IC_BPA_IIC)
+ return real_irq; /* no mapping for iic either */
+
/* don't map interrupts < MIN_VIRT_IRQ */
if (real_irq < MIN_VIRT_IRQ) {
virt_irq_to_real_map[real_irq] = real_irq;
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index e950a2058a1..782ce3efa2c 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -32,15 +32,14 @@
#include <linux/ptrace.h>
#include <linux/spinlock.h>
#include <linux/preempt.h>
+#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/sstep.h>
-/* kprobe_status settings */
-#define KPROBE_HIT_ACTIVE 0x00000001
-#define KPROBE_HIT_SS 0x00000002
-
static struct kprobe *current_kprobe;
static unsigned long kprobe_status, kprobe_saved_msr;
+static struct kprobe *kprobe_prev;
+static unsigned long kprobe_status_prev, kprobe_saved_msr_prev;
static struct pt_regs jprobe_saved_regs;
int arch_prepare_kprobe(struct kprobe *p)
@@ -61,16 +60,25 @@ int arch_prepare_kprobe(struct kprobe *p)
void arch_copy_kprobe(struct kprobe *p)
{
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+ p->opcode = *p->addr;
}
-void arch_remove_kprobe(struct kprobe *p)
+void arch_arm_kprobe(struct kprobe *p)
{
+ *p->addr = BREAKPOINT_INSTRUCTION;
+ flush_icache_range((unsigned long) p->addr,
+ (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
-static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
+void arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
- regs->nip = (unsigned long)p->addr;
+ flush_icache_range((unsigned long) p->addr,
+ (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+}
+
+void arch_remove_kprobe(struct kprobe *p)
+{
}
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -83,6 +91,20 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
regs->nip = (unsigned long)&p->ainsn.insn;
}
+static inline void save_previous_kprobe(void)
+{
+ kprobe_prev = current_kprobe;
+ kprobe_status_prev = kprobe_status;
+ kprobe_saved_msr_prev = kprobe_saved_msr;
+}
+
+static inline void restore_previous_kprobe(void)
+{
+ current_kprobe = kprobe_prev;
+ kprobe_status = kprobe_status_prev;
+ kprobe_saved_msr = kprobe_saved_msr_prev;
+}
+
static inline int kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
@@ -101,8 +123,19 @@ static inline int kprobe_handler(struct pt_regs *regs)
unlock_kprobes();
goto no_kprobe;
}
- disarm_kprobe(p, regs);
- ret = 1;
+ /* We have reentered the kprobe_handler(), since
+ * another probe was hit while within the handler.
+ * Save the original kprobes variables and just single
+ * step on the instruction of the new probe without
+ * calling any user handlers.
+ */
+ save_previous_kprobe();
+ current_kprobe = p;
+ kprobe_saved_msr = regs->msr;
+ p->nmissed++;
+ prepare_singlestep(p, regs);
+ kprobe_status = KPROBE_REENTER;
+ return 1;
} else {
p = current_kprobe;
if (p->break_handler && p->break_handler(p, regs)) {
@@ -184,13 +217,21 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
if (!kprobe_running())
return 0;
- if (current_kprobe->post_handler)
+ if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
+ kprobe_status = KPROBE_HIT_SSDONE;
current_kprobe->post_handler(current_kprobe, regs, 0);
+ }
resume_execution(current_kprobe, regs);
regs->msr |= kprobe_saved_msr;
+ /* Restore the original saved kprobes variables and continue. */
+ if (kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe();
+ goto out;
+ }
unlock_kprobes();
+out:
preempt_enable_no_resched();
/*
diff --git a/arch/ppc64/kernel/maple_setup.c b/arch/ppc64/kernel/maple_setup.c
index 8cf95a27178..da8900b51f4 100644
--- a/arch/ppc64/kernel/maple_setup.c
+++ b/arch/ppc64/kernel/maple_setup.c
@@ -78,17 +78,77 @@ extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);
extern void generic_find_legacy_serial_ports(u64 *physport,
unsigned int *default_speed);
-
static void maple_restart(char *cmd)
{
+ unsigned int maple_nvram_base;
+ unsigned int maple_nvram_offset;
+ unsigned int maple_nvram_command;
+ struct device_node *rtcs;
+
+ /* find NVRAM device */
+ rtcs = find_compatible_devices("nvram", "AMD8111");
+ if (rtcs && rtcs->addrs) {
+ maple_nvram_base = rtcs->addrs[0].address;
+ } else {
+ printk(KERN_EMERG "Maple: Unable to find NVRAM\n");
+ printk(KERN_EMERG "Maple: Manual Restart Required\n");
+ return;
+ }
+
+ /* find service processor device */
+ rtcs = find_devices("service-processor");
+ if (!rtcs) {
+ printk(KERN_EMERG "Maple: Unable to find Service Processor\n");
+ printk(KERN_EMERG "Maple: Manual Restart Required\n");
+ return;
+ }
+ maple_nvram_offset = *(unsigned int*) get_property(rtcs,
+ "restart-addr", NULL);
+ maple_nvram_command = *(unsigned int*) get_property(rtcs,
+ "restart-value", NULL);
+
+ /* send command */
+ outb_p(maple_nvram_command, maple_nvram_base + maple_nvram_offset);
+ for (;;) ;
}
static void maple_power_off(void)
{
+ unsigned int maple_nvram_base;
+ unsigned int maple_nvram_offset;
+ unsigned int maple_nvram_command;
+ struct device_node *rtcs;
+
+ /* find NVRAM device */
+ rtcs = find_compatible_devices("nvram", "AMD8111");
+ if (rtcs && rtcs->addrs) {
+ maple_nvram_base = rtcs->addrs[0].address;
+ } else {
+ printk(KERN_EMERG "Maple: Unable to find NVRAM\n");
+ printk(KERN_EMERG "Maple: Manual Power-Down Required\n");
+ return;
+ }
+
+ /* find service processor device */
+ rtcs = find_devices("service-processor");
+ if (!rtcs) {
+ printk(KERN_EMERG "Maple: Unable to find Service Processor\n");
+ printk(KERN_EMERG "Maple: Manual Power-Down Required\n");
+ return;
+ }
+ maple_nvram_offset = *(unsigned int*) get_property(rtcs,
+ "power-off-addr", NULL);
+ maple_nvram_command = *(unsigned int*) get_property(rtcs,
+ "power-off-value", NULL);
+
+ /* send command */
+ outb_p(maple_nvram_command, maple_nvram_base + maple_nvram_offset);
+ for (;;) ;
}
static void maple_halt(void)
{
+ maple_power_off();
}
#ifdef CONFIG_SMP
@@ -235,6 +295,6 @@ struct machdep_calls __initdata maple_md = {
.get_boot_time = maple_get_boot_time,
.set_rtc_time = maple_set_rtc_time,
.get_rtc_time = maple_get_rtc_time,
- .calibrate_decr = maple_calibrate_decr,
+ .calibrate_decr = generic_calibrate_decr,
.progress = maple_progress,
};
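
The two restart/power-off paths above reduce to: read the NVRAM base from
one device node, the offset and command byte from another, then write the
byte. A stand-alone sketch with made-up property values; the real ones come
from the "restart-addr"/"restart-value" (or "power-off-*") properties:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical values standing in for what the device tree reports. */
static const uint32_t nvram_base    = 0x1000;  /* AMD8111 nvram node */
static const uint32_t restart_addr  = 0x70;    /* "restart-addr"     */
static const uint32_t restart_value = 0x01;    /* "restart-value"    */

/* Stand-in for outb_p(): print instead of touching real I/O ports. */
static void outb_p(uint8_t val, uint32_t port)
{
    printf("outb %#x -> port %#x\n", val, port);
}

int main(void)
{
    /* The service processor watches one NVRAM byte; writing the magic
     * value at base + offset triggers the reset, after which the
     * kernel just spins. */
    outb_p(restart_value, nvram_base + restart_addr);
    return 0;
}
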
diff --git a/arch/ppc64/kernel/maple_time.c b/arch/ppc64/kernel/maple_time.c
index 07ce7895b43..d65210abcd0 100644
--- a/arch/ppc64/kernel/maple_time.c
+++ b/arch/ppc64/kernel/maple_time.c
@@ -42,11 +42,8 @@
#define DBG(x...)
#endif
-extern void setup_default_decr(void);
extern void GregorianDay(struct rtc_time * tm);
-extern unsigned long ppc_tb_freq;
-extern unsigned long ppc_proc_freq;
static int maple_rtc_addr;
static int maple_clock_read(int addr)
@@ -176,51 +173,3 @@ void __init maple_get_boot_time(struct rtc_time *tm)
maple_get_rtc_time(tm);
}
-/* XXX FIXME: Some sane defaults: 125 MHz timebase, 1GHz processor */
-#define DEFAULT_TB_FREQ 125000000UL
-#define DEFAULT_PROC_FREQ (DEFAULT_TB_FREQ * 8)
-
-void __init maple_calibrate_decr(void)
-{
- struct device_node *cpu;
- struct div_result divres;
- unsigned int *fp = NULL;
-
- /*
- * The cpu node should have a timebase-frequency property
- * to tell us the rate at which the decrementer counts.
- */
- cpu = of_find_node_by_type(NULL, "cpu");
-
- ppc_tb_freq = DEFAULT_TB_FREQ;
- if (cpu != 0)
- fp = (unsigned int *)get_property(cpu, "timebase-frequency", NULL);
- if (fp != NULL)
- ppc_tb_freq = *fp;
- else
- printk(KERN_ERR "WARNING: Estimating decrementer frequency (not found)\n");
- fp = NULL;
- ppc_proc_freq = DEFAULT_PROC_FREQ;
- if (cpu != 0)
- fp = (unsigned int *)get_property(cpu, "clock-frequency", NULL);
- if (fp != NULL)
- ppc_proc_freq = *fp;
- else
- printk(KERN_ERR "WARNING: Estimating processor frequency (not found)\n");
-
- of_node_put(cpu);
-
- printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
- ppc_tb_freq/1000000, ppc_tb_freq%1000000);
- printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
- ppc_proc_freq/1000000, ppc_proc_freq%1000000);
-
- tb_ticks_per_jiffy = ppc_tb_freq / HZ;
- tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
- tb_ticks_per_usec = ppc_tb_freq / 1000000;
- tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
- div128_by_32(1024*1024, 0, tb_ticks_per_sec, &divres);
- tb_to_xs = divres.result_low;
-
- setup_default_decr();
-}
diff --git a/arch/ppc64/kernel/mpic.h b/arch/ppc64/kernel/mpic.h
index 571b3c99e06..63e177143ea 100644
--- a/arch/ppc64/kernel/mpic.h
+++ b/arch/ppc64/kernel/mpic.h
@@ -265,3 +265,6 @@ extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs);
/* This one gets to the primary mpic */
extern int mpic_get_irq(struct pt_regs *regs);
+
+/* global mpic for pSeries */
+extern struct mpic *pSeries_mpic;
diff --git a/arch/ppc64/kernel/pSeries_pci.c b/arch/ppc64/kernel/pSeries_pci.c
index 0b1cca28140..1f5f141fb7a 100644
--- a/arch/ppc64/kernel/pSeries_pci.c
+++ b/arch/ppc64/kernel/pSeries_pci.c
@@ -1,13 +1,11 @@
/*
- * pSeries_pci.c
+ * arch/ppc64/kernel/pSeries_pci.c
*
* Copyright (C) 2001 Dave Engebretsen, IBM Corporation
* Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
*
* pSeries specific routines for PCI.
*
- * Based on code from pci.c and chrp_pci.c
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -23,430 +21,18 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/init.h>
+#include <linux/ioport.h>
#include <linux/kernel.h>
-#include <linux/threads.h>
#include <linux/pci.h>
#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-#include <asm/prom.h>
-#include <asm/machdep.h>
#include <asm/pci-bridge.h>
-#include <asm/iommu.h>
-#include <asm/rtas.h>
+#include <asm/prom.h>
-#include "mpic.h"
#include "pci.h"
-/* RTAS tokens */
-static int read_pci_config;
-static int write_pci_config;
-static int ibm_read_pci_config;
-static int ibm_write_pci_config;
-
-static int s7a_workaround;
-
-extern struct mpic *pSeries_mpic;
-
-static int config_access_valid(struct device_node *dn, int where)
-{
- if (where < 256)
- return 1;
- if (where < 4096 && dn->pci_ext_config_space)
- return 1;
-
- return 0;
-}
-
-static int rtas_read_config(struct device_node *dn, int where, int size, u32 *val)
-{
- int returnval = -1;
- unsigned long buid, addr;
- int ret;
-
- if (!dn)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if (!config_access_valid(dn, where))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- addr = ((where & 0xf00) << 20) | (dn->busno << 16) |
- (dn->devfn << 8) | (where & 0xff);
- buid = dn->phb->buid;
- if (buid) {
- ret = rtas_call(ibm_read_pci_config, 4, 2, &returnval,
- addr, buid >> 32, buid & 0xffffffff, size);
- } else {
- ret = rtas_call(read_pci_config, 2, 2, &returnval, addr, size);
- }
- *val = returnval;
-
- if (ret)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (returnval == EEH_IO_ERROR_VALUE(size)
- && eeh_dn_check_failure (dn, NULL))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int rtas_pci_read_config(struct pci_bus *bus,
- unsigned int devfn,
- int where, int size, u32 *val)
-{
- struct device_node *busdn, *dn;
-
- if (bus->self)
- busdn = pci_device_to_OF_node(bus->self);
- else
- busdn = bus->sysdata; /* must be a phb */
-
- /* Search only direct children of the bus */
- for (dn = busdn->child; dn; dn = dn->sibling)
- if (dn->devfn == devfn)
- return rtas_read_config(dn, where, size, val);
- return PCIBIOS_DEVICE_NOT_FOUND;
-}
-
-static int rtas_write_config(struct device_node *dn, int where, int size, u32 val)
-{
- unsigned long buid, addr;
- int ret;
-
- if (!dn)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if (!config_access_valid(dn, where))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- addr = ((where & 0xf00) << 20) | (dn->busno << 16) |
- (dn->devfn << 8) | (where & 0xff);
- buid = dn->phb->buid;
- if (buid) {
- ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr, buid >> 32, buid & 0xffffffff, size, (ulong) val);
- } else {
- ret = rtas_call(write_pci_config, 3, 1, NULL, addr, size, (ulong)val);
- }
-
- if (ret)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int rtas_pci_write_config(struct pci_bus *bus,
- unsigned int devfn,
- int where, int size, u32 val)
-{
- struct device_node *busdn, *dn;
-
- if (bus->self)
- busdn = pci_device_to_OF_node(bus->self);
- else
- busdn = bus->sysdata; /* must be a phb */
-
- /* Search only direct children of the bus */
- for (dn = busdn->child; dn; dn = dn->sibling)
- if (dn->devfn == devfn)
- return rtas_write_config(dn, where, size, val);
- return PCIBIOS_DEVICE_NOT_FOUND;
-}
-
-struct pci_ops rtas_pci_ops = {
- rtas_pci_read_config,
- rtas_pci_write_config
-};
-
-int is_python(struct device_node *dev)
-{
- char *model = (char *)get_property(dev, "model", NULL);
-
- if (model && strstr(model, "Python"))
- return 1;
-
- return 0;
-}
-
-static int get_phb_reg_prop(struct device_node *dev,
- unsigned int addr_size_words,
- struct reg_property64 *reg)
-{
- unsigned int *ui_ptr = NULL, len;
-
- /* Found a PHB, now figure out where his registers are mapped. */
- ui_ptr = (unsigned int *)get_property(dev, "reg", &len);
- if (ui_ptr == NULL)
- return 1;
-
- if (addr_size_words == 1) {
- reg->address = ((struct reg_property32 *)ui_ptr)->address;
- reg->size = ((struct reg_property32 *)ui_ptr)->size;
- } else {
- *reg = *((struct reg_property64 *)ui_ptr);
- }
-
- return 0;
-}
-
-static void python_countermeasures(struct device_node *dev,
- unsigned int addr_size_words)
-{
- struct reg_property64 reg_struct;
- void __iomem *chip_regs;
- volatile u32 val;
-
- if (get_phb_reg_prop(dev, addr_size_words, &reg_struct))
- return;
-
- /* Python's register file is 1 MB in size. */
- chip_regs = ioremap(reg_struct.address & ~(0xfffffUL), 0x100000);
-
- /*
- * Firmware doesn't always clear this bit which is critical
- * for good performance - Anton
- */
-
-#define PRG_CL_RESET_VALID 0x00010000
-
- val = in_be32(chip_regs + 0xf6030);
- if (val & PRG_CL_RESET_VALID) {
- printk(KERN_INFO "Python workaround: ");
- val &= ~PRG_CL_RESET_VALID;
- out_be32(chip_regs + 0xf6030, val);
- /*
- * We must read it back for changes to
- * take effect
- */
- val = in_be32(chip_regs + 0xf6030);
- printk("reg0: %x\n", val);
- }
-
- iounmap(chip_regs);
-}
-
-void __init init_pci_config_tokens (void)
-{
- read_pci_config = rtas_token("read-pci-config");
- write_pci_config = rtas_token("write-pci-config");
- ibm_read_pci_config = rtas_token("ibm,read-pci-config");
- ibm_write_pci_config = rtas_token("ibm,write-pci-config");
-}
-
-unsigned long __devinit get_phb_buid (struct device_node *phb)
-{
- int addr_cells;
- unsigned int *buid_vals;
- unsigned int len;
- unsigned long buid;
-
- if (ibm_read_pci_config == -1) return 0;
-
- /* PHB's will always be children of the root node,
- * or so it is promised by the current firmware. */
- if (phb->parent == NULL)
- return 0;
- if (phb->parent->parent)
- return 0;
-
- buid_vals = (unsigned int *) get_property(phb, "reg", &len);
- if (buid_vals == NULL)
- return 0;
-
- addr_cells = prom_n_addr_cells(phb);
- if (addr_cells == 1) {
- buid = (unsigned long) buid_vals[0];
- } else {
- buid = (((unsigned long)buid_vals[0]) << 32UL) |
- (((unsigned long)buid_vals[1]) & 0xffffffff);
- }
- return buid;
-}
-
-static int phb_set_bus_ranges(struct device_node *dev,
- struct pci_controller *phb)
-{
- int *bus_range;
- unsigned int len;
-
- bus_range = (int *) get_property(dev, "bus-range", &len);
- if (bus_range == NULL || len < 2 * sizeof(int)) {
- return 1;
- }
-
- phb->first_busno = bus_range[0];
- phb->last_busno = bus_range[1];
-
- return 0;
-}
-
-static int __devinit setup_phb(struct device_node *dev,
- struct pci_controller *phb,
- unsigned int addr_size_words)
-{
- pci_setup_pci_controller(phb);
-
- if (is_python(dev))
- python_countermeasures(dev, addr_size_words);
-
- if (phb_set_bus_ranges(dev, phb))
- return 1;
-
- phb->arch_data = dev;
- phb->ops = &rtas_pci_ops;
- phb->buid = get_phb_buid(dev);
-
- return 0;
-}
-
-static void __devinit add_linux_pci_domain(struct device_node *dev,
- struct pci_controller *phb,
- struct property *of_prop)
-{
- memset(of_prop, 0, sizeof(struct property));
- of_prop->name = "linux,pci-domain";
- of_prop->length = sizeof(phb->global_number);
- of_prop->value = (unsigned char *)&of_prop[1];
- memcpy(of_prop->value, &phb->global_number, sizeof(phb->global_number));
- prom_add_property(dev, of_prop);
-}
-
-static struct pci_controller * __init alloc_phb(struct device_node *dev,
- unsigned int addr_size_words)
-{
- struct pci_controller *phb;
- struct property *of_prop;
-
- phb = alloc_bootmem(sizeof(struct pci_controller));
- if (phb == NULL)
- return NULL;
-
- of_prop = alloc_bootmem(sizeof(struct property) +
- sizeof(phb->global_number));
- if (!of_prop)
- return NULL;
-
- if (setup_phb(dev, phb, addr_size_words))
- return NULL;
-
- add_linux_pci_domain(dev, phb, of_prop);
-
- return phb;
-}
-
-static struct pci_controller * __devinit alloc_phb_dynamic(struct device_node *dev, unsigned int addr_size_words)
-{
- struct pci_controller *phb;
-
- phb = (struct pci_controller *)kmalloc(sizeof(struct pci_controller),
- GFP_KERNEL);
- if (phb == NULL)
- return NULL;
-
- if (setup_phb(dev, phb, addr_size_words))
- return NULL;
-
- phb->is_dynamic = 1;
-
- /* TODO: linux,pci-domain? */
-
- return phb;
-}
-
-unsigned long __init find_and_init_phbs(void)
-{
- struct device_node *node;
- struct pci_controller *phb;
- unsigned int root_size_cells = 0;
- unsigned int index;
- unsigned int *opprop = NULL;
- struct device_node *root = of_find_node_by_path("/");
-
- if (ppc64_interrupt_controller == IC_OPEN_PIC) {
- opprop = (unsigned int *)get_property(root,
- "platform-open-pic", NULL);
- }
-
- root_size_cells = prom_n_size_cells(root);
-
- index = 0;
-
- for (node = of_get_next_child(root, NULL);
- node != NULL;
- node = of_get_next_child(root, node)) {
- if (node->type == NULL || strcmp(node->type, "pci") != 0)
- continue;
-
- phb = alloc_phb(node, root_size_cells);
- if (!phb)
- continue;
-
- pci_process_bridge_OF_ranges(phb, node);
- pci_setup_phb_io(phb, index == 0);
-
- if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) {
- int addr = root_size_cells * (index + 2) - 1;
- mpic_assign_isu(pSeries_mpic, index, opprop[addr]);
- }
-
- index++;
- }
-
- of_node_put(root);
- pci_devs_phb_init();
-
- /*
- * pci_probe_only and pci_assign_all_buses can be set via properties
- * in chosen.
- */
- if (of_chosen) {
- int *prop;
-
- prop = (int *)get_property(of_chosen, "linux,pci-probe-only",
- NULL);
- if (prop)
- pci_probe_only = *prop;
-
- prop = (int *)get_property(of_chosen,
- "linux,pci-assign-all-buses", NULL);
- if (prop)
- pci_assign_all_buses = *prop;
- }
-
- return 0;
-}
-
-struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
-{
- struct device_node *root = of_find_node_by_path("/");
- unsigned int root_size_cells = 0;
- struct pci_controller *phb;
- struct pci_bus *bus;
- int primary;
-
- root_size_cells = prom_n_size_cells(root);
-
- primary = list_empty(&hose_list);
- phb = alloc_phb_dynamic(dn, root_size_cells);
- if (!phb)
- return NULL;
-
- pci_process_bridge_OF_ranges(phb, dn);
-
- pci_setup_phb_io_dynamic(phb, primary);
- of_node_put(root);
-
- pci_devs_phb_init_dynamic(phb);
- phb->last_busno = 0xff;
- bus = pci_scan_bus(phb->first_busno, phb->ops, phb->arch_data);
- phb->bus = bus;
- phb->last_busno = bus->subordinate;
-
- return phb;
-}
-EXPORT_SYMBOL(init_phb_dynamic);
+static int __initdata s7a_workaround = -1;
#if 0
void pcibios_name_device(struct pci_dev *dev)
@@ -474,11 +60,12 @@ void pcibios_name_device(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device);
#endif
-static void check_s7a(void)
+static void __init check_s7a(void)
{
struct device_node *root;
char *model;
+ s7a_workaround = 0;
root = of_find_node_by_path("/");
if (root) {
model = get_property(root, "model", NULL);
@@ -488,55 +75,23 @@ static void check_s7a(void)
}
}
-/* RPA-specific bits for removing PHBs */
-int pcibios_remove_root_bus(struct pci_controller *phb)
+void __devinit pSeries_irq_bus_setup(struct pci_bus *bus)
{
- struct pci_bus *b = phb->bus;
- struct resource *res;
- int rc, i;
-
- res = b->resource[0];
- if (!res->flags) {
- printk(KERN_ERR "%s: no IO resource for PHB %s\n", __FUNCTION__,
- b->name);
- return 1;
- }
-
- rc = unmap_bus_range(b);
- if (rc) {
- printk(KERN_ERR "%s: failed to unmap IO on bus %s\n",
- __FUNCTION__, b->name);
- return 1;
- }
+ struct pci_dev *dev;
- if (release_resource(res)) {
- printk(KERN_ERR "%s: failed to release IO on bus %s\n",
- __FUNCTION__, b->name);
- return 1;
- }
-
- for (i = 1; i < 3; ++i) {
- res = b->resource[i];
- if (!res->flags && i == 0) {
- printk(KERN_ERR "%s: no MEM resource for PHB %s\n",
- __FUNCTION__, b->name);
- return 1;
- }
- if (res->flags && release_resource(res)) {
- printk(KERN_ERR
- "%s: failed to release IO %d on bus %s\n",
- __FUNCTION__, i, b->name);
- return 1;
+ if (s7a_workaround < 0)
+ check_s7a();
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_read_irq_line(dev);
+ if (s7a_workaround) {
+ if (dev->irq > 16) {
+ dev->irq -= 3;
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
+ dev->irq);
+ }
}
}
-
- list_del(&phb->list_node);
- if (phb->is_dynamic)
- kfree(phb);
-
- return 0;
}
-EXPORT_SYMBOL(pcibios_remove_root_bus);
static void __init pSeries_request_regions(void)
{
@@ -553,20 +108,6 @@ static void __init pSeries_request_regions(void)
void __init pSeries_final_fixup(void)
{
- struct pci_dev *dev = NULL;
-
- check_s7a();
-
- for_each_pci_dev(dev) {
- pci_read_irq_line(dev);
- if (s7a_workaround) {
- if (dev->irq > 16) {
- dev->irq -= 3;
- pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
- }
- }
- }
-
phbs_remap_io();
pSeries_request_regions();
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c
index 6c0d1d58a55..f2b41243342 100644
--- a/arch/ppc64/kernel/pSeries_setup.c
+++ b/arch/ppc64/kernel/pSeries_setup.c
@@ -71,11 +71,6 @@
#define DBG(fmt...)
#endif
-extern void pSeries_final_fixup(void);
-
-extern void pSeries_get_boot_time(struct rtc_time *rtc_time);
-extern void pSeries_get_rtc_time(struct rtc_time *rtc_time);
-extern int pSeries_set_rtc_time(struct rtc_time *rtc_time);
extern void find_udbg_vterm(void);
extern void system_reset_fwnmi(void); /* from head.S */
extern void machine_check_fwnmi(void); /* from head.S */
@@ -84,9 +79,6 @@ extern void generic_find_legacy_serial_ports(u64 *physport,
int fwnmi_active; /* TRUE if an FWNMI handler is present */
-extern unsigned long ppc_proc_freq;
-extern unsigned long ppc_tb_freq;
-
extern void pSeries_system_reset_exception(struct pt_regs *regs);
extern int pSeries_machine_check_exception(struct pt_regs *regs);
@@ -381,171 +373,6 @@ static void __init pSeries_init_early(void)
}
-static void pSeries_progress(char *s, unsigned short hex)
-{
- struct device_node *root;
- int width, *p;
- char *os;
- static int display_character, set_indicator;
- static int max_width;
- static DEFINE_SPINLOCK(progress_lock);
- static int pending_newline = 0; /* did last write end with unprinted newline? */
-
- if (!rtas.base)
- return;
-
- if (max_width == 0) {
- if ((root = find_path_device("/rtas")) &&
- (p = (unsigned int *)get_property(root,
- "ibm,display-line-length",
- NULL)))
- max_width = *p;
- else
- max_width = 0x10;
- display_character = rtas_token("display-character");
- set_indicator = rtas_token("set-indicator");
- }
-
- if (display_character == RTAS_UNKNOWN_SERVICE) {
- /* use hex display if available */
- if (set_indicator != RTAS_UNKNOWN_SERVICE)
- rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
- return;
- }
-
- spin_lock(&progress_lock);
-
- /*
- * Last write ended with newline, but we didn't print it since
- * it would just clear the bottom line of output. Print it now
- * instead.
- *
- * If no newline is pending, print a CR to start output at the
- * beginning of the line.
- */
- if (pending_newline) {
- rtas_call(display_character, 1, 1, NULL, '\r');
- rtas_call(display_character, 1, 1, NULL, '\n');
- pending_newline = 0;
- } else {
- rtas_call(display_character, 1, 1, NULL, '\r');
- }
-
- width = max_width;
- os = s;
- while (*os) {
- if (*os == '\n' || *os == '\r') {
- /* Blank to end of line. */
- while (width-- > 0)
- rtas_call(display_character, 1, 1, NULL, ' ');
-
- /* If newline is the last character, save it
- * until next call to avoid bumping up the
- * display output.
- */
- if (*os == '\n' && !os[1]) {
- pending_newline = 1;
- spin_unlock(&progress_lock);
- return;
- }
-
- /* RTAS wants CR-LF, not just LF */
-
- if (*os == '\n') {
- rtas_call(display_character, 1, 1, NULL, '\r');
- rtas_call(display_character, 1, 1, NULL, '\n');
- } else {
- /* CR might be used to re-draw a line, so we'll
- * leave it alone and not add LF.
- */
- rtas_call(display_character, 1, 1, NULL, *os);
- }
-
- width = max_width;
- } else {
- width--;
- rtas_call(display_character, 1, 1, NULL, *os);
- }
-
- os++;
-
- /* if we overwrite the screen length */
- if (width <= 0)
- while ((*os != 0) && (*os != '\n') && (*os != '\r'))
- os++;
- }
-
- /* Blank to end of line. */
- while (width-- > 0)
- rtas_call(display_character, 1, 1, NULL, ' ');
-
- spin_unlock(&progress_lock);
-}
-
-extern void setup_default_decr(void);
-
-/* Some sane defaults: 125 MHz timebase, 1GHz processor */
-#define DEFAULT_TB_FREQ 125000000UL
-#define DEFAULT_PROC_FREQ (DEFAULT_TB_FREQ * 8)
-
-static void __init pSeries_calibrate_decr(void)
-{
- struct device_node *cpu;
- struct div_result divres;
- unsigned int *fp;
- int node_found;
-
- /*
- * The cpu node should have a timebase-frequency property
- * to tell us the rate at which the decrementer counts.
- */
- cpu = of_find_node_by_type(NULL, "cpu");
-
- ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */
- node_found = 0;
- if (cpu != 0) {
- fp = (unsigned int *)get_property(cpu, "timebase-frequency",
- NULL);
- if (fp != 0) {
- node_found = 1;
- ppc_tb_freq = *fp;
- }
- }
- if (!node_found)
- printk(KERN_ERR "WARNING: Estimating decrementer frequency "
- "(not found)\n");
-
- ppc_proc_freq = DEFAULT_PROC_FREQ;
- node_found = 0;
- if (cpu != 0) {
- fp = (unsigned int *)get_property(cpu, "clock-frequency",
- NULL);
- if (fp != 0) {
- node_found = 1;
- ppc_proc_freq = *fp;
- }
- }
- if (!node_found)
- printk(KERN_ERR "WARNING: Estimating processor frequency "
- "(not found)\n");
-
- of_node_put(cpu);
-
- printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
- ppc_tb_freq/1000000, ppc_tb_freq%1000000);
- printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
- ppc_proc_freq/1000000, ppc_proc_freq%1000000);
-
- tb_ticks_per_jiffy = ppc_tb_freq / HZ;
- tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
- tb_ticks_per_usec = ppc_tb_freq / 1000000;
- tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
- div128_by_32(1024*1024, 0, tb_ticks_per_sec, &divres);
- tb_to_xs = divres.result_low;
-
- setup_default_decr();
-}
-
static int pSeries_check_legacy_ioport(unsigned int baseport)
{
struct device_node *np;
@@ -596,16 +423,17 @@ struct machdep_calls __initdata pSeries_md = {
.get_cpuinfo = pSeries_get_cpuinfo,
.log_error = pSeries_log_error,
.pcibios_fixup = pSeries_final_fixup,
+ .irq_bus_setup = pSeries_irq_bus_setup,
.restart = rtas_restart,
.power_off = rtas_power_off,
.halt = rtas_halt,
.panic = rtas_os_term,
.cpu_die = pSeries_mach_cpu_die,
- .get_boot_time = pSeries_get_boot_time,
- .get_rtc_time = pSeries_get_rtc_time,
- .set_rtc_time = pSeries_set_rtc_time,
- .calibrate_decr = pSeries_calibrate_decr,
- .progress = pSeries_progress,
+ .get_boot_time = rtas_get_boot_time,
+ .get_rtc_time = rtas_get_rtc_time,
+ .set_rtc_time = rtas_set_rtc_time,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = rtas_progress,
.check_legacy_ioport = pSeries_check_legacy_ioport,
.system_reset_exception = pSeries_system_reset_exception,
.machine_check_exception = pSeries_machine_check_exception,
diff --git a/arch/ppc64/kernel/pSeries_smp.c b/arch/ppc64/kernel/pSeries_smp.c
index 4203bd020c8..30154140f7e 100644
--- a/arch/ppc64/kernel/pSeries_smp.c
+++ b/arch/ppc64/kernel/pSeries_smp.c
@@ -1,5 +1,5 @@
/*
- * SMP support for pSeries machines.
+ * SMP support for pSeries and BPA machines.
*
* Dave Engebretsen, Peter Bergner, and
* Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
@@ -47,6 +47,7 @@
#include <asm/pSeries_reconfig.h>
#include "mpic.h"
+#include "bpa_iic.h"
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -286,6 +287,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
return 1;
}
+#ifdef CONFIG_XICS
static inline void smp_xics_do_message(int cpu, int msg)
{
set_bit(msg, &xics_ipi_message[cpu].value);
@@ -327,6 +329,37 @@ static void __devinit smp_xics_setup_cpu(int cpu)
cpu_clear(cpu, of_spin_map);
}
+#endif /* CONFIG_XICS */
+#ifdef CONFIG_BPA_IIC
+static void smp_iic_message_pass(int target, int msg)
+{
+ unsigned int i;
+
+ if (target < NR_CPUS) {
+ iic_cause_IPI(target, msg);
+ } else {
+ for_each_online_cpu(i) {
+ if (target == MSG_ALL_BUT_SELF
+ && i == smp_processor_id())
+ continue;
+ iic_cause_IPI(i, msg);
+ }
+ }
+}
+
+static int __init smp_iic_probe(void)
+{
+ iic_request_IPIs();
+
+ return cpus_weight(cpu_possible_map);
+}
+
+static void __devinit smp_iic_setup_cpu(int cpu)
+{
+ if (cpu != boot_cpuid)
+ iic_setup_cpu();
+}
+#endif /* CONFIG_BPA_IIC */
static DEFINE_SPINLOCK(timebase_lock);
static unsigned long timebase = 0;
@@ -381,14 +414,15 @@ static int smp_pSeries_cpu_bootable(unsigned int nr)
return 1;
}
-
+#ifdef CONFIG_MPIC
static struct smp_ops_t pSeries_mpic_smp_ops = {
.message_pass = smp_mpic_message_pass,
.probe = smp_mpic_probe,
.kick_cpu = smp_pSeries_kick_cpu,
.setup_cpu = smp_mpic_setup_cpu,
};
-
+#endif
+#ifdef CONFIG_XICS
static struct smp_ops_t pSeries_xics_smp_ops = {
.message_pass = smp_xics_message_pass,
.probe = smp_xics_probe,
@@ -396,6 +430,16 @@ static struct smp_ops_t pSeries_xics_smp_ops = {
.setup_cpu = smp_xics_setup_cpu,
.cpu_bootable = smp_pSeries_cpu_bootable,
};
+#endif
+#ifdef CONFIG_BPA_IIC
+static struct smp_ops_t bpa_iic_smp_ops = {
+ .message_pass = smp_iic_message_pass,
+ .probe = smp_iic_probe,
+ .kick_cpu = smp_pSeries_kick_cpu,
+ .setup_cpu = smp_iic_setup_cpu,
+ .cpu_bootable = smp_pSeries_cpu_bootable,
+};
+#endif
/* This is called very early */
void __init smp_init_pSeries(void)
@@ -404,10 +448,25 @@ void __init smp_init_pSeries(void)
DBG(" -> smp_init_pSeries()\n");
- if (ppc64_interrupt_controller == IC_OPEN_PIC)
+ switch (ppc64_interrupt_controller) {
+#ifdef CONFIG_MPIC
+ case IC_OPEN_PIC:
smp_ops = &pSeries_mpic_smp_ops;
- else
+ break;
+#endif
+#ifdef CONFIG_XICS
+ case IC_PPC_XIC:
smp_ops = &pSeries_xics_smp_ops;
+ break;
+#endif
+#ifdef CONFIG_BPA_IIC
+ case IC_BPA_IIC:
+ smp_ops = &bpa_iic_smp_ops;
+ break;
+#endif
+ default:
+ panic("Invalid interrupt controller");
+ }
#ifdef CONFIG_HOTPLUG_CPU
smp_ops->cpu_disable = pSeries_cpu_disable;
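
smp_iic_message_pass() above uses the standard unicast-or-broadcast split,
where MSG_ALL_BUT_SELF skips the sender. A stand-alone sketch; the CPU
count and the IPI sink are illustrative:

#include <stdio.h>

#define NR_CPUS          4
#define MSG_ALL_BUT_SELF NR_CPUS  /* any target >= NR_CPUS broadcasts */

static void cause_ipi(int cpu, int msg)
{
    printf("IPI %d -> cpu %d\n", msg, cpu);
}

static void message_pass(int self, int target, int msg)
{
    int i;

    if (target < NR_CPUS) {          /* unicast */
        cause_ipi(target, msg);
        return;
    }
    for (i = 0; i < NR_CPUS; i++) {  /* broadcast */
        if (target == MSG_ALL_BUT_SELF && i == self)
            continue;
        cause_ipi(i, msg);
    }
}

int main(void)
{
    message_pass(0, MSG_ALL_BUT_SELF, 1);  /* reaches cpus 1..3 only */
    return 0;
}
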
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index 2bf0513f3ec..580676f87d2 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -902,6 +902,9 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list)
ppc_md.iommu_dev_setup(dev);
+ if (ppc_md.irq_bus_setup)
+ ppc_md.irq_bus_setup(bus);
+
if (!pci_probe_only)
return;
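
The NULL check here is the usual machdep_calls convention: hooks are
optional function pointers, unset unless a platform fills them in. A
minimal stand-alone model of that pattern:

#include <stdio.h>

/* Minimal model of an optional per-platform hook table. */
struct machdep_calls {
    void (*irq_bus_setup)(int bus);  /* NULL if the platform doesn't care */
};

static void pseries_irq_bus_setup(int bus)
{
    printf("fixing up IRQs on bus %d\n", bus);
}

int main(void)
{
    struct machdep_calls md = { .irq_bus_setup = pseries_irq_bus_setup };

    if (md.irq_bus_setup)  /* optional, so test before calling */
        md.irq_bus_setup(0);
    return 0;
}
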
diff --git a/arch/ppc64/kernel/pci.h b/arch/ppc64/kernel/pci.h
index 0fd7d849aa7..26be78b13af 100644
--- a/arch/ppc64/kernel/pci.h
+++ b/arch/ppc64/kernel/pci.h
@@ -40,10 +40,14 @@ struct device_node *fetch_dev_dn(struct pci_dev *dev);
void pci_addr_cache_insert_device(struct pci_dev *dev);
void pci_addr_cache_remove_device(struct pci_dev *dev);
-/* From pSeries_pci.h */
+/* From rtas_pci.h */
void init_pci_config_tokens (void);
unsigned long get_phb_buid (struct device_node *);
+/* From pSeries_pci.h */
+extern void pSeries_final_fixup(void);
+extern void pSeries_irq_bus_setup(struct pci_bus *bus);
+
extern unsigned long pci_probe_only;
extern unsigned long pci_assign_all_buses;
extern int pci_read_irq_line(struct pci_dev *pci_dev);
diff --git a/arch/ppc64/kernel/pmac_time.c b/arch/ppc64/kernel/pmac_time.c
index f24827581dd..3059edb09cc 100644
--- a/arch/ppc64/kernel/pmac_time.c
+++ b/arch/ppc64/kernel/pmac_time.c
@@ -40,11 +40,6 @@
#define DBG(x...)
#endif
-extern void setup_default_decr(void);
-
-extern unsigned long ppc_tb_freq;
-extern unsigned long ppc_proc_freq;
-
/* Apparently the RTC stores seconds since 1 Jan 1904 */
#define RTC_OFFSET 2082844800
@@ -161,8 +156,7 @@ void __init pmac_get_boot_time(struct rtc_time *tm)
/*
* Query the OF and get the decr frequency.
- * This was taken from the pmac time_init() when merging the prep/pmac
- * time functions.
+ * FIXME: merge this with generic_calibrate_decr
*/
void __init pmac_calibrate_decr(void)
{
diff --git a/arch/ppc64/kernel/proc_ppc64.c b/arch/ppc64/kernel/proc_ppc64.c
index 0914b0669b0..a87c66a9652 100644
--- a/arch/ppc64/kernel/proc_ppc64.c
+++ b/arch/ppc64/kernel/proc_ppc64.c
@@ -53,7 +53,7 @@ static int __init proc_ppc64_create(void)
if (!root)
return 1;
- if (!(systemcfg->platform & PLATFORM_PSERIES))
+ if (!(systemcfg->platform & (PLATFORM_PSERIES | PLATFORM_BPA)))
return 0;
if (!proc_mkdir("rtas", root))
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
index b7683abfbe6..e248a7950ae 100644
--- a/arch/ppc64/kernel/prom_init.c
+++ b/arch/ppc64/kernel/prom_init.c
@@ -1915,9 +1915,9 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long
prom_send_capabilities();
/*
- * On pSeries, copy the CPU hold code
+ * On pSeries and BPA, copy the CPU hold code
*/
- if (RELOC(of_platform) & PLATFORM_PSERIES)
+ if (RELOC(of_platform) & (PLATFORM_PSERIES | PLATFORM_BPA))
copy_and_flush(0, KERNELBASE - offset, 0x100, 0);
/*
diff --git a/arch/ppc64/kernel/ptrace.c b/arch/ppc64/kernel/ptrace.c
index 9f8c6087ae5..2993f108d96 100644
--- a/arch/ppc64/kernel/ptrace.c
+++ b/arch/ppc64/kernel/ptrace.c
@@ -305,6 +305,8 @@ static void do_syscall_trace(void)
void do_syscall_trace_enter(struct pt_regs *regs)
{
+ secure_computing(regs->gpr[0]);
+
if (test_thread_flag(TIF_SYSCALL_TRACE)
&& (current->ptrace & PT_PTRACED))
do_syscall_trace();
@@ -320,8 +322,6 @@ void do_syscall_trace_enter(struct pt_regs *regs)
void do_syscall_trace_leave(struct pt_regs *regs)
{
- secure_computing(regs->gpr[0]);
-
if (unlikely(current->audit_context))
audit_syscall_exit(current,
(regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
diff --git a/arch/ppc64/kernel/rtas-proc.c b/arch/ppc64/kernel/rtas-proc.c
index 28b1f1521f2..1f3ff860fdf 100644
--- a/arch/ppc64/kernel/rtas-proc.c
+++ b/arch/ppc64/kernel/rtas-proc.c
@@ -371,11 +371,11 @@ static ssize_t ppc_rtas_progress_write(struct file *file,
/* Lets see if the user passed hexdigits */
hex = simple_strtoul(progress_led, NULL, 10);
- ppc_md.progress ((char *)progress_led, hex);
+ rtas_progress ((char *)progress_led, hex);
return count;
/* clear the line */
- /* ppc_md.progress(" ", 0xffff);*/
+ /* rtas_progress(" ", 0xffff);*/
}
/* ****************************************************************** */
static int ppc_rtas_progress_show(struct seq_file *m, void *v)
diff --git a/arch/ppc64/kernel/rtas.c b/arch/ppc64/kernel/rtas.c
index 5575603def2..5e8eb33b8e5 100644
--- a/arch/ppc64/kernel/rtas.c
+++ b/arch/ppc64/kernel/rtas.c
@@ -91,6 +91,123 @@ call_rtas_display_status_delay(unsigned char c)
}
}
+void
+rtas_progress(char *s, unsigned short hex)
+{
+ struct device_node *root;
+ int width, *p;
+ char *os;
+ static int display_character, set_indicator;
+ static int display_width, display_lines, *row_width, form_feed;
+ static DEFINE_SPINLOCK(progress_lock);
+ static int current_line;
+ static int pending_newline = 0; /* did last write end with unprinted newline? */
+
+ if (!rtas.base)
+ return;
+
+ if (display_width == 0) {
+ display_width = 0x10;
+ if ((root = find_path_device("/rtas"))) {
+ if ((p = (unsigned int *)get_property(root,
+ "ibm,display-line-length", NULL)))
+ display_width = *p;
+ if ((p = (unsigned int *)get_property(root,
+ "ibm,form-feed", NULL)))
+ form_feed = *p;
+ if ((p = (unsigned int *)get_property(root,
+ "ibm,display-number-of-lines", NULL)))
+ display_lines = *p;
+ row_width = (unsigned int *)get_property(root,
+ "ibm,display-truncation-length", NULL);
+ }
+ display_character = rtas_token("display-character");
+ set_indicator = rtas_token("set-indicator");
+ }
+
+ if (display_character == RTAS_UNKNOWN_SERVICE) {
+ /* use hex display if available */
+ if (set_indicator != RTAS_UNKNOWN_SERVICE)
+ rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
+ return;
+ }
+
+ spin_lock(&progress_lock);
+
+ /*
+ * Last write ended with newline, but we didn't print it since
+ * it would just clear the bottom line of output. Print it now
+ * instead.
+ *
+ * If no newline is pending and form feed is supported, clear the
+ * display with a form feed; otherwise, print a CR to start output
+ * at the beginning of the line.
+ */
+ if (pending_newline) {
+ rtas_call(display_character, 1, 1, NULL, '\r');
+ rtas_call(display_character, 1, 1, NULL, '\n');
+ pending_newline = 0;
+ } else {
+ current_line = 0;
+ if (form_feed)
+ rtas_call(display_character, 1, 1, NULL,
+ (char)form_feed);
+ else
+ rtas_call(display_character, 1, 1, NULL, '\r');
+ }
+
+ if (row_width)
+ width = row_width[current_line];
+ else
+ width = display_width;
+ os = s;
+ while (*os) {
+ if (*os == '\n' || *os == '\r') {
+ /* If newline is the last character, save it
+ * until next call to avoid bumping up the
+ * display output.
+ */
+ if (*os == '\n' && !os[1]) {
+ pending_newline = 1;
+ current_line++;
+ if (current_line > display_lines - 1)
+ current_line = display_lines - 1;
+ spin_unlock(&progress_lock);
+ return;
+ }
+
+ /* RTAS wants CR-LF, not just LF */
+
+ if (*os == '\n') {
+ rtas_call(display_character, 1, 1, NULL, '\r');
+ rtas_call(display_character, 1, 1, NULL, '\n');
+ } else {
+ /* CR might be used to re-draw a line, so we'll
+ * leave it alone and not add LF.
+ */
+ rtas_call(display_character, 1, 1, NULL, *os);
+ }
+
+ if (row_width)
+ width = row_width[current_line];
+ else
+ width = display_width;
+ } else {
+ width--;
+ rtas_call(display_character, 1, 1, NULL, *os);
+ }
+
+ os++;
+
+ /* if we overrun the display width, skip to the end of the line */
+ if (width <= 0)
+ while ((*os != 0) && (*os != '\n') && (*os != '\r'))
+ os++;
+ }
+
+ spin_unlock(&progress_lock);
+}
+
int
rtas_token(const char *service)
{
@@ -425,8 +542,8 @@ rtas_flash_firmware(void)
printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
printk(KERN_ALERT "FLASH: performing flash and reboot\n");
- ppc_md.progress("Flashing \n", 0x0);
- ppc_md.progress("Please Wait... ", 0x0);
+ rtas_progress("Flashing \n", 0x0);
+ rtas_progress("Please Wait... ", 0x0);
printk(KERN_ALERT "FLASH: this will take several minutes. Do not power off!\n");
status = rtas_call(update_token, 1, 1, NULL, rtas_block_list);
switch (status) { /* should only get "bad" status */
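
Both rtas_progress() and the flash path above follow the same idiom:
resolve a service name to a token once, treat RTAS_UNKNOWN_SERVICE as
"fall back", and pass the token to rtas_call(). A stand-alone sketch with
stubbed lookup/call functions:

#include <stdio.h>

#define RTAS_UNKNOWN_SERVICE (-1)

/* Stubs so the pattern compiles and runs outside the kernel. */
static int rtas_token(const char *service)
{
    /* pretend only "display-character" exists */
    return service[0] == 'd' ? 42 : RTAS_UNKNOWN_SERVICE;
}

static int rtas_call(int token, int nargs, int nret, int *outputs, ...)
{
    (void)nargs; (void)nret; (void)outputs;
    printf("rtas_call(token=%d)\n", token);
    return 0;
}

int main(void)
{
    static int display_character = RTAS_UNKNOWN_SERVICE;

    /* resolve once, then reuse the cached token */
    if (display_character == RTAS_UNKNOWN_SERVICE)
        display_character = rtas_token("display-character");

    if (display_character != RTAS_UNKNOWN_SERVICE)
        rtas_call(display_character, 1, 1, NULL, '\r');
    else
        printf("no display, fall back to hex indicator\n");
    return 0;
}
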
diff --git a/arch/ppc64/kernel/rtas_pci.c b/arch/ppc64/kernel/rtas_pci.c
new file mode 100644
index 00000000000..1048817befb
--- /dev/null
+++ b/arch/ppc64/kernel/rtas_pci.c
@@ -0,0 +1,495 @@
+/*
+ * arch/ppc64/kernel/rtas_pci.c
+ *
+ * Copyright (C) 2001 Dave Engebretsen, IBM Corporation
+ * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
+ *
+ * RTAS specific routines for PCI.
+ *
+ * Based on code from pci.c, chrp_pci.c and pSeries_pci.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <asm/iommu.h>
+#include <asm/rtas.h>
+
+#include "mpic.h"
+#include "pci.h"
+
+/* RTAS tokens */
+static int read_pci_config;
+static int write_pci_config;
+static int ibm_read_pci_config;
+static int ibm_write_pci_config;
+
+static int config_access_valid(struct device_node *dn, int where)
+{
+ if (where < 256)
+ return 1;
+ if (where < 4096 && dn->pci_ext_config_space)
+ return 1;
+
+ return 0;
+}
+
+static int rtas_read_config(struct device_node *dn, int where, int size, u32 *val)
+{
+ int returnval = -1;
+ unsigned long buid, addr;
+ int ret;
+
+ if (!dn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ if (!config_access_valid(dn, where))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ addr = ((where & 0xf00) << 20) | (dn->busno << 16) |
+ (dn->devfn << 8) | (where & 0xff);
+ buid = dn->phb->buid;
+ if (buid) {
+ ret = rtas_call(ibm_read_pci_config, 4, 2, &returnval,
+ addr, buid >> 32, buid & 0xffffffff, size);
+ } else {
+ ret = rtas_call(read_pci_config, 2, 2, &returnval, addr, size);
+ }
+ *val = returnval;
+
+ if (ret)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (returnval == EEH_IO_ERROR_VALUE(size)
+ && eeh_dn_check_failure (dn, NULL))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rtas_pci_read_config(struct pci_bus *bus,
+ unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct device_node *busdn, *dn;
+
+ if (bus->self)
+ busdn = pci_device_to_OF_node(bus->self);
+ else
+ busdn = bus->sysdata; /* must be a phb */
+
+ /* Search only direct children of the bus */
+ for (dn = busdn->child; dn; dn = dn->sibling)
+ if (dn->devfn == devfn)
+ return rtas_read_config(dn, where, size, val);
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static int rtas_write_config(struct device_node *dn, int where, int size, u32 val)
+{
+ unsigned long buid, addr;
+ int ret;
+
+ if (!dn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ if (!config_access_valid(dn, where))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ addr = ((where & 0xf00) << 20) | (dn->busno << 16) |
+ (dn->devfn << 8) | (where & 0xff);
+ buid = dn->phb->buid;
+ if (buid) {
+ ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr, buid >> 32, buid & 0xffffffff, size, (ulong) val);
+ } else {
+ ret = rtas_call(write_pci_config, 3, 1, NULL, addr, size, (ulong)val);
+ }
+
+ if (ret)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rtas_pci_write_config(struct pci_bus *bus,
+ unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct device_node *busdn, *dn;
+
+ if (bus->self)
+ busdn = pci_device_to_OF_node(bus->self);
+ else
+ busdn = bus->sysdata; /* must be a phb */
+
+ /* Search only direct children of the bus */
+ for (dn = busdn->child; dn; dn = dn->sibling)
+ if (dn->devfn == devfn)
+ return rtas_write_config(dn, where, size, val);
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+struct pci_ops rtas_pci_ops = {
+ rtas_pci_read_config,
+ rtas_pci_write_config
+};
+
+int is_python(struct device_node *dev)
+{
+ char *model = (char *)get_property(dev, "model", NULL);
+
+ if (model && strstr(model, "Python"))
+ return 1;
+
+ return 0;
+}
+
+static int get_phb_reg_prop(struct device_node *dev,
+ unsigned int addr_size_words,
+ struct reg_property64 *reg)
+{
+ unsigned int *ui_ptr = NULL, len;
+
+ /* Found a PHB, now figure out where its registers are mapped. */
+ ui_ptr = (unsigned int *)get_property(dev, "reg", &len);
+ if (ui_ptr == NULL)
+ return 1;
+
+ if (addr_size_words == 1) {
+ reg->address = ((struct reg_property32 *)ui_ptr)->address;
+ reg->size = ((struct reg_property32 *)ui_ptr)->size;
+ } else {
+ *reg = *((struct reg_property64 *)ui_ptr);
+ }
+
+ return 0;
+}
+
+static void python_countermeasures(struct device_node *dev,
+ unsigned int addr_size_words)
+{
+ struct reg_property64 reg_struct;
+ void __iomem *chip_regs;
+ volatile u32 val;
+
+ if (get_phb_reg_prop(dev, addr_size_words, &reg_struct))
+ return;
+
+ /* Python's register file is 1 MB in size. */
+ chip_regs = ioremap(reg_struct.address & ~(0xfffffUL), 0x100000);
+
+ /*
+ * Firmware doesn't always clear this bit, which is critical
+ * for good performance - Anton
+ */
+
+#define PRG_CL_RESET_VALID 0x00010000
+
+ val = in_be32(chip_regs + 0xf6030);
+ if (val & PRG_CL_RESET_VALID) {
+ printk(KERN_INFO "Python workaround: ");
+ val &= ~PRG_CL_RESET_VALID;
+ out_be32(chip_regs + 0xf6030, val);
+ /*
+ * We must read it back for changes to
+ * take effect
+ */
+ val = in_be32(chip_regs + 0xf6030);
+ printk("reg0: %x\n", val);
+ }
+
+ iounmap(chip_regs);
+}
+
+void __init init_pci_config_tokens (void)
+{
+ read_pci_config = rtas_token("read-pci-config");
+ write_pci_config = rtas_token("write-pci-config");
+ ibm_read_pci_config = rtas_token("ibm,read-pci-config");
+ ibm_write_pci_config = rtas_token("ibm,write-pci-config");
+}
+
+unsigned long __devinit get_phb_buid (struct device_node *phb)
+{
+ int addr_cells;
+ unsigned int *buid_vals;
+ unsigned int len;
+ unsigned long buid;
+
+ if (ibm_read_pci_config == -1) return 0;
+
+ /* PHBs will always be children of the root node,
+ * or so the current firmware promises. */
+ if (phb->parent == NULL)
+ return 0;
+ if (phb->parent->parent)
+ return 0;
+
+ buid_vals = (unsigned int *) get_property(phb, "reg", &len);
+ if (buid_vals == NULL)
+ return 0;
+
+ addr_cells = prom_n_addr_cells(phb);
+ if (addr_cells == 1) {
+ buid = (unsigned long) buid_vals[0];
+ } else {
+ buid = (((unsigned long)buid_vals[0]) << 32UL) |
+ (((unsigned long)buid_vals[1]) & 0xffffffff);
+ }
+ return buid;
+}
+
+static int phb_set_bus_ranges(struct device_node *dev,
+ struct pci_controller *phb)
+{
+ int *bus_range;
+ unsigned int len;
+
+ bus_range = (int *) get_property(dev, "bus-range", &len);
+ if (bus_range == NULL || len < 2 * sizeof(int)) {
+ return 1;
+ }
+
+ phb->first_busno = bus_range[0];
+ phb->last_busno = bus_range[1];
+
+ return 0;
+}
+
+static int __devinit setup_phb(struct device_node *dev,
+ struct pci_controller *phb,
+ unsigned int addr_size_words)
+{
+ pci_setup_pci_controller(phb);
+
+ if (is_python(dev))
+ python_countermeasures(dev, addr_size_words);
+
+ if (phb_set_bus_ranges(dev, phb))
+ return 1;
+
+ phb->arch_data = dev;
+ phb->ops = &rtas_pci_ops;
+ phb->buid = get_phb_buid(dev);
+
+ return 0;
+}
+
+static void __devinit add_linux_pci_domain(struct device_node *dev,
+ struct pci_controller *phb,
+ struct property *of_prop)
+{
+ memset(of_prop, 0, sizeof(struct property));
+ of_prop->name = "linux,pci-domain";
+ of_prop->length = sizeof(phb->global_number);
+ of_prop->value = (unsigned char *)&of_prop[1];
+ memcpy(of_prop->value, &phb->global_number, sizeof(phb->global_number));
+ prom_add_property(dev, of_prop);
+}
+
+static struct pci_controller * __init alloc_phb(struct device_node *dev,
+ unsigned int addr_size_words)
+{
+ struct pci_controller *phb;
+ struct property *of_prop;
+
+ phb = alloc_bootmem(sizeof(struct pci_controller));
+ if (phb == NULL)
+ return NULL;
+
+ of_prop = alloc_bootmem(sizeof(struct property) +
+ sizeof(phb->global_number));
+ if (!of_prop)
+ return NULL;
+
+ if (setup_phb(dev, phb, addr_size_words))
+ return NULL;
+
+ add_linux_pci_domain(dev, phb, of_prop);
+
+ return phb;
+}
+
+static struct pci_controller * __devinit alloc_phb_dynamic(struct device_node *dev, unsigned int addr_size_words)
+{
+ struct pci_controller *phb;
+
+ phb = (struct pci_controller *)kmalloc(sizeof(struct pci_controller),
+ GFP_KERNEL);
+ if (phb == NULL)
+ return NULL;
+
+ if (setup_phb(dev, phb, addr_size_words))
+ return NULL;
+
+ phb->is_dynamic = 1;
+
+ /* TODO: linux,pci-domain? */
+
+ return phb;
+}
+
+unsigned long __init find_and_init_phbs(void)
+{
+ struct device_node *node;
+ struct pci_controller *phb;
+ unsigned int root_size_cells = 0;
+ unsigned int index;
+ unsigned int *opprop = NULL;
+ struct device_node *root = of_find_node_by_path("/");
+
+ if (ppc64_interrupt_controller == IC_OPEN_PIC) {
+ opprop = (unsigned int *)get_property(root,
+ "platform-open-pic", NULL);
+ }
+
+ root_size_cells = prom_n_size_cells(root);
+
+ index = 0;
+
+ for (node = of_get_next_child(root, NULL);
+ node != NULL;
+ node = of_get_next_child(root, node)) {
+ if (node->type == NULL || strcmp(node->type, "pci") != 0)
+ continue;
+
+ phb = alloc_phb(node, root_size_cells);
+ if (!phb)
+ continue;
+
+ pci_process_bridge_OF_ranges(phb, node);
+ pci_setup_phb_io(phb, index == 0);
+#ifdef CONFIG_PPC_PSERIES
+ if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) {
+ int addr = root_size_cells * (index + 2) - 1;
+ mpic_assign_isu(pSeries_mpic, index, opprop[addr]);
+ }
+#endif
+ index++;
+ }
+
+ of_node_put(root);
+ pci_devs_phb_init();
+
+ /*
+ * pci_probe_only and pci_assign_all_buses can be set via properties
+ * in chosen.
+ */
+ if (of_chosen) {
+ int *prop;
+
+ prop = (int *)get_property(of_chosen, "linux,pci-probe-only",
+ NULL);
+ if (prop)
+ pci_probe_only = *prop;
+
+ prop = (int *)get_property(of_chosen,
+ "linux,pci-assign-all-buses", NULL);
+ if (prop)
+ pci_assign_all_buses = *prop;
+ }
+
+ return 0;
+}
+
+struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
+{
+ struct device_node *root = of_find_node_by_path("/");
+ unsigned int root_size_cells = 0;
+ struct pci_controller *phb;
+ struct pci_bus *bus;
+ int primary;
+
+ root_size_cells = prom_n_size_cells(root);
+
+ primary = list_empty(&hose_list);
+ phb = alloc_phb_dynamic(dn, root_size_cells);
+ if (!phb)
+ return NULL;
+
+ pci_process_bridge_OF_ranges(phb, dn);
+
+ pci_setup_phb_io_dynamic(phb, primary);
+ of_node_put(root);
+
+ pci_devs_phb_init_dynamic(phb);
+ phb->last_busno = 0xff;
+ bus = pci_scan_bus(phb->first_busno, phb->ops, phb->arch_data);
+ phb->bus = bus;
+ phb->last_busno = bus->subordinate;
+
+ return phb;
+}
+EXPORT_SYMBOL(init_phb_dynamic);
+
+/* RPA-specific bits for removing PHBs */
+int pcibios_remove_root_bus(struct pci_controller *phb)
+{
+ struct pci_bus *b = phb->bus;
+ struct resource *res;
+ int rc, i;
+
+ res = b->resource[0];
+ if (!res->flags) {
+ printk(KERN_ERR "%s: no IO resource for PHB %s\n", __FUNCTION__,
+ b->name);
+ return 1;
+ }
+
+ rc = unmap_bus_range(b);
+ if (rc) {
+ printk(KERN_ERR "%s: failed to unmap IO on bus %s\n",
+ __FUNCTION__, b->name);
+ return 1;
+ }
+
+ if (release_resource(res)) {
+ printk(KERN_ERR "%s: failed to release IO on bus %s\n",
+ __FUNCTION__, b->name);
+ return 1;
+ }
+
+ for (i = 1; i < 3; ++i) {
+ res = b->resource[i];
+ if (!res->flags && i == 0) {
+ printk(KERN_ERR "%s: no MEM resource for PHB %s\n",
+ __FUNCTION__, b->name);
+ return 1;
+ }
+ if (res->flags && release_resource(res)) {
+ printk(KERN_ERR
+ "%s: failed to release IO %d on bus %s\n",
+ __FUNCTION__, i, b->name);
+ return 1;
+ }
+ }
+
+ list_del(&phb->list_node);
+ if (phb->is_dynamic)
+ kfree(phb);
+
+ return 0;
+}
+EXPORT_SYMBOL(pcibios_remove_root_bus);
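
The bit-packing in rtas_read_config()/rtas_write_config() and the BUID
assembly in get_phb_buid() are easiest to verify with concrete numbers;
the bus/devfn/register and "reg" cell values below are made up:

#include <stdio.h>
#include <stdint.h>

/* Config address as packed for the RTAS call: extended-register bits
 * [11:8] land in [31:28], then bus, devfn, and the low register byte. */
static uint64_t rtas_config_addr(unsigned int busno, unsigned int devfn,
                                 unsigned int where)
{
    return ((uint64_t)(where & 0xf00) << 20) | (busno << 16) |
           (devfn << 8) | (where & 0xff);
}

int main(void)
{
    uint32_t cells[2] = { 0x8000, 0x00200000 };  /* fake "reg" cells */
    uint64_t buid;

    /* register 0x104 on bus 2, devfn 8 -> 0x10020804 */
    printf("addr = %#llx\n",
           (unsigned long long)rtas_config_addr(2, 8, 0x104));

    /* two 32-bit cells -> one 64-bit BUID, as in get_phb_buid() */
    buid = ((uint64_t)cells[0] << 32) | (cells[1] & 0xffffffffUL);
    printf("buid = %#llx\n", (unsigned long long)buid);
    return 0;
}
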
diff --git a/arch/ppc64/kernel/rtc.c b/arch/ppc64/kernel/rtc.c
index de02aedbe08..d729fefa0df 100644
--- a/arch/ppc64/kernel/rtc.c
+++ b/arch/ppc64/kernel/rtc.c
@@ -301,7 +301,7 @@ void iSeries_get_boot_time(struct rtc_time *tm)
#ifdef CONFIG_PPC_RTAS
#define MAX_RTC_WAIT 5000 /* 5 sec */
#define RTAS_CLOCK_BUSY (-2)
-void pSeries_get_boot_time(struct rtc_time *rtc_tm)
+void rtas_get_boot_time(struct rtc_time *rtc_tm)
{
int ret[8];
int error, wait_time;
@@ -336,7 +336,7 @@ void pSeries_get_boot_time(struct rtc_time *rtc_tm)
* and if a delay is needed to read the clock. In this case we just
* silently return without updating rtc_tm.
*/
-void pSeries_get_rtc_time(struct rtc_time *rtc_tm)
+void rtas_get_rtc_time(struct rtc_time *rtc_tm)
{
int ret[8];
int error, wait_time;
@@ -371,7 +371,7 @@ void pSeries_get_rtc_time(struct rtc_time *rtc_tm)
rtc_tm->tm_year = ret[0] - 1900;
}
-int pSeries_set_rtc_time(struct rtc_time *tm)
+int rtas_set_rtc_time(struct rtc_time *tm)
{
int error, wait_time;
unsigned long max_wait_tb;
diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c
index 8e439a81764..0a47a5ef428 100644
--- a/arch/ppc64/kernel/setup.c
+++ b/arch/ppc64/kernel/setup.c
@@ -344,6 +344,7 @@ static void __init setup_cpu_maps(void)
extern struct machdep_calls pSeries_md;
extern struct machdep_calls pmac_md;
extern struct machdep_calls maple_md;
+extern struct machdep_calls bpa_md;
/* Ultimately, stuff them in an elf section like initcalls... */
static struct machdep_calls __initdata *machines[] = {
@@ -356,6 +357,9 @@ static struct machdep_calls __initdata *machines[] = {
#ifdef CONFIG_PPC_MAPLE
&maple_md,
#endif /* CONFIG_PPC_MAPLE */
+#ifdef CONFIG_PPC_BPA
+ &bpa_md,
+#endif
NULL
};
@@ -679,6 +683,12 @@ void machine_restart(char *cmd)
if (ppc_md.nvram_sync)
ppc_md.nvram_sync();
ppc_md.restart(cmd);
+#ifdef CONFIG_SMP
+ smp_send_stop();
+#endif
+ printk(KERN_EMERG "System Halted, OK to turn off power\n");
+ local_irq_disable();
+ while (1) ;
}
EXPORT_SYMBOL(machine_restart);
@@ -688,6 +698,12 @@ void machine_power_off(void)
if (ppc_md.nvram_sync)
ppc_md.nvram_sync();
ppc_md.power_off();
+#ifdef CONFIG_SMP
+ smp_send_stop();
+#endif
+ printk(KERN_EMERG "System Halted, OK to turn off power\n");
+ local_irq_disable();
+ while (1) ;
}
EXPORT_SYMBOL(machine_power_off);
@@ -697,13 +713,16 @@ void machine_halt(void)
if (ppc_md.nvram_sync)
ppc_md.nvram_sync();
ppc_md.halt();
+#ifdef CONFIG_SMP
+ smp_send_stop();
+#endif
+ printk(KERN_EMERG "System Halted, OK to turn off power\n");
+ local_irq_disable();
+ while (1) ;
}
EXPORT_SYMBOL(machine_halt);
-unsigned long ppc_proc_freq;
-unsigned long ppc_tb_freq;
-
static int ppc64_panic_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -1055,6 +1074,7 @@ void __init setup_arch(char **cmdline_p)
/* set up the bootmem stuff with available memory */
do_init_bootmem();
+ sparse_init();
/* initialize the syscall map in systemcfg */
setup_syscall_map();
@@ -1079,11 +1099,11 @@ void __init setup_arch(char **cmdline_p)
static void ppc64_do_msg(unsigned int src, const char *msg)
{
if (ppc_md.progress) {
- char buf[32];
+ char buf[128];
- sprintf(buf, "%08x \n", src);
+ sprintf(buf, "%08X\n", src);
ppc_md.progress(buf, 0);
- sprintf(buf, "%-16s", msg);
+ snprintf(buf, 128, "%s", msg);
ppc_md.progress(buf, 0);
}
}
@@ -1117,7 +1137,7 @@ void ppc64_dump_msg(unsigned int src, const char *msg)
}
/* This should only be called on processor 0 during calibrate decr */
-void setup_default_decr(void)
+void __init setup_default_decr(void)
{
struct paca_struct *lpaca = get_paca();
diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c
index 9ef5d36d6b2..2fcddfcb594 100644
--- a/arch/ppc64/kernel/smp.c
+++ b/arch/ppc64/kernel/smp.c
@@ -71,7 +71,7 @@ void smp_call_function_interrupt(void);
int smt_enabled_at_boot = 1;
-#ifdef CONFIG_PPC_MULTIPLATFORM
+#ifdef CONFIG_MPIC
void smp_mpic_message_pass(int target, int msg)
{
/* make sure we're sending something that translates to an IPI */
@@ -128,7 +128,7 @@ void __devinit smp_generic_kick_cpu(int nr)
smp_mb();
}
-#endif /* CONFIG_PPC_MULTIPLATFORM */
+#endif /* CONFIG_MPIC */
static void __init smp_space_timers(unsigned int max_cpus)
{
diff --git a/arch/ppc64/kernel/spider-pic.c b/arch/ppc64/kernel/spider-pic.c
new file mode 100644
index 00000000000..d5c9a02fb11
--- /dev/null
+++ b/arch/ppc64/kernel/spider-pic.c
@@ -0,0 +1,191 @@
+/*
+ * External Interrupt Controller on Spider South Bridge
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/io.h>
+
+#include "bpa_iic.h"
+
+/* register layout taken from Spider spec, table 7.4-4 */
+enum {
+ TIR_DEN = 0x004, /* Detection Enable Register */
+ TIR_MSK = 0x084, /* Mask Level Register */
+ TIR_EDC = 0x0c0, /* Edge Detection Clear Register */
+ TIR_PNDA = 0x100, /* Pending Register A */
+ TIR_PNDB = 0x104, /* Pending Register B */
+ TIR_CS = 0x144, /* Current Status Register */
+ TIR_LCSA = 0x150, /* Level Current Status Register A */
+ TIR_LCSB = 0x154, /* Level Current Status Register B */
+ TIR_LCSC = 0x158, /* Level Current Status Register C */
+ TIR_LCSD = 0x15c, /* Level Current Status Register D */
+ TIR_CFGA = 0x200, /* Setting Register A0 */
+ TIR_CFGB = 0x204, /* Setting Register B0 */
+ /* 0x208 ... 0x3ff Setting Register An/Bn */
+ TIR_PPNDA = 0x400, /* Packet Pending Register A */
+ TIR_PPNDB = 0x404, /* Packet Pending Register B */
+ TIR_PIERA = 0x408, /* Packet Output Error Register A */
+ TIR_PIERB = 0x40c, /* Packet Output Error Register B */
+ TIR_PIEN = 0x444, /* Packet Output Enable Register */
+ TIR_PIPND = 0x454, /* Packet Output Pending Register */
+ TIRDID = 0x484, /* Spider Device ID Register */
+ REISTIM = 0x500, /* Reissue Command Timeout Time Setting */
+ REISTIMEN = 0x504, /* Reissue Command Timeout Setting */
+ REISWAITEN = 0x508, /* Reissue Wait Control */
+};
+
+static void __iomem *spider_pics[4];
+
+static void __iomem *spider_get_pic(int irq)
+{
+ int node = irq / IIC_NODE_STRIDE;
+ irq %= IIC_NODE_STRIDE;
+
+ if (irq >= IIC_EXT_OFFSET &&
+ irq < IIC_EXT_OFFSET + IIC_NUM_EXT &&
+ spider_pics)
+ return spider_pics[node];
+ return NULL;
+}
+
+static int spider_get_nr(unsigned int irq)
+{
+ return (irq % IIC_NODE_STRIDE) - IIC_EXT_OFFSET;
+}
+
+static void __iomem *spider_get_irq_config(int irq)
+{
+ void __iomem *pic;
+ pic = spider_get_pic(irq);
+ return pic + TIR_CFGA + 8 * spider_get_nr(irq);
+}
+
+static void spider_enable_irq(unsigned int irq)
+{
+ void __iomem *cfg = spider_get_irq_config(irq);
+ irq = spider_get_nr(irq);
+
+ out_be32(cfg, in_be32(cfg) | 0x3107000eu);
+ out_be32(cfg + 4, in_be32(cfg + 4) | 0x00020000u | irq);
+}
+
+static void spider_disable_irq(unsigned int irq)
+{
+ void __iomem *cfg = spider_get_irq_config(irq);
+ irq = spider_get_nr(irq);
+
+ out_be32(cfg, in_be32(cfg) & ~0x30000000u);
+}
+
+static unsigned int spider_startup_irq(unsigned int irq)
+{
+ spider_enable_irq(irq);
+ return 0;
+}
+
+static void spider_shutdown_irq(unsigned int irq)
+{
+ spider_disable_irq(irq);
+}
+
+static void spider_end_irq(unsigned int irq)
+{
+ spider_enable_irq(irq);
+}
+
+static void spider_ack_irq(unsigned int irq)
+{
+ spider_disable_irq(irq);
+ iic_local_enable();
+}
+
+static struct hw_interrupt_type spider_pic = {
+ .typename = " SPIDER ",
+ .startup = spider_startup_irq,
+ .shutdown = spider_shutdown_irq,
+ .enable = spider_enable_irq,
+ .disable = spider_disable_irq,
+ .ack = spider_ack_irq,
+ .end = spider_end_irq,
+};
+
+
+int spider_get_irq(unsigned long int_pending)
+{
+ void __iomem *regs = spider_get_pic(int_pending);
+ unsigned long cs;
+ int irq;
+
+ cs = in_be32(regs + TIR_CS);
+
+ irq = cs >> 24;
+ if (irq != 63)
+ return irq;
+
+ return -1;
+}
+
+void spider_init_IRQ(void)
+{
+ int node;
+ struct device_node *dn;
+ unsigned int *property;
+ long spiderpic;
+ int n;
+
+ /* FIXME: detect multiple PICs as soon as the device tree has them */
+ for (node = 0; node < 1; node++) {
+ dn = of_find_node_by_path("/");
+ n = prom_n_addr_cells(dn);
+ property = (unsigned int *) get_property(dn,
+ "platform-spider-pic", NULL);
+
+ if (!property)
+ continue;
+ for (spiderpic = 0; n > 0; --n)
+ spiderpic = (spiderpic << 32) + *property++;
+ printk(KERN_DEBUG "SPIDER addr: %lx\n", spiderpic);
+ spider_pics[node] = __ioremap(spiderpic, 0x800, _PAGE_NO_CACHE);
+ for (n = 0; n < IIC_NUM_EXT; n++) {
+ int irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE;
+ get_irq_desc(irq)->handler = &spider_pic;
+
+ /* do not mask any interrupts because of level */
+ out_be32(spider_pics[node] + TIR_MSK, 0x0);
+
+ /* disable edge detection clear */
+ /* out_be32(spider_pics[node] + TIR_EDC, 0x0); */
+
+ /* enable interrupt packets to be output */
+ out_be32(spider_pics[node] + TIR_PIEN,
+ in_be32(spider_pics[node] + TIR_PIEN) | 0x1);
+
+ /* Enable the interrupt detection enable bit. Do this last! */
+ out_be32(spider_pics[node] + TIR_DEN,
+ in_be32(spider_pics[node] + TIR_DEN) | 0x1);
+
+ }
+ }
+}
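
spider_get_pic()/spider_get_nr() above split a global interrupt number into a node index and a per-node external source. A standalone sketch of that decomposition follows; the IIC_* values are assumptions for illustration (the real constants live in bpa_iic.h):

#include <stdio.h>

/* Assumed example values -- the real constants are in bpa_iic.h. */
#define IIC_NODE_STRIDE 0x100
#define IIC_EXT_OFFSET  0x00
#define IIC_NUM_EXT     64

int main(void)
{
        int irq = 1 * IIC_NODE_STRIDE + IIC_EXT_OFFSET + 5;
        int node = irq / IIC_NODE_STRIDE;                    /* -> 1 */
        int src  = (irq % IIC_NODE_STRIDE) - IIC_EXT_OFFSET; /* -> 5 */

        printf("irq %#x -> node %d, external source %d\n", irq, node, src);
        return 0;
}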
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
index 33364a7d2cd..2348a75e050 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/ppc64/kernel/time.c
@@ -107,6 +107,9 @@ void ppc_adjtimex(void);
static unsigned adjusting_time = 0;
+unsigned long ppc_proc_freq;
+unsigned long ppc_tb_freq;
+
static __inline__ void timer_check_rtc(void)
{
/*
@@ -472,6 +475,66 @@ int do_settimeofday(struct timespec *tv)
EXPORT_SYMBOL(do_settimeofday);
+#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_MAPLE) || defined(CONFIG_PPC_BPA)
+void __init generic_calibrate_decr(void)
+{
+ struct device_node *cpu;
+ struct div_result divres;
+ unsigned int *fp;
+ int node_found;
+
+ /*
+ * The cpu node should have a timebase-frequency property
+ * to tell us the rate at which the decrementer counts.
+ */
+ cpu = of_find_node_by_type(NULL, "cpu");
+
+ ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */
+ node_found = 0;
+ if (cpu != 0) {
+ fp = (unsigned int *)get_property(cpu, "timebase-frequency",
+ NULL);
+ if (fp != 0) {
+ node_found = 1;
+ ppc_tb_freq = *fp;
+ }
+ }
+ if (!node_found)
+ printk(KERN_ERR "WARNING: Estimating decrementer frequency "
+ "(not found)\n");
+
+ ppc_proc_freq = DEFAULT_PROC_FREQ;
+ node_found = 0;
+ if (cpu != 0) {
+ fp = (unsigned int *)get_property(cpu, "clock-frequency",
+ NULL);
+ if (fp != 0) {
+ node_found = 1;
+ ppc_proc_freq = *fp;
+ }
+ }
+ if (!node_found)
+ printk(KERN_ERR "WARNING: Estimating processor frequency "
+ "(not found)\n");
+
+ of_node_put(cpu);
+
+ printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
+ ppc_tb_freq/1000000, ppc_tb_freq%1000000);
+ printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
+ ppc_proc_freq/1000000, ppc_proc_freq%1000000);
+
+ tb_ticks_per_jiffy = ppc_tb_freq / HZ;
+ tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
+ tb_ticks_per_usec = ppc_tb_freq / 1000000;
+ tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
+ div128_by_32(1024*1024, 0, tb_ticks_per_sec, &divres);
+ tb_to_xs = divres.result_low;
+
+ setup_default_decr();
+}
+#endif
+
void __init time_init(void)
{
/* This function is only called on the boot processor */
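
As a worked example of the conversions in generic_calibrate_decr() above, assume a 14.318 MHz timebase and HZ = 1000 (both values are illustrative, not taken from this patch):

#include <stdio.h>

int main(void)
{
        unsigned long ppc_tb_freq = 14318000;  /* assumed 14.318 MHz */
        unsigned long hz = 1000;               /* assumed HZ */

        unsigned long per_jiffy = ppc_tb_freq / hz;      /* 14318 */
        unsigned long per_sec   = per_jiffy * hz;        /* 14318000 */
        unsigned long per_usec  = ppc_tb_freq / 1000000; /* 14 */

        printf("%lu ticks/jiffy, %lu ticks/sec, %lu ticks/usec\n",
               per_jiffy, per_sec, per_usec);
        return 0;
}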
diff --git a/arch/ppc64/kernel/traps.c b/arch/ppc64/kernel/traps.c
index 7e52cb2605e..a8d5e83ee89 100644
--- a/arch/ppc64/kernel/traps.c
+++ b/arch/ppc64/kernel/traps.c
@@ -126,6 +126,10 @@ int die(const char *str, struct pt_regs *regs, long err)
printk("POWERMAC ");
nl = 1;
break;
+ case PLATFORM_BPA:
+ printk("BPA ");
+ nl = 1;
+ break;
}
if (nl)
printk("\n");
diff --git a/arch/ppc64/mm/Makefile b/arch/ppc64/mm/Makefile
index ac522d57b2a..3695d00d347 100644
--- a/arch/ppc64/mm/Makefile
+++ b/arch/ppc64/mm/Makefile
@@ -6,6 +6,6 @@ EXTRA_CFLAGS += -mno-minimal-toc
obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o \
slb_low.o slb.o stab.o mmap.o
-obj-$(CONFIG_DISCONTIGMEM) += numa.o
+obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PPC_MULTIPLATFORM) += hash_native.o
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index 6fa1e6490b5..b50b3a446db 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -98,7 +98,7 @@ void show_mem(void)
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
for_each_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; i++) {
- page = pgdat->node_mem_map + i;
+ page = pgdat_page_nr(pgdat, i);
total++;
if (PageReserved(page))
reserved++;
@@ -531,7 +531,7 @@ EXPORT_SYMBOL(page_is_ram);
* Initialize the bootmem system and give it all the memory we
* have available.
*/
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
unsigned long i;
@@ -553,12 +553,20 @@ void __init do_init_bootmem(void)
max_pfn = max_low_pfn;
- /* add all physical memory to the bootmem map. Also find the first */
+ /* Add all physical memory to the bootmem map, mark each area
+ * present.
+ */
for (i=0; i < lmb.memory.cnt; i++) {
unsigned long physbase, size;
+ unsigned long start_pfn, end_pfn;
physbase = lmb.memory.region[i].physbase;
size = lmb.memory.region[i].size;
+
+ start_pfn = physbase >> PAGE_SHIFT;
+ end_pfn = start_pfn + (size >> PAGE_SHIFT);
+ memory_present(0, start_pfn, end_pfn);
+
free_bootmem(physbase, size);
}
@@ -597,7 +605,7 @@ void __init paging_init(void)
free_area_init_node(0, NODE_DATA(0), zones_size,
__pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
}
-#endif /* CONFIG_DISCONTIGMEM */
+#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
static struct kcore_list kcore_vmem;
@@ -628,7 +636,7 @@ module_init(setup_kcore);
void __init mem_init(void)
{
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NEED_MULTIPLE_NODES
int nid;
#endif
pg_data_t *pgdat;
@@ -639,7 +647,7 @@ void __init mem_init(void)
num_physpages = max_low_pfn; /* RAM is assumed contiguous */
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NEED_MULTIPLE_NODES
for_each_online_node(nid) {
if (NODE_DATA(nid)->node_spanned_pages != 0) {
printk("freeing bootmem node %x\n", nid);
@@ -654,7 +662,7 @@ void __init mem_init(void)
for_each_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; i++) {
- page = pgdat->node_mem_map + i;
+ page = pgdat_page_nr(pgdat, i);
if (PageReserved(page))
reservedpages++;
}
diff --git a/arch/ppc64/mm/numa.c b/arch/ppc64/mm/numa.c
index ea862ec643d..cafd91aef28 100644
--- a/arch/ppc64/mm/numa.c
+++ b/arch/ppc64/mm/numa.c
@@ -440,6 +440,8 @@ new_range:
for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
numa_domain;
+ memory_present(numa_domain, start >> PAGE_SHIFT,
+ (start + size) >> PAGE_SHIFT);
if (--ranges)
goto new_range;
@@ -481,6 +483,7 @@ static void __init setup_nonnuma(void)
for (i = 0 ; i < top_of_ram; i += MEMORY_INCREMENT)
numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;
+ memory_present(0, 0, init_node_data[0].node_end_pfn);
}
static void __init dump_numa_topology(void)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ab79af84699..32696c1d928 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -226,6 +226,8 @@ config WARN_STACK_SIZE
This allows you to specify the maximum frame size a function may
have without the compiler complaining about it.
+source "mm/Kconfig"
+
comment "I/O subsystem configuration"
config MACHCHK_WARNING
diff --git a/arch/s390/boot/install.sh b/arch/s390/boot/install.sh
index 278a8139cb1..d4026f62cb0 100644
--- a/arch/s390/boot/install.sh
+++ b/arch/s390/boot/install.sh
@@ -21,8 +21,8 @@
# User may have a custom install script
-if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi
-if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi
+if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
+if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
# Default install - same as make zlilo
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index bf33dcfec7d..3898f66d0b2 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -45,7 +45,7 @@ typedef struct compat_siginfo {
/* POSIX.1b timers */
struct {
- timer_t _tid; /* timer id */
+ compat_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
compat_sigval_t _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 3468d512722..a7c8bfc1160 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -486,7 +486,7 @@ config CPU_SUBTYPE_ST40
depends on CPU_SUBTYPE_ST40STB1 || CPU_SUBTYPE_ST40GX1
default y
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
bool
depends on SH_HP690
default y
@@ -496,6 +496,8 @@ config DISCONTIGMEM
or have huge holes in the physical address space for other reasons.
See <file:Documentation/vm/numa> for more.
+source "mm/Kconfig"
+
config ZERO_PAGE_OFFSET
hex "Zero page offset"
default "0x00001000" if !(SH_MPC1211 || SH_SH03)
diff --git a/arch/sh64/Kconfig b/arch/sh64/Kconfig
index 76eb81fba45..708e59736a4 100644
--- a/arch/sh64/Kconfig
+++ b/arch/sh64/Kconfig
@@ -217,6 +217,8 @@ config PREEMPT
bool "Preemptible Kernel (EXPERIMENTAL)"
depends on EXPERIMENTAL
+source "mm/Kconfig"
+
endmenu
menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 237f922520f..262e13d086f 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -264,7 +264,11 @@ config SUNOS_EMUL
want to run SunOS binaries on an Ultra you must also say Y to
"Kernel support for 32-bit a.out binaries" above.
-source "drivers/parport/Kconfig"
+source "mm/Kconfig"
+
+endmenu
+
+source "drivers/Kconfig"
config PRINTER
tristate "Parallel printer support"
@@ -291,6 +295,8 @@ config PRINTER
If you have more than 8 printers, you need to increase the LP_NO
macro in lp.c and the PARPORT_MAX macro in parport.h.
+source "mm/Kconfig"
+
endmenu
source "drivers/base/Kconfig"
@@ -372,18 +378,8 @@ config UNIX98_PTY_COUNT
endmenu
-source "drivers/input/Kconfig"
-
source "fs/Kconfig"
-source "sound/Kconfig"
-
-source "drivers/usb/Kconfig"
-
-source "drivers/infiniband/Kconfig"
-
-source "drivers/char/watchdog/Kconfig"
-
source "arch/sparc/Kconfig.debug"
source "security/Kconfig"
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index a72fd15d5ea..e2b050eb3b9 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -484,6 +484,8 @@ config CMDLINE
NOTE: This option WILL override the PROM bootargs setting!
+source "mm/Kconfig"
+
endmenu
source "drivers/base/Kconfig"
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index 7066d7ba667..bdac631cf01 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -6,7 +6,6 @@
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
-
#include <asm/kdebug.h>
#include <asm/signal.h>
@@ -47,25 +46,59 @@ void arch_copy_kprobe(struct kprobe *p)
{
p->ainsn.insn[0] = *p->addr;
p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
+ p->opcode = *p->addr;
}
-void arch_remove_kprobe(struct kprobe *p)
+void arch_arm_kprobe(struct kprobe *p)
{
+ *p->addr = BREAKPOINT_INSTRUCTION;
+ flushi(p->addr);
}
-/* kprobe_status settings */
-#define KPROBE_HIT_ACTIVE 0x00000001
-#define KPROBE_HIT_SS 0x00000002
+void arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+ flushi(p->addr);
+}
+
+void arch_remove_kprobe(struct kprobe *p)
+{
+}
static struct kprobe *current_kprobe;
static unsigned long current_kprobe_orig_tnpc;
static unsigned long current_kprobe_orig_tstate_pil;
static unsigned int kprobe_status;
+static struct kprobe *kprobe_prev;
+static unsigned long kprobe_orig_tnpc_prev;
+static unsigned long kprobe_orig_tstate_pil_prev;
+static unsigned int kprobe_status_prev;
-static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static inline void save_previous_kprobe(void)
+{
+ kprobe_status_prev = kprobe_status;
+ kprobe_orig_tnpc_prev = current_kprobe_orig_tnpc;
+ kprobe_orig_tstate_pil_prev = current_kprobe_orig_tstate_pil;
+ kprobe_prev = current_kprobe;
+}
+
+static inline void restore_previous_kprobe(void)
+{
+ kprobe_status = kprobe_status_prev;
+ current_kprobe_orig_tnpc = kprobe_orig_tnpc_prev;
+ current_kprobe_orig_tstate_pil = kprobe_orig_tstate_pil_prev;
+ current_kprobe = kprobe_prev;
+}
+
+static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
{
current_kprobe_orig_tnpc = regs->tnpc;
current_kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
+ current_kprobe = p;
+}
+
+static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
regs->tstate |= TSTATE_PIL;
 /* single-step inline if it is a breakpoint instruction */
@@ -78,17 +111,6 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
}
}
-static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
-{
- *p->addr = p->opcode;
- flushi(p->addr);
-
- regs->tpc = (unsigned long) p->addr;
- regs->tnpc = current_kprobe_orig_tnpc;
- regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
- current_kprobe_orig_tstate_pil);
-}
-
static int kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
@@ -109,8 +131,18 @@ static int kprobe_handler(struct pt_regs *regs)
unlock_kprobes();
goto no_kprobe;
}
- disarm_kprobe(p, regs);
- ret = 1;
+ /* We have reentered kprobe_handler() because another
+ * probe was hit while within the handler. Save the
+ * original kprobe variables and single-step only the
+ * instruction of the new probe, without calling any
+ * user handlers.
+ */
+ save_previous_kprobe();
+ set_current_kprobe(p, regs);
+ p->nmissed++;
+ kprobe_status = KPROBE_REENTER;
+ prepare_singlestep(p, regs);
+ return 1;
} else {
p = current_kprobe;
if (p->break_handler && p->break_handler(p, regs))
@@ -138,8 +170,8 @@ static int kprobe_handler(struct pt_regs *regs)
goto no_kprobe;
}
+ set_current_kprobe(p, regs);
kprobe_status = KPROBE_HIT_ACTIVE;
- current_kprobe = p;
if (p->pre_handler && p->pre_handler(p, regs))
return 1;
@@ -245,12 +277,20 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
if (!kprobe_running())
return 0;
- if (current_kprobe->post_handler)
+ if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
+ kprobe_status = KPROBE_HIT_SSDONE;
current_kprobe->post_handler(current_kprobe, regs, 0);
+ }
resume_execution(current_kprobe, regs);
+ /* Restore the original saved kprobes variables and continue. */
+ if (kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe();
+ goto out;
+ }
unlock_kprobes();
+out:
preempt_enable_no_resched();
return 1;
@@ -392,3 +432,4 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
}
return 0;
}
+
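
The save_previous_kprobe()/restore_previous_kprobe() pair above covers the case where a probe fires while another is being serviced, e.g. a pre_handler calling a function that itself carries a probe. A hedged sketch of that scenario (all names are illustrative, not from this patch):

#include <linux/kernel.h>
#include <linux/kprobes.h>

extern void probed_helper(void);  /* assumed to carry its own kprobe */

/* Running with the first probe active, this handler trips the second
 * probe, so kprobe_handler() reenters: the outer probe's state is
 * saved, the inner instruction is single-stepped under KPROBE_REENTER,
 * and the outer state is restored afterwards. */
static int my_pre_handler(struct kprobe *kp, struct pt_regs *regs)
{
        probed_helper();
        return 0;
}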
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index 9a375e975cf..f28428f4170 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -102,7 +102,7 @@ typedef struct compat_siginfo{
/* POSIX.1b timers */
struct {
- timer_t _tid; /* timer id */
+ compat_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
compat_sigval_t _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index b8e952c88fd..9469e77303e 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -74,6 +74,7 @@ config MODE_SKAS
option will shrink the UML binary slightly.
source "arch/um/Kconfig_arch"
+source "mm/Kconfig"
config LD_SCRIPT_STATIC
bool
diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c
index 804c6bbdf67..157584ae479 100644
--- a/arch/um/kernel/process_kern.c
+++ b/arch/um/kernel/process_kern.c
@@ -8,6 +8,7 @@
#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/interrupt.h"
+#include "linux/string.h"
#include "linux/mm.h"
#include "linux/slab.h"
#include "linux/utsname.h"
@@ -322,12 +323,7 @@ void do_uml_exitcalls(void)
char *uml_strdup(char *string)
{
- char *new;
-
- new = kmalloc(strlen(string) + 1, GFP_KERNEL);
- if(new == NULL) return(NULL);
- strcpy(new, string);
- return(new);
+ return kstrdup(string, GFP_KERNEL);
}
int copy_to_user_proc(void __user *to, void *from, int size)
diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig
index 90cd4baa75e..27febd6ffa8 100644
--- a/arch/v850/Kconfig
+++ b/arch/v850/Kconfig
@@ -218,6 +218,8 @@ menu "Processor type and features"
 a lot of RAM, and you need to be able to allocate very large
contiguous chunks. If unsure, say N.
+source "mm/Kconfig"
+
endmenu
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 289f448ac89..db259757dc8 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -265,7 +265,7 @@ config NUMA_EMU
into virtual nodes when booted with "numa=fake=N", where N is the
number of nodes. This is only useful for debugging.
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
bool
depends on NUMA
default y
@@ -274,6 +274,27 @@ config NUMA
bool
default n
+config ARCH_DISCONTIGMEM_ENABLE
+ def_bool y
+ depends on NUMA
+
+config ARCH_DISCONTIGMEM_DEFAULT
+ def_bool y
+ depends on NUMA
+
+config ARCH_SPARSEMEM_ENABLE
+ def_bool y
+ depends on NUMA
+
+config ARCH_FLATMEM_ENABLE
+ def_bool y
+ depends on !NUMA
+
+source "mm/Kconfig"
+
+config HAVE_ARCH_EARLY_PFN_TO_NID
+ def_bool y
+
config HAVE_DEC_LOCK
bool
depends on SMP
@@ -381,6 +402,8 @@ config SECCOMP
If unsure, say Y. Only embedded should say N here.
+source kernel/Kconfig.hz
+
endmenu
#
diff --git a/arch/x86_64/boot/install.sh b/arch/x86_64/boot/install.sh
index 90f2452b3b9..f17b40dfc0f 100644
--- a/arch/x86_64/boot/install.sh
+++ b/arch/x86_64/boot/install.sh
@@ -21,8 +21,8 @@
# User may have a custom install script
-if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi
-if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi
+if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
+if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
# Default install - same as make zlilo
diff --git a/arch/x86_64/ia32/ia32_signal.c b/arch/x86_64/ia32/ia32_signal.c
index fbd09b5126c..66e2821533d 100644
--- a/arch/x86_64/ia32/ia32_signal.c
+++ b/arch/x86_64/ia32/ia32_signal.c
@@ -428,8 +428,8 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
return (void __user *)((rsp - frame_size) & -8UL);
}
-void ia32_setup_frame(int sig, struct k_sigaction *ka,
- compat_sigset_t *set, struct pt_regs * regs)
+int ia32_setup_frame(int sig, struct k_sigaction *ka,
+ compat_sigset_t *set, struct pt_regs * regs)
{
struct sigframe __user *frame;
int err = 0;
@@ -514,14 +514,15 @@ void ia32_setup_frame(int sig, struct k_sigaction *ka,
current->comm, current->pid, frame, regs->rip, frame->pretcode);
#endif
- return;
+ return 1;
give_sigsegv:
force_sigsegv(sig, current);
+ return 0;
}
-void ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- compat_sigset_t *set, struct pt_regs * regs)
+int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ compat_sigset_t *set, struct pt_regs * regs)
{
struct rt_sigframe __user *frame;
int err = 0;
@@ -613,9 +614,9 @@ void ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
current->comm, current->pid, frame, regs->rip, frame->pretcode);
#endif
- return;
+ return 1;
give_sigsegv:
force_sigsegv(sig, current);
+ return 0;
}
-
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index 504e6347499..c9a6b812e92 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -40,11 +40,7 @@ int fix_aperture __initdata = 1;
static u32 __init allocate_aperture(void)
{
-#ifdef CONFIG_DISCONTIGMEM
pg_data_t *nd0 = NODE_DATA(0);
-#else
- pg_data_t *nd0 = &contig_page_data;
-#endif
u32 aper_size;
void *p;
diff --git a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c
index e3a19e8ebbf..9631c747c5e 100644
--- a/arch/x86_64/kernel/early_printk.c
+++ b/arch/x86_64/kernel/early_printk.c
@@ -2,20 +2,24 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
+#include <linux/tty.h>
#include <asm/io.h>
#include <asm/processor.h>
/* Simple VGA output */
#ifdef __i386__
+#include <asm/setup.h>
#define VGABASE (__ISA_IO_base + 0xb8000)
#else
+#include <asm/bootsetup.h>
#define VGABASE ((void __iomem *)0xffffffff800b8000UL)
#endif
-#define MAX_YPOS 25
-#define MAX_XPOS 80
+#define MAX_YPOS max_ypos
+#define MAX_XPOS max_xpos
+static int max_ypos = 25, max_xpos = 80;
static int current_ypos = 1, current_xpos = 0;
static void early_vga_write(struct console *con, const char *str, unsigned n)
@@ -196,7 +200,10 @@ int __init setup_early_printk(char *opt)
} else if (!strncmp(buf, "ttyS", 4)) {
early_serial_init(buf);
early_console = &early_serial_console;
- } else if (!strncmp(buf, "vga", 3)) {
+ } else if (!strncmp(buf, "vga", 3)
+ && SCREEN_INFO.orig_video_isVGA == 1) {
+ max_xpos = SCREEN_INFO.orig_video_cols;
+ max_ypos = SCREEN_INFO.orig_video_lines;
early_console = &early_vga_console;
}
early_console_initialized = 1;
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index 0f8c78dcd38..cf6ab147a2a 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -94,7 +94,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
s = strstr(saved_command_line, "earlyprintk=");
if (s != NULL)
setup_early_printk(s);
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
s = strstr(saved_command_line, "numa=");
if (s != NULL)
numa_setup(s+5);
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 7873d9ba881..19eafa0aa95 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -157,14 +157,13 @@ static unsigned int startup_8259A_irq(unsigned int irq)
}
static struct hw_interrupt_type i8259A_irq_type = {
- "XT-PIC",
- startup_8259A_irq,
- shutdown_8259A_irq,
- enable_8259A_irq,
- disable_8259A_irq,
- mask_and_ack_8259A,
- end_8259A_irq,
- NULL
+ .typename = "XT-PIC",
+ .startup = startup_8259A_irq,
+ .shutdown = shutdown_8259A_irq,
+ .enable = enable_8259A_irq,
+ .disable = disable_8259A_irq,
+ .ack = mask_and_ack_8259A,
+ .end = end_8259A_irq,
};
/*
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index f77f8a0ff18..4e680f87a75 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -27,6 +27,8 @@
* <prasanna@in.ibm.com> adapted for x86_64
* 2005-Mar Roland McGrath <roland@redhat.com>
* Fixed to handle %rip-relative addressing mode correctly.
+ * 2005-May Rusty Lynch <rusty.lynch@intel.com>
+ * Added function return probes functionality
*/
#include <linux/config.h>
@@ -37,18 +39,16 @@
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/moduleloader.h>
-
+#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/kdebug.h>
static DECLARE_MUTEX(kprobe_mutex);
-/* kprobe_status settings */
-#define KPROBE_HIT_ACTIVE 0x00000001
-#define KPROBE_HIT_SS 0x00000002
-
static struct kprobe *current_kprobe;
static unsigned long kprobe_status, kprobe_old_rflags, kprobe_saved_rflags;
+static struct kprobe *kprobe_prev;
+static unsigned long kprobe_status_prev, kprobe_old_rflags_prev, kprobe_saved_rflags_prev;
static struct pt_regs jprobe_saved_regs;
static long *jprobe_saved_rsp;
static kprobe_opcode_t *get_insn_slot(void);
@@ -214,6 +214,21 @@ void arch_copy_kprobe(struct kprobe *p)
BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
*ripdisp = disp;
}
+ p->opcode = *p->addr;
+}
+
+void arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = BREAKPOINT_INSTRUCTION;
+ flush_icache_range((unsigned long) p->addr,
+ (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+}
+
+void arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+ flush_icache_range((unsigned long) p->addr,
+ (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
void arch_remove_kprobe(struct kprobe *p)
@@ -223,10 +238,29 @@ void arch_remove_kprobe(struct kprobe *p)
down(&kprobe_mutex);
}
-static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
+static inline void save_previous_kprobe(void)
{
- *p->addr = p->opcode;
- regs->rip = (unsigned long)p->addr;
+ kprobe_prev = current_kprobe;
+ kprobe_status_prev = kprobe_status;
+ kprobe_old_rflags_prev = kprobe_old_rflags;
+ kprobe_saved_rflags_prev = kprobe_saved_rflags;
+}
+
+static inline void restore_previous_kprobe(void)
+{
+ current_kprobe = kprobe_prev;
+ kprobe_status = kprobe_status_prev;
+ kprobe_old_rflags = kprobe_old_rflags_prev;
+ kprobe_saved_rflags = kprobe_saved_rflags_prev;
+}
+
+static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
+{
+ current_kprobe = p;
+ kprobe_saved_rflags = kprobe_old_rflags
+ = (regs->eflags & (TF_MASK | IF_MASK));
+ if (is_IF_modifier(p->ainsn.insn))
+ kprobe_saved_rflags &= ~IF_MASK;
}
static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -240,6 +274,50 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
regs->rip = (unsigned long)p->ainsn.insn;
}
+struct task_struct *arch_get_kprobe_task(void *ptr)
+{
+ return ((struct thread_info *) (((unsigned long) ptr) &
+ (~(THREAD_SIZE -1))))->task;
+}
+
+void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+{
+ unsigned long *sara = (unsigned long *)regs->rsp;
+ struct kretprobe_instance *ri;
+ static void *orig_ret_addr;
+
+ /*
+ * Save the return address when the return probe hits
+ * the first time, and use it to populate the (kretprobe
+ * instance)->ret_addr for subsequent return probes at
+ * the same address, since by then the stack slot already
+ * holds the kretprobe_trampoline address.
+ */
+ if (((void*) *sara) != kretprobe_trampoline)
+ orig_ret_addr = (void*) *sara;
+
+ if ((ri = get_free_rp_inst(rp)) != NULL) {
+ ri->rp = rp;
+ ri->stack_addr = sara;
+ ri->ret_addr = orig_ret_addr;
+ add_rp_inst(ri);
+ /* Replace the return addr with trampoline addr */
+ *sara = (unsigned long) &kretprobe_trampoline;
+ } else {
+ rp->nmissed++;
+ }
+}
+
+void arch_kprobe_flush_task(struct task_struct *tk)
+{
+ struct kretprobe_instance *ri;
+ while ((ri = get_rp_inst_tsk(tk)) != NULL) {
+ *((unsigned long *)(ri->stack_addr)) =
+ (unsigned long) ri->ret_addr;
+ recycle_rp_inst(ri);
+ }
+}
+
/*
* Interrupts are disabled on entry as trap3 is an interrupt gate and they
* remain disabled thorough out this function.
@@ -264,9 +342,30 @@ int kprobe_handler(struct pt_regs *regs)
regs->eflags |= kprobe_saved_rflags;
unlock_kprobes();
goto no_kprobe;
+ } else if (kprobe_status == KPROBE_HIT_SSDONE) {
+ /* TODO: Provide re-entrancy from
+ * post_kprobe_handler() and avoid exception
+ * stack corruption while single-stepping on
+ * the instruction of the new probe.
+ */
+ arch_disarm_kprobe(p);
+ regs->rip = (unsigned long)p->addr;
+ ret = 1;
+ } else {
+ /* We have reentered kprobe_handler() because
+ * another probe was hit while within the handler.
+ * Save the original kprobe variables and single-step
+ * only the instruction of the new probe, without
+ * calling any user handlers.
+ */
+ save_previous_kprobe();
+ set_current_kprobe(p, regs);
+ p->nmissed++;
+ prepare_singlestep(p, regs);
+ kprobe_status = KPROBE_REENTER;
+ return 1;
}
- disarm_kprobe(p, regs);
- ret = 1;
} else {
p = current_kprobe;
if (p->break_handler && p->break_handler(p, regs)) {
@@ -296,11 +395,7 @@ int kprobe_handler(struct pt_regs *regs)
}
kprobe_status = KPROBE_HIT_ACTIVE;
- current_kprobe = p;
- kprobe_saved_rflags = kprobe_old_rflags
- = (regs->eflags & (TF_MASK | IF_MASK));
- if (is_IF_modifier(p->ainsn.insn))
- kprobe_saved_rflags &= ~IF_MASK;
+ set_current_kprobe(p, regs);
if (p->pre_handler && p->pre_handler(p, regs))
/* handler has already set things up, so skip ss setup */
@@ -317,6 +412,55 @@ no_kprobe:
}
/*
+ * For function-return probes, init_kprobes() establishes a probepoint
+ * here. When a retprobed function returns, this probe is hit and
+ * trampoline_probe_handler() runs, calling the kretprobe's handler.
+ */
+ void kretprobe_trampoline_holder(void)
+ {
+ asm volatile ( ".global kretprobe_trampoline\n"
+ "kretprobe_trampoline: \n"
+ "nop\n");
+ }
+
+/*
+ * Called when we hit the probe point at kretprobe_trampoline
+ */
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct task_struct *tsk;
+ struct kretprobe_instance *ri;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ unsigned long *sara = (unsigned long *)regs->rsp - 1;
+
+ tsk = arch_get_kprobe_task(sara);
+ head = kretprobe_inst_table_head(tsk);
+
+ hlist_for_each_entry(ri, node, head, hlist) {
+ if (ri->stack_addr == sara && ri->rp) {
+ if (ri->rp->handler)
+ ri->rp->handler(ri, regs);
+ }
+ }
+ return 0;
+}
+
+void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+ struct kretprobe_instance *ri;
+ /* RA already popped */
+ unsigned long *sara = ((unsigned long *)regs->rsp) - 1;
+
+ while ((ri = get_rp_inst(sara))) {
+ regs->rip = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri);
+ }
+ regs->eflags &= ~TF_MASK;
+}
+
+/*
* Called after single-stepping. p->addr is the address of the
* instruction whose first byte has been replaced by the "int 3"
* instruction. To avoid the SMP problems that can occur when we
@@ -401,13 +545,23 @@ int post_kprobe_handler(struct pt_regs *regs)
if (!kprobe_running())
return 0;
- if (current_kprobe->post_handler)
+ if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
+ kprobe_status = KPROBE_HIT_SSDONE;
current_kprobe->post_handler(current_kprobe, regs, 0);
+ }
- resume_execution(current_kprobe, regs);
+ if (current_kprobe->post_handler != trampoline_post_handler)
+ resume_execution(current_kprobe, regs);
regs->eflags |= kprobe_saved_rflags;
- unlock_kprobes();
+ /* Restore the original saved kprobes variables and continue. */
+ if (kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe();
+ goto out;
+ } else {
+ unlock_kprobes();
+ }
+out:
preempt_enable_no_resched();
/*
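
A minimal usage sketch for the function-return probes added here, assuming the register_kretprobe()/unregister_kretprobe() interface introduced with this series; the probed function and message are illustrative:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

static noinline int traced(int x)
{
        return x * 2;
}

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        printk(KERN_INFO "traced() returned, rax=%lx\n", regs->rax);
        return 0;
}

static struct kretprobe rp = {
        .handler   = ret_handler,
        .maxactive = 4,          /* return instances tracked at once */
};

static int __init rp_init(void)
{
        int ret;

        rp.kp.addr = (kprobe_opcode_t *) traced;
        ret = register_kretprobe(&rp);
        if (ret < 0)
                return ret;
        traced(21);              /* fires ret_handler on return */
        return 0;
}

static void __exit rp_exit(void)
{
        unregister_kretprobe(&rp);
}

module_init(rp_init);
module_exit(rp_exit);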
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 61a63be6b29..9c5aa2a790c 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -23,6 +23,7 @@
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi.h>
+#include <linux/module.h>
#include <asm/smp.h>
#include <asm/mtrr.h>
@@ -45,7 +46,8 @@ int acpi_found_madt;
int apic_version [MAX_APICS];
unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-cpumask_t pci_bus_to_cpumask [256] = { [0 ... 255] = CPU_MASK_ALL };
+unsigned char pci_bus_to_node [256];
+EXPORT_SYMBOL(pci_bus_to_node);
static int mp_current_pci_id = 0;
/* I/O APIC entries */
@@ -904,11 +906,20 @@ void __init mp_config_acpi_legacy_irqs (void)
return;
}
+#define MAX_GSI_NUM 4096
+
int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
{
int ioapic = -1;
int ioapic_pin = 0;
int idx, bit = 0;
+ static int pci_irq = 16;
+ /*
+ * Mapping from Global System Interrupts, which
+ * represent all possible interrupt sources, to the
+ * IRQs assigned to actual devices.
+ */
+ static int gsi_to_irq[MAX_GSI_NUM];
if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
return gsi;
@@ -943,11 +954,21 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
- return gsi;
+ return gsi_to_irq[gsi];
}
mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
+ if (edge_level) {
+ /*
+ * For PCI devices assign IRQs in order, avoiding gaps
+ * due to unused I/O APIC pins.
+ */
+ int irq = gsi;
+ gsi = pci_irq++;
+ gsi_to_irq[irq] = gsi;
+ }
+
io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
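
A standalone model of the IRQ-compression logic above; the gsi_to_irq[gsi] != 0 test stands in for the pin_programmed bitmap check, and the numbers are illustrative:

#include <stdio.h>

#define MAX_GSI_NUM 4096

static int gsi_to_irq[MAX_GSI_NUM];
static int pci_irq = 16;

static int map_gsi(int gsi, int level_triggered)
{
        if (gsi_to_irq[gsi])           /* pin already programmed */
                return gsi_to_irq[gsi];
        if (level_triggered) {         /* PCI case: assign IRQs densely */
                int orig = gsi;
                gsi = pci_irq++;
                gsi_to_irq[orig] = gsi;
        }
        return gsi;
}

int main(void)
{
        int a = map_gsi(20, 1);
        int b = map_gsi(40, 1);
        int c = map_gsi(20, 1);

        /* prints "16 17 16": device IRQs stay dense even though the
         * I/O APIC pins (GSIs 20 and 40) are sparse */
        printf("%d %d %d\n", a, b, c);
        return 0;
}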
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index dce8bab4306..e59d1f9d616 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -34,6 +34,7 @@
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
+#include <linux/kprobes.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -293,6 +294,14 @@ void exit_thread(void)
{
struct task_struct *me = current;
struct thread_struct *t = &me->thread;
+
+ /*
+ * Remove function-return probe instances associated with this task
+ * and put them back on the free list. Do not insert an exit probe for
+ * this function; kprobe_flush_task() would disable it if you did.
+ */
+ kprobe_flush_task(me);
+
if (me->thread.io_bitmap_ptr) {
struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
@@ -312,6 +321,13 @@ void flush_thread(void)
struct task_struct *tsk = current;
struct thread_info *t = current_thread_info();
+ /*
+ * Remove function-return probe instances associated with this task
+ * and put them back on the free list. Do not insert an exit probe for
+ * this function; kprobe_flush_task() would disable it if you did.
+ */
+ kprobe_flush_task(tsk);
+
if (t->flags & _TIF_ABI_PENDING)
t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 99f038ede23..000015dd5a8 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -40,6 +40,8 @@
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
+#include <linux/mmzone.h>
+
#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -348,7 +350,7 @@ static __init void parse_cmdline_early (char ** cmdline_p)
if (!memcmp(from, "mem=", 4))
parse_memopt(from+4, &from);
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
if (!memcmp(from, "numa=", 5))
numa_setup(from+5);
#endif
@@ -377,17 +379,20 @@ static __init void parse_cmdline_early (char ** cmdline_p)
*cmdline_p = command_line;
}
-#ifndef CONFIG_DISCONTIGMEM
-static void __init contig_initmem_init(void)
+#ifndef CONFIG_NUMA
+static void __init
+contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
- unsigned long bootmap_size, bootmap;
- bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
- bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
- if (bootmap == -1L)
- panic("Cannot find bootmem map of size %ld\n",bootmap_size);
- bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
- e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT);
- reserve_bootmem(bootmap, bootmap_size);
+ unsigned long bootmap_size, bootmap;
+
+ memory_present(0, start_pfn, end_pfn);
+ bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
+ bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
+ if (bootmap == -1L)
+ panic("Cannot find bootmem map of size %ld\n",bootmap_size);
+ bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
+ e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
+ reserve_bootmem(bootmap, bootmap_size);
}
#endif
@@ -554,10 +559,10 @@ void __init setup_arch(char **cmdline_p)
acpi_numa_init();
#endif
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
numa_initmem_init(0, end_pfn);
#else
- contig_initmem_init();
+ contig_initmem_init(0, end_pfn);
#endif
/* Reserve direct mapping */
@@ -618,6 +623,8 @@ void __init setup_arch(char **cmdline_p)
}
}
#endif
+
+ sparse_init();
paging_init();
check_ioapic();
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index 429c0269dc4..98b7ba95d58 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -34,9 +34,9 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-void ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs * regs);
-void ia32_setup_frame(int sig, struct k_sigaction *ka,
+int ia32_setup_frame(int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs * regs);
asmlinkage long
@@ -238,7 +238,7 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
return (void __user *)round_down(rsp - size, 16);
}
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs * regs)
{
struct rt_sigframe __user *frame;
@@ -327,20 +327,23 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
current->comm, current->pid, frame, regs->rip, frame->pretcode);
#endif
- return;
+ return 1;
give_sigsegv:
force_sigsegv(sig, current);
+ return 0;
}
/*
* OK, we're invoking a handler
*/
-static void
+static int
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
sigset_t *oldset, struct pt_regs *regs)
{
+ int ret;
+
#ifdef DEBUG_SIG
printk("handle_signal pid:%d sig:%lu rip:%lx rsp:%lx regs=%p\n",
current->pid, sig,
@@ -384,20 +387,22 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
#ifdef CONFIG_IA32_EMULATION
if (test_thread_flag(TIF_IA32)) {
if (ka->sa.sa_flags & SA_SIGINFO)
- ia32_setup_rt_frame(sig, ka, info, oldset, regs);
+ ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs);
else
- ia32_setup_frame(sig, ka, oldset, regs);
+ ret = ia32_setup_frame(sig, ka, oldset, regs);
} else
#endif
- setup_rt_frame(sig, ka, info, oldset, regs);
+ ret = setup_rt_frame(sig, ka, info, oldset, regs);
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ if (ret && !(ka->sa.sa_flags & SA_NODEFER)) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
+
+ return ret;
}
/*
@@ -417,7 +422,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
* kernel mode. Just return without doing anything
* if so.
*/
- if ((regs->cs & 3) != 3)
+ if (!user_mode(regs))
return 1;
if (try_to_freeze(0))
@@ -434,11 +439,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
* inside the kernel.
*/
if (current->thread.debugreg7)
- asm volatile("movq %0,%%db7" : : "r" (current->thread.debugreg7));
+ set_debugreg(current->thread.debugreg7, 7);
/* Whee! Actually deliver the signal. */
- handle_signal(signr, &info, &ka, oldset, regs);
- return 1;
+ return handle_signal(signr, &info, &ka, oldset, regs);
}
no_signal:
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index fb8c809b4cd..66bf6ddeb0c 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -64,6 +64,7 @@ static int notsc __initdata = 0;
unsigned int cpu_khz; /* TSC clocks / usec, not used here */
static unsigned long hpet_period; /* fsecs / HPET clock */
unsigned long hpet_tick; /* HPET clocks / interrupt */
+static int hpet_use_timer;
unsigned long vxtime_hz = PIT_TICK_RATE;
int report_lost_ticks; /* command line option */
unsigned long long monotonic_base;
@@ -105,7 +106,9 @@ static inline unsigned int do_gettimeoffset_tsc(void)
static inline unsigned int do_gettimeoffset_hpet(void)
{
- return ((hpet_readl(HPET_COUNTER) - vxtime.last) * vxtime.quot) >> 32;
+ /* cap counter read to one tick to avoid inconsistencies */
+ unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
+ return (min(counter,hpet_tick) * vxtime.quot) >> 32;
}
unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
@@ -301,7 +304,7 @@ unsigned long long monotonic_clock(void)
last_offset = vxtime.last;
base = monotonic_base;
- this_offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
+ this_offset = hpet_readl(HPET_COUNTER);
} while (read_seqretry(&xtime_lock, seq));
offset = (this_offset - last_offset);
@@ -377,7 +380,14 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
write_seqlock(&xtime_lock);
- if (vxtime.hpet_address) {
+ if (vxtime.hpet_address)
+ offset = hpet_readl(HPET_COUNTER);
+
+ if (hpet_use_timer) {
+ /* if we're using the hpet timer functionality, we
+ * can know more accurately what the counter value
+ * was when the timer interrupt occurred.
+ */
offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
delay = hpet_readl(HPET_COUNTER) - offset;
} else {
@@ -803,17 +813,18 @@ static int hpet_timer_stop_set_go(unsigned long tick)
* Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
* and period also hpet_tick.
*/
-
- hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
+ if (hpet_use_timer) {
+ hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
HPET_TN_32BIT, HPET_T0_CFG);
- hpet_writel(hpet_tick, HPET_T0_CMP);
- hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */
-
+ hpet_writel(hpet_tick, HPET_T0_CMP);
+ hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */
+ cfg |= HPET_CFG_LEGACY;
+ }
/*
* Go!
*/
- cfg |= HPET_CFG_ENABLE | HPET_CFG_LEGACY;
+ cfg |= HPET_CFG_ENABLE;
hpet_writel(cfg, HPET_CFG);
return 0;
@@ -834,8 +845,7 @@ static int hpet_init(void)
id = hpet_readl(HPET_ID);
- if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER) ||
- !(id & HPET_ID_LEGSUP))
+ if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
return -1;
hpet_period = hpet_readl(HPET_PERIOD);
@@ -845,6 +855,8 @@ static int hpet_init(void)
hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
hpet_period;
+ hpet_use_timer = (id & HPET_ID_LEGSUP);
+
return hpet_timer_stop_set_go(hpet_tick);
}
@@ -901,9 +913,11 @@ void __init time_init(void)
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
- if (!hpet_init()) {
+ if (!hpet_init())
vxtime_hz = (1000000000000000L + hpet_period / 2) /
hpet_period;
+
+ if (hpet_use_timer) {
cpu_khz = hpet_calibrate_tsc();
timename = "HPET";
#ifdef CONFIG_X86_PM_TIMER
@@ -968,7 +982,7 @@ void __init time_init_gtod(void)
if (unsynchronized_tsc())
notsc = 1;
if (vxtime.hpet_address && notsc) {
- timetype = "HPET";
+ timetype = hpet_use_timer ? "HPET" : "PIT/HPET";
vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
vxtime.mode = VXTIME_HPET;
do_gettimeoffset = do_gettimeoffset_hpet;
@@ -983,7 +997,7 @@ void __init time_init_gtod(void)
printk(KERN_INFO "Disabling vsyscall due to use of PM timer\n");
#endif
} else {
- timetype = vxtime.hpet_address ? "HPET/TSC" : "PIT/TSC";
+ timetype = hpet_use_timer ? "HPET/TSC" : "PIT/TSC";
vxtime.mode = VXTIME_TSC;
}
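
To make the hpet_tick computation above concrete: HPET_PERIOD reports femtoseconds per HPET clock, so the formula rounds (femtoseconds per timer tick) / (femtoseconds per clock). A worked example with assumed values (14.31818 MHz HPET, HZ = 1000):

#include <stdio.h>

int main(void)
{
        unsigned long long hpet_period = 69841279;         /* fs per HPET clock */
        unsigned long long usec_per_tick = 1000000 / 1000; /* USEC_PER_SEC / HZ */

        unsigned long long hpet_tick =
                (1000000000ULL * usec_per_tick + hpet_period / 2) / hpet_period;

        printf("hpet_tick = %llu\n", hpet_tick);           /* 14318 clocks/tick */
        return 0;
}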
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 3dfec8fdabc..121646fc43f 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -274,7 +274,7 @@ EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
int i;
- int in_kernel = (regs->cs & 3) == 0;
+ int in_kernel = !user_mode(regs);
unsigned long rsp;
const int cpu = safe_smp_processor_id();
struct task_struct *cur = cpu_pda[cpu].pcurrent;
@@ -318,7 +318,7 @@ void handle_BUG(struct pt_regs *regs)
struct bug_frame f;
char tmp;
- if (regs->cs & 3)
+ if (user_mode(regs))
return;
if (__copy_from_user(&f, (struct bug_frame *) regs->rip,
sizeof(struct bug_frame)))
@@ -437,7 +437,7 @@ static void do_trap(int trapnr, int signr, char *str,
}
#endif
- if ((regs->cs & 3) != 0) {
+ if (user_mode(regs)) {
struct task_struct *tsk = current;
if (exception_trace && unhandled_signal(tsk, signr))
@@ -522,7 +522,7 @@ asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
}
#endif
- if ((regs->cs & 3)!=0) {
+ if (user_mode(regs)) {
struct task_struct *tsk = current;
if (exception_trace && unhandled_signal(tsk, SIGSEGV))
@@ -638,7 +638,7 @@ asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
if (eregs == (struct pt_regs *)eregs->rsp)
;
/* Exception from user space */
- else if (eregs->cs & 3)
+ else if (user_mode(eregs))
regs = ((struct pt_regs *)current->thread.rsp0) - 1;
/* Exception from kernel and interrupts are enabled. Move to
kernel process stack. */
@@ -669,7 +669,7 @@ asmlinkage void do_debug(struct pt_regs * regs, unsigned long error_code)
}
#endif
- asm("movq %%db6,%0" : "=r" (condition));
+ get_debugreg(condition, 6);
if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
SIGTRAP) == NOTIFY_STOP)
@@ -697,7 +697,7 @@ asmlinkage void do_debug(struct pt_regs * regs, unsigned long error_code)
* allowing programs to debug themselves without the ptrace()
* interface.
*/
- if ((regs->cs & 3) == 0)
+ if (!user_mode(regs))
goto clear_TF_reenable;
/*
* Was the TF flag set by a debugger? If so, clear it now,
@@ -715,13 +715,13 @@ asmlinkage void do_debug(struct pt_regs * regs, unsigned long error_code)
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
- if ((regs->cs & 3) == 0)
+ if (!user_mode(regs))
goto clear_dr7;
info.si_addr = (void __user *)regs->rip;
force_sig_info(SIGTRAP, &info, tsk);
clear_dr7:
- asm volatile("movq %0,%%db7"::"r"(0UL));
+ set_debugreg(0UL, 7);
return;
clear_TF_reenable:
@@ -756,7 +756,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
unsigned short cwd, swd;
conditional_sti(regs);
- if ((regs->cs & 3) == 0 &&
+ if (!user_mode(regs) &&
kernel_math_error(regs, "kernel x87 math error"))
return;
@@ -822,7 +822,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
unsigned short mxcsr;
conditional_sti(regs);
- if ((regs->cs & 3) == 0 &&
+ if (!user_mode(regs) &&
kernel_math_error(regs, "kernel simd math error"))
return;
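
The (regs->cs & 3) tests replaced throughout this file are exactly what user_mode() expresses; on x86-64 of this era it reduces to checking the privilege-level bits in the saved %cs, roughly (a sketch, not the verbatim kernel definition):

/* CPL is the low two bits of the saved code segment:
 * 0 for kernel, 3 for user. */
#define user_mode(regs) (!!((regs)->cs & 3))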
diff --git a/arch/x86_64/lib/delay.c b/arch/x86_64/lib/delay.c
index aed61a668a1..33a873a3c22 100644
--- a/arch/x86_64/lib/delay.c
+++ b/arch/x86_64/lib/delay.c
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <asm/delay.h>
+#include <asm/msr.h>
#ifdef CONFIG_SMP
#include <asm/smp.h>
@@ -19,6 +20,12 @@
int x86_udelay_tsc = 0; /* Delay via TSC */
+int read_current_timer(unsigned long *timer_value)
+{
+ rdtscll(*timer_value);
+ return 0;
+}
+
void __delay(unsigned long loops)
{
unsigned bclock, now;
diff --git a/arch/x86_64/mm/Makefile b/arch/x86_64/mm/Makefile
index 66c354ad80c..1d232a87f11 100644
--- a/arch/x86_64/mm/Makefile
+++ b/arch/x86_64/mm/Makefile
@@ -4,7 +4,7 @@
obj-y := init.o fault.o ioremap.o extable.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_DISCONTIGMEM) += numa.o
+obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_K8_NUMA) += k8topology.o
obj-$(CONFIG_ACPI_NUMA) += srat.o
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 57d3ab15a5c..2f187986f94 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -74,7 +74,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
instr = (unsigned char *)convert_rip_to_linear(current, regs);
max_instr = instr + 15;
- if ((regs->cs & 3) != 0 && instr >= (unsigned char *)TASK_SIZE)
+ if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
return 0;
while (scan_more && instr < max_instr) {
@@ -106,7 +106,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
/* Could check the LDT for lm, but for now it's good
enough to assume that long mode only uses well known
segments or kernel. */
- scan_more = ((regs->cs & 3) == 0) || (regs->cs == __USER_CS);
+ scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
break;
case 0x60:
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index dbe53b4c7e6..72e4b364ed7 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -318,7 +318,7 @@ void zap_low_mappings(void)
flush_tlb_all();
}
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_NUMA
void __init paging_init(void)
{
{
@@ -427,13 +427,16 @@ void __init mem_init(void)
reservedpages = 0;
/* this will put all low memory onto the freelists */
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
totalram_pages += numa_free_all_bootmem();
tmp = 0;
/* should count reserved pages here for all nodes */
#else
+
+#ifdef CONFIG_FLATMEM
max_mapnr = end_pfn;
if (!mem_map) BUG();
+#endif
totalram_pages += free_all_bootmem();
@@ -515,7 +518,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
/* Should check here against the e820 map to avoid double free */
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
int nid = phys_to_nid(phys);
reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index 58aac23760e..6972df480d2 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -178,7 +178,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
return (__force void __iomem *)phys_to_virt(phys_addr);
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index fd9f25d7a6c..84cde796ecb 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -66,6 +66,13 @@ int __init compute_hash_shift(struct node *nodes, int numnodes)
return -1;
}
+#ifdef CONFIG_SPARSEMEM
+int early_pfn_to_nid(unsigned long pfn)
+{
+ return phys_to_nid(pfn << PAGE_SHIFT);
+}
+#endif
+
/* Initialize bootmem allocator for a node */
void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
@@ -80,6 +87,7 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long en
start_pfn = start >> PAGE_SHIFT;
end_pfn = end >> PAGE_SHIFT;
+ memory_present(nodeid, start_pfn, end_pfn);
nodedata_phys = find_e820_area(start, end, pgdat_size);
if (nodedata_phys == -1L)
panic("Cannot find memory pgdat in node %d\n", nodeid);
diff --git a/arch/x86_64/pci/k8-bus.c b/arch/x86_64/pci/k8-bus.c
index 62349c78db5..7e7d0c2a002 100644
--- a/arch/x86_64/pci/k8-bus.c
+++ b/arch/x86_64/pci/k8-bus.c
@@ -53,25 +53,11 @@ fill_mp_bus_to_cpumask(void)
for (j = SECONDARY_LDT_BUS_NUMBER(ldtbus);
j <= SUBORDINATE_LDT_BUS_NUMBER(ldtbus);
j++)
- pci_bus_to_cpumask[j] =
- node_to_cpumask(NODE_ID(nid));
+ pci_bus_to_node[j] = NODE_ID(nid);
}
}
}
- /* quick sanity check */
- printed = 0;
- for (i = 0; i < 256; i++) {
- if (cpus_empty(pci_bus_to_cpumask[i])) {
- pci_bus_to_cpumask[i] = CPU_MASK_ALL;
- if (printed)
- continue;
- printk(KERN_ERR
- "k8-bus.c: some busses have empty cpu mask\n");
- printed = 1;
- }
- }
-
return 0;
}
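
With pci_bus_to_cpumask[] replaced by the byte-sized pci_bus_to_node[] table, consumers map a bus number to its home node directly. A hedged sketch of node-local allocation for a device (illustrative, not part of this patch):

#include <linux/pci.h>
#include <linux/mm.h>

extern unsigned char pci_bus_to_node[256];

static void *alloc_near_device(struct pci_dev *dev, unsigned int order)
{
        int node = pci_bus_to_node[dev->bus->number];
        struct page *page = alloc_pages_node(node, GFP_KERNEL, order);

        return page ? page_address(page) : NULL;
}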
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
new file mode 100644
index 00000000000..3e89767cea7
--- /dev/null
+++ b/arch/xtensa/Kconfig
@@ -0,0 +1,258 @@
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/config-language.txt.
+
+mainmenu "Linux/Xtensa Kernel Configuration"
+
+config FRAME_POINTER
+ bool
+ default n
+
+config XTENSA
+ bool
+ default y
+ help
+ Xtensa processors are 32-bit RISC machines designed by Tensilica
+ primarily for embedded systems. These processors are both
+ configurable and extensible. The Linux port to the Xtensa
+ architecture supports all processor configurations and extensions,
+ with reasonable minimum requirements. The Xtensa Linux project has
+ a home page at <http://xtensa.sourceforge.net/>.
+
+config UID16
+ bool
+ default n
+
+config RWSEM_XCHGADD_ALGORITHM
+ bool
+ default y
+
+config HAVE_DEC_LOCK
+ bool
+ default y
+
+config GENERIC_HARDIRQS
+ bool
+ default y
+
+source "init/Kconfig"
+
+menu "Processor type and features"
+
+choice
+ prompt "Xtensa Processor Configuration"
+ default XTENSA_CPU_LINUX_BE
+
+config XTENSA_CPU_LINUX_BE
+ bool "linux_be"
+ ---help---
+ The linux_be processor configuration is the baseline Xtensa
+ configuration included in this kernel and is also used by
+ binutils, gcc, and gdb. It contains no TIE, no coprocessors,
+ and the following configuration options:
+
+ Code Density Option            2 Misc Special Registers
+ NSA/NSAU Instructions          128-bit Data Bus Width
+ Processor ID                   8K, 2-way I and D Caches
+ Zero-Overhead Loops            2 Inst Address Break Registers
+ Big Endian                     2 Data Address Break Registers
+ 64 General-Purpose Registers   JTAG Interface and Trace Port
+ 17 Interrupts                  MMU w/ TLBs and Autorefill
+ 3 Interrupt Levels             8 Autorefill Ways (I/D TLBs)
+ 3 Timers                       Unaligned Exceptions
+endchoice
+
+config MMU
+ bool
+ default y
+
+config XTENSA_UNALIGNED_USER
+ bool "Unaligned memory access in use space"
+ ---help---
+ The Xtensa architecture currently does not handle unaligned
+ memory accesses in hardware; they are handled through an
+ exception handler instead. By default, unaligned memory
+ accesses are disabled in user space.
+
+ Say Y here to enable unaligned memory access in user space.
+
+config PREEMPT
+ bool "Preemptible Kernel"
+ ---help---
+ This option reduces the latency of the kernel when reacting to
+ real-time or interactive events by allowing a low priority process to
+ be preempted even if it is in kernel mode executing a system call.
+ Unfortunately the kernel code has some race conditions if both
+ CONFIG_SMP and CONFIG_PREEMPT are enabled, so this option is
+ currently disabled if you are building an SMP kernel.
+
+ Say Y here if you are building a kernel for a desktop, embedded
+ or real-time system. Say N if you are unsure.
+
+config MATH_EMULATION
+ bool "Math emulation"
+ help
+ Can we use information of configuration file?
+
+config HIGHMEM
+ bool "High memory support"
+
+endmenu
+
+menu "Platform options"
+
+choice
+ prompt "Xtensa System Type"
+ default XTENSA_PLATFORM_ISS
+
+config XTENSA_PLATFORM_ISS
+ bool "ISS"
+ help
+ ISS is an acronym for Tensilica's Instruction Set Simulator.
+
+config XTENSA_PLATFORM_XT2000
+ bool "XT2000"
+ help
+ XT2000 is the name of Tensilica's feature-rich emulation platform.
+ This hardware is capable of running a full Linux distribution.
+
+endchoice
+
+
+config XTENSA_CALIBRATE_CCOUNT
+ bool "Auto calibration of the CPU clock rate"
+ ---help---
+ On some platforms (XT2000, for example), the CPU clock rate can
+ vary. The frequency can be determined, however, by measuring
+ against a well-known, fixed frequency, such as a UART oscillator.
+
+config XTENSA_CPU_CLOCK
+ int "CPU clock rate [MHz]"
+ depends on !XTENSA_CALIBRATE_CCOUNT
+ default "16"
+
+config GENERIC_CALIBRATE_DELAY
+ bool "Auto calibration of the BogoMIPS value"
+ ---help---
+ The BogoMIPS value can easily be derived from the CPU frequency.
+
+config CMDLINE_BOOL
+ bool "Default bootloader kernel arguments"
+
+config CMDLINE
+ string "Initial kernel command string"
+ depends on CMDLINE_BOOL
+ default "console=ttyS0,38400 root=/dev/ram"
+ help
+ On some architectures (EBSA110 and CATS), there is currently no way
+ for the boot loader to pass arguments to the kernel. For these
+ architectures, you should supply some command-line options at build
+ time by entering them here. As a minimum, you should specify the
+ memory size and the root device (e.g., mem=64M root=/dev/nfs).
+
+config SERIAL_CONSOLE
+ bool
+ depends on XTENSA_PLATFORM_ISS
+ default y
+
+config XTENSA_ISS_NETWORK
+ bool
+ depends on XTENSA_PLATFORM_ISS
+ default y
+
+endmenu
+
+menu "Bus options"
+
+config PCI
+ bool "PCI support" if !XTENSA_PLATFORM_ISS
+ depends on !XTENSA_PLATFORM_ISS
+ default y
+ help
+ Find out whether you have a PCI motherboard. PCI is the name of a
+ bus system, i.e. the way the CPU talks to the other stuff inside
+ your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
+ VESA. If you have PCI, say Y, otherwise N.
+
+ The PCI-HOWTO, available from
+ <http://www.linuxdoc.org/docs.html#howto>, contains valuable
+ information about which PCI hardware does work under Linux and which
+ doesn't.
+
+source "drivers/pci/Kconfig"
+
+config HOTPLUG
+ bool "Support for hot-pluggable devices"
+ ---help---
+ Say Y here if you want to plug devices into your computer while
+ the system is running, and be able to use them quickly. In many
+ cases, the devices can likewise be unplugged at any time too.
+
+ One well known example of this is PCMCIA- or PC-cards, credit-card
+ size devices such as network cards, modems or hard drives which are
+ plugged into slots found on all modern laptop computers. Another
+ example, used on modern desktops as well as laptops, is USB.
+
+ Enable HOTPLUG and KMOD, and build a modular kernel. Get agent
+ software (at <http://linux-hotplug.sourceforge.net/>) and install it.
+ Then your kernel will automatically call out to a user mode "policy
+ agent" (/sbin/hotplug) to load modules and set up software needed
+ to use devices as you hotplug them.
+
+source "drivers/pcmcia/Kconfig"
+
+source "drivers/pci/hotplug/Kconfig"
+
+endmenu
+
+menu "Exectuable file formats"
+
+# only elf supported
+config KCORE_ELF
+ bool
+ depends on PROC_FS
+ default y
+ help
+ If you enabled support for /proc file system then the file
+ /proc/kcore will contain the kernel core image in ELF format. This
+ can be used in gdb:
+
+ $ cd /usr/src/linux ; gdb vmlinux /proc/kcore
+
+ This is especially useful if you have compiled the kernel with the
+ "-g" option to preserve debugging information. It is mainly used
+ for examining kernel data structures on the live kernel.
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+menu "Xtensa initrd options"
+ depends on BLK_DEV_INITRD
+
+config EMBEDDED_RAMDISK
+ bool "Embed root filesystem ramdisk into the kernel"
+
+config EMBEDDED_RAMDISK_IMAGE
+ string "Filename of gziped ramdisk image"
+ depends on EMBEDDED_RAMDISK
+ default "ramdisk.gz"
+ help
+ This is the filename of the ramdisk image to be built into the
+ kernel. Relative pathnames are relative to arch/xtensa/boot/ramdisk/.
+ The ramdisk image is not part of the kernel distribution; you must
+ provide one yourself.
+endmenu
+
+source "arch/xtensa/Kconfig.debug"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
+
+
diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug
new file mode 100644
index 00000000000..11c585295dd
--- /dev/null
+++ b/arch/xtensa/Kconfig.debug
@@ -0,0 +1,7 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+endmenu
+
+
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
new file mode 100644
index 00000000000..4fa27453b1f
--- /dev/null
+++ b/arch/xtensa/Makefile
@@ -0,0 +1,102 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2001 - 2005 Tensilica Inc.
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture.
+
+# Core configuration.
+# (Use CPU=<xtensa_config> to use another default compiler.)
+
+cpu-$(CONFIG_XTENSA_CPU_LINUX_BE) := linux_be
+cpu-$(CONFIG_XTENSA_CPU_LINUX_CUSTOM) := linux_custom
+
+CPU = $(cpu-y)
+export CPU
+
+# Platform configuration
+
+platform-y := common
+platform-$(CONFIG_XTENSA_PLATFORM_XT2000) := xt2000
+platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
+
+PLATFORM = $(platform-y)
+export PLATFORM
+
+#LDFLAGS_vmlinux := -T$(word 1,$(LINKSCRIPT))
+AFLAGS_vmlinux.lds.o := -Uxtensa
+CPPFLAGS += -Iarch/xtensa -Iinclude/asm -mlongcalls -g
+AFLAGS += -Iarch/xtensa -Iinclude/asm
+CPP = $(CC) -E $(CFLAGS)
+
+cflags-y += -Iarch/xtensa -pipe -mlongcalls
+
+
+KBUILD_DEFCONFIG := common_defconfig
+
+# ramdisk/initrd support
+# You need a compressed ramdisk image, named ramdisk.gz in
+# arch/xtensa/boot/ramdisk
+
+core-$(CONFIG_EMBEDDED_RAMDISK) += arch/xtensa/boot/ramdisk/
+
+# Test for cross compiling
+
+ifneq ($(CPU),)
+ COMPILE_ARCH = $(shell uname -m)
+
+ ifneq ($(COMPILE_ARCH), xtensa)
+ ifndef CROSS_COMPILE
+ CROSS_COMPILE = xtensa_$(CPU)-
+ endif
+ endif
+endif
+
+#
+
+LIBGCC := $(shell $(CC) $(CFLAGS) -print-libgcc-file-name)
+
+head-y := arch/xtensa/kernel/head.o
+core-y += arch/xtensa/kernel/ \
+ arch/xtensa/mm/ arch/xtensa/platform-$(PLATFORM)/
+libs-y += arch/xtensa/lib/ $(LIBGCC)
+
+boot := arch/xtensa/boot
+
+arch/xtensa/kernel/asm-offsets.s: \
+ arch/xtensa/kernel/asm-offsets.c \
+ include/asm-xtensa/.platform
+
+include/asm-xtensa/offsets.h: arch/xtensa/kernel/asm-offsets.s
+ $(call filechk,gen-asm-offsets)
+
+prepare: include/asm-xtensa/.platform include/asm-xtensa/offsets.h
+
+# Update machine cpu and platform symlinks if something which affects
+# them changed.
+
+include/asm-xtensa/.platform: $(wildcard include/config/arch/*.h)
+ @echo ' Setting up cpu ($(CPU)) and platform ($(PLATFORM)) symlinks'
+ $(Q)rm -f include/asm-xtensa/platform
+ $(Q)rm -f include/asm-xtensa/xtensa/config
+ $(Q)(cd include/asm-xtensa/; ln -sf platform-$(PLATFORM) platform)
+ $(Q)(cd include/asm-xtensa/xtensa; ln -sf config-$(CPU) config)
+
+all: zImage
+
+bzImage: zImage
+
+zImage zImage.initrd: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $@
+
+CLEAN_FILES += arch/xtensa/vmlinux.lds include/asm-xtensa/offsets.h
+
+define archhelp
+ @echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
+endef
+
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
new file mode 100644
index 00000000000..260f456ccf0
--- /dev/null
+++ b/arch/xtensa/boot/Makefile
@@ -0,0 +1,37 @@
+#
+# arch/xtensa/boot/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+#
+
+
+CFLAGS += -fno-builtin -Iarch/$(ARCH)/boot/include
+HOSTFLAGS += -Iarch/$(ARCH)/boot/include
+
+BIG_ENDIAN := $(shell echo -e "\#ifdef __XTENSA_EL__\nint little;\n\#else\nint big;\n\#endif" | $(CC) -E -|grep -c big)
+
+
+export CFLAGS
+export AFLAGS
+export BIG_ENDIAN
+
+# Subdirs for the boot loader(s)
+
+bootdir-$(CONFIG_XTENSA_PLATFORM_ISS) += boot-elf
+bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000) += boot-redboot boot-elf
+
+subdir-y := lib/
+
+subdir-y += boot-elf/ boot-redboot/
+
+zImage zImage.initrd Image Image.initrd: $(bootdir-y)
+
+$(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \
+ $(addprefix $(obj)/,$(host-progs))
+ $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
+
+
+
diff --git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile
new file mode 100644
index 00000000000..f6ef6a36966
--- /dev/null
+++ b/arch/xtensa/boot/boot-elf/Makefile
@@ -0,0 +1,52 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+
+GZIP = gzip
+GZIP_FLAGS = -v9fc
+
+ifeq ($(BIG_ENDIAN),1)
+OBJCOPY_ARGS := -O elf32-xtensa-be
+else
+OBJCOPY_ARGS := -O elf32-xtensa-le
+endif
+
+export OBJCOPY_ARGS
+
+boot-y := bootstrap.o
+
+OBJS := $(addprefix $(obj)/,$(boot-y))
+
+Image: vmlinux $(OBJS)
+ $(OBJCOPY) --strip-all -R .comment -R .xt.insn -O binary \
+ vmlinux vmlinux.tmp
+ $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
+ --add-section image=vmlinux.tmp \
+ --set-section-flags image=contents,alloc,load,data \
+ $(OBJS) $@.tmp
+ $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
+ -T arch/$(ARCH)/boot/boot-elf/boot.ld \
+ -o arch/$(ARCH)/boot/$@.elf $@.tmp
+ rm -f $@.tmp vmlinux.tmp
+
+Image.initrd: vmlinux $(OBJS)
+ $(OBJCOPY) --strip-all -R .comment -R .xt.insn -O binary \
+ --add-section .initrd=arch/$(ARCH)/boot/ramdisk \
+ --set-section-flags .initrd=contents,alloc,load,data \
+ vmlinux vmlinux.tmp
+ $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
+ --add-section image=vmlinux.tmp \
+ --set-section-flags image=contents,alloc,load,data \
+ $(OBJS) $@.tmp
+ $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
+ -T arch/$(ARCH)/boot/boot-elf/boot.ld \
+ -o arch/$(ARCH)/boot/$@.elf $@.tmp
+ rm -f $@.tmp vmlinux.tmp
+
+
+zImage: Image
+
+zImage.initrd: Image.initrd
+
diff --git a/arch/xtensa/boot/boot-elf/boot.ld b/arch/xtensa/boot/boot-elf/boot.ld
new file mode 100644
index 00000000000..4ab06a0a7a6
--- /dev/null
+++ b/arch/xtensa/boot/boot-elf/boot.ld
@@ -0,0 +1,71 @@
+OUTPUT_ARCH(xtensa)
+
+SECTIONS
+{
+ .start 0xD0000000 : { *(.start) }
+
+ .text 0xD0000000:
+ {
+ __reloc_start = . ;
+ _text_start = . ;
+ *(.literal .text.literal .text)
+ _text_end = . ;
+ }
+
+ .rodata ALIGN(0x04):
+ {
+ *(.rodata)
+ *(.rodata1)
+ }
+
+ .data ALIGN(0x04):
+ {
+ *(.data)
+ *(.data1)
+ *(.sdata)
+ *(.sdata2)
+ *(.got.plt)
+ *(.got)
+ *(.dynamic)
+ }
+
+ __reloc_end = . ;
+
+ .initrd ALIGN(0x10) :
+ {
+ boot_initrd_start = . ;
+ *(.initrd)
+ boot_initrd_end = .;
+ }
+
+ . = ALIGN(0x10);
+ __image_load = . ;
+ .image 0xd0001000:
+ {
+ _image_start = .;
+ *(image)
+ . = (. + 3) & ~ 3;
+ _image_end = . ;
+ }
+
+
+ .bss ((LOADADDR(.image) + SIZEOF(.image) + 3) & ~ 3):
+ {
+ __bss_start = .;
+ *(.sbss)
+ *(.scommon)
+ *(.dynbss)
+ *(.bss)
+ __bss_end = .;
+ }
+ _end = .;
+ _param_start = .;
+
+ .ResetVector.text 0xfe000020 :
+ {
+ *(.ResetVector.text)
+ }
+
+
+ PROVIDE (end = .);
+}
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S
new file mode 100644
index 00000000000..7cba94abdab
--- /dev/null
+++ b/arch/xtensa/boot/boot-elf/bootstrap.S
@@ -0,0 +1,37 @@
+
+#include <xtensa/config/specreg.h>
+#include <xtensa/config/core.h>
+
+#include <linux/config.h>
+#include <asm/bootparam.h>
+
+
+/* ResetVector
+ */
+ .section .ResetVector.text, "ax"
+ .global _ResetVector
+_ResetVector:
+ _j reset
+ .align 4
+RomInitAddr:
+ .word 0xd0001000
+RomBootParam:
+ .word _bootparam
+reset:
+ l32r a0, RomInitAddr
+ l32r a2, RomBootParam
+ movi a3, 0
+ movi a4, 0
+ jx a0
+
+ .align 4
+ .section .bootstrap.data, "aw"
+
+ .globl _bootparam
+_bootparam:
+ .short BP_TAG_FIRST
+ .short 4
+ .long BP_VERSION
+ .short BP_TAG_LAST
+ .short 0
+ .long 0
diff --git a/arch/xtensa/boot/boot-redboot/Makefile b/arch/xtensa/boot/boot-redboot/Makefile
new file mode 100644
index 00000000000..ca8a68bc847
--- /dev/null
+++ b/arch/xtensa/boot/boot-redboot/Makefile
@@ -0,0 +1,35 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+
+GZIP = gzip
+GZIP_FLAGS = -v9fc
+ifeq ($(BIG_ENDIAN),1)
+OBJCOPY_ARGS := -O elf32-xtensa-be
+else
+OBJCOPY_ARGS := -O elf32-xtensa-le
+endif
+
+LD_ARGS = -T $(obj)/boot.ld
+
+boot-y := bootstrap.o
+
+OBJS := $(addprefix $(obj)/,$(boot-y))
+LIBS := arch/$(ARCH)/boot/lib/lib.a arch/$(ARCH)/lib/lib.a
+
+LIBGCC := $(shell $(CC) $(CFLAGS) -print-libgcc-file-name)
+
+zImage: vmlinux $(OBJS) $(LIBS)
+ $(OBJCOPY) --strip-all -R .comment -R .xt.insn -O binary \
+ $(TOPDIR)/vmlinux vmlinux.tmp
+ gzip -vf9 vmlinux.tmp
+ $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
+ --add-section image=vmlinux.tmp.gz \
+ --set-section-flags image=contents,alloc,load,data \
+ $(OBJS) $@.tmp
+ $(LD) $(LD_ARGS) -o $@.elf $@.tmp $(LIBS) -L/xtensa-elf/lib $(LIBGCC)
+ $(OBJCOPY) -S -O binary $@.elf arch/$(ARCH)/boot/images/$@.redboot
+# rm -f $@.tmp $@.elf vmlinux.tmp.gz
+
diff --git a/arch/xtensa/boot/boot-redboot/boot.ld b/arch/xtensa/boot/boot-redboot/boot.ld
new file mode 100644
index 00000000000..65b726410e8
--- /dev/null
+++ b/arch/xtensa/boot/boot-redboot/boot.ld
@@ -0,0 +1,66 @@
+OUTPUT_ARCH(xtensa)
+
+SECTIONS
+{
+ .start 0xD0200000 : { *(.start) }
+
+ .text :
+ {
+ __reloc_start = . ;
+ _text_start = . ;
+ *(.literal .text.literal .text)
+ _text_end = . ;
+ }
+
+ .rodata ALIGN(0x04):
+ {
+ *(.rodata)
+ *(.rodata1)
+ }
+
+ .data ALIGN(0x04):
+ {
+ *(.data)
+ *(.data1)
+ *(.sdata)
+ *(.sdata2)
+ *(.got.plt)
+ *(.got)
+ *(.dynamic)
+ }
+
+ __reloc_end = . ;
+
+ .initrd ALIGN(0x10) :
+ {
+ boot_initrd_start = . ;
+ *(.initrd)
+ boot_initrd_end = .;
+ }
+
+ . = ALIGN(0x10);
+ __image_load = . ;
+ .image 0xd0001000: AT(__image_load)
+ {
+ _image_start = .;
+ *(image)
+ . = (. + 3) & ~ 3;
+ _image_end = . ;
+ }
+
+
+ .bss ((LOADADDR(.image) + SIZEOF(.image) + 3) & ~ 3):
+ {
+ __bss_start = .;
+ *(.sbss)
+ *(.scommon)
+ *(.dynbss)
+ *(.bss)
+ __bss_end = .;
+ }
+ _end = .;
+ _param_start = .;
+
+
+ PROVIDE (end = .);
+}
diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S
new file mode 100644
index 00000000000..ee636b0da81
--- /dev/null
+++ b/arch/xtensa/boot/boot-redboot/bootstrap.S
@@ -0,0 +1,246 @@
+
+#define _ASMLANGUAGE
+#include <xtensa/config/specreg.h>
+#include <xtensa/config/core.h>
+#include <xtensa/cacheasm.h>
+
+ /*
+ * RB-Data: RedBoot data/bss
+ * P: Boot-Parameters
+ * L: Kernel-Loader
+ *
+ * The Linux-Kernel image including the loader must be loaded
+ * to a position so that the kernel and the boot parameters
+ * can fit in the space before the load address.
+ * ______________________________________________________
+ * |_RB-Data_|_P_|__________|_L_|___Linux-Kernel___|______|
+ * ^
+ * ^ Load address
+ * ______________________________________________________
+ * |___Linux-Kernel___|_P_|_L_|___________________________|
+ *
+ * The loader copies the parameter list to the position that
+ * will be the end of the kernel, and itself to the end of
+ * the parameter list.
+ */
+
+/* Make sure we have enough space for the 'uncompressor' */
+
+#define STACK_SIZE 32768
+#define HEAP_SIZE (131072*4)
+
+ # a2: Parameter list
+ # a3: Size of parameter list
+
+ .section .start, "ax"
+
+ .globl __start
+ /* this must be the first byte of the loader! */
+__start:
+ entry sp, 32 # we do not intend to return
+ _call0 _start
+__start_a0:
+ .align 4
+
+ .section .text, "ax"
+ .begin literal_prefix .text
+
+ /* put literals in here! */
+
+ .globl _start
+_start:
+
+ /* 'reset' window registers */
+
+ movi a4, 1
+ wsr a4, PS
+ rsync
+
+ rsr a5, WINDOWBASE
+ ssl a5
+ sll a4, a4
+ wsr a4, WINDOWSTART
+ rsync
+
+ movi a4, 0x00040000
+ wsr a4, PS
+ rsync
+
+ /* Copy the loader to its link address.
+ * Note: The loader itself is a very small piece, so we assume we
+ * don't partially overlap. We also assume (even more important)
+ * that the kernel image is out of the way. Usually, when the
+ * load address of this image is not arbitrary but aligned to a
+ * multiple of some tens of kilobytes, we shouldn't overlap.
+ */
+
+ /* Note: The assembler cannot relax "addi a0, a0, ..." to an
+ l32r, so we load to a4 first. */
+
+ addi a4, a0, __start - __start_a0
+ mov a0, a4
+ movi a4, __start
+ movi a5, __reloc_end
+
+ # a0: address where this code has been loaded
+ # a4: compiled address of __start
+ # a5: compiled end address
+
+ mov.n a7, a0
+ mov.n a8, a4
+
+1:
+ l32i a10, a7, 0
+ l32i a11, a7, 4
+ s32i a10, a8, 0
+ s32i a11, a8, 4
+ l32i a10, a7, 8
+ l32i a11, a7, 12
+ s32i a10, a8, 8
+ s32i a11, a8, 12
+ addi a8, a8, 16
+ addi a7, a7, 16
+ blt a8, a5, 1b
+
+
+ /* We have to flush and invalidate the caches here before we jump. */
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+ dcache_writeback_all a5, a6
+#endif
+ icache_invalidate_all a5, a6
+
+ movi a11, _reloc
+ jx a11
+
+ .globl _reloc
+_reloc:
+
+ /* RedBoot is now at the end of the memory, so we don't have
+ * to copy the parameter list. Keep the code around in case
+ * we need it again. */
+#if 0
+ # a0: load address
+ # a2: start address of parameter list
+ # a3: length of parameter list
+ # a4: __start
+
+ /* copy the parameter list out of the way */
+
+ movi a6, _param_start
+ add a3, a2, a3
+2:
+ l32i a8, a2, 0
+ s32i a8, a6, 0
+ addi a2, a2, 4
+ addi a6, a6, 4
+ blt a2, a3, 2b
+#endif
+
+ /* clear BSS section */
+ movi a6, __bss_start
+ movi a7, __bss_end
+ movi.n a5, 0
+3:
+ s32i a5, a6, 0
+ addi a6, a6, 4
+ blt a6, a7, 3b
+
+ movi a5, -16
+ movi a1, _stack + STACK_SIZE
+ and a1, a1, a5
+
+ /* Uncompress the kernel */
+
+ # a0: load address
+ # a2: boot parameter
+ # a4: __start
+
+ movi a3, __image_load
+ sub a4, a3, a4
+ add a8, a0, a4
+
+ # a1 Stack
+ # a8(a4) Load address of the image
+
+ movi a6, _image_start
+ movi a10, _image_end
+ movi a7, 0x1000000
+ sub a11, a10, a6
+ movi a9, complen
+ s32i a11, a9, 0
+
+ movi a0, 0
+
+ # a6 destination
+ # a7 maximum size of destination
+ # a8 source
+ # a9 ptr to length
+
+ .extern gunzip
+ movi a4, gunzip
+ beqz a4, 1f
+
+ callx4 a4
+
+ j 2f
+
+
+ # a6 destination start
+ # a7 maximum size of destination
+ # a8 source start
+ # a9 ptr to length
+ # a10 destination end
+
+1:
+ l32i a9, a8, 0
+ l32i a11, a8, 4
+ s32i a9, a6, 0
+ s32i a11, a6, 4
+ l32i a9, a8, 8
+ l32i a11, a8, 12
+ s32i a9, a6, 8
+ s32i a11, a6, 12
+ addi a6, a6, 16
+ addi a8, a8, 16
+ blt a6, a10, 1b
+
+
+ /* jump to the kernel */
+2:
+#if XCHAL_DCACHE_IS_WRITEBACK
+ dcache_writeback_all a5, a6
+#endif
+ icache_invalidate_all a5, a6
+
+ movi a5, __start
+ movi a3, boot_initrd_start
+ movi a4, boot_initrd_end
+ sub a3, a3, a5
+ sub a4, a4, a5
+ add a3, a0, a3
+ add a4, a0, a4
+
+ # a2 Boot parameter list
+ # a3 initrd_start (virtual load address)
+ # a4 initrd_end (virtual load address)
+
+ movi a0, _image_start
+ jx a0
+
+ .align 16
+ .data
+ .globl avail_ram
+avail_ram:
+ .long _heap
+ .globl end_avail
+end_avail:
+ .long _heap + HEAP_SIZE
+
+ .comm _stack, STACK_SIZE
+ .comm _heap, HEAP_SIZE
+
+ .globl end_avail
+ .comm complen, 4
+
+ .end literal_prefix
diff --git a/arch/xtensa/boot/include/zlib.h b/arch/xtensa/boot/include/zlib.h
new file mode 100644
index 00000000000..ea29b623785
--- /dev/null
+++ b/arch/xtensa/boot/include/zlib.h
@@ -0,0 +1,433 @@
+/*
+ * BK Id: SCCS/s.zlib.h 1.8 05/18/01 15:17:23 cort
+ */
+/*
+ * This file is derived from zlib.h and zconf.h from the zlib-0.95
+ * distribution by Jean-loup Gailly and Mark Adler, with some additions
+ * by Paul Mackerras to aid in implementing Deflate compression and
+ * decompression for PPP packets.
+ */
+
+/*
+ * ==FILEVERSION 960122==
+ *
+ * This marker is used by the Linux installation script to determine
+ * whether an up-to-date version of this file is already installed.
+ */
+
+/* zlib.h -- interface of the 'zlib' general purpose compression library
+ version 0.95, Aug 16th, 1995.
+
+ Copyright (C) 1995 Jean-loup Gailly and Mark Adler
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Jean-loup Gailly Mark Adler
+ gzip@prep.ai.mit.edu madler@alumni.caltech.edu
+ */
+
+#ifndef _ZLIB_H
+#define _ZLIB_H
+
+/* #include "zconf.h" */ /* included directly here */
+
+/* zconf.h -- configuration of the zlib compression library
+ * Copyright (C) 1995 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* From: zconf.h,v 1.12 1995/05/03 17:27:12 jloup Exp */
+
+/*
+ The library does not install any signal handler. It is recommended to
+ add at least a handler for SIGSEGV when decompressing; the library checks
+ the consistency of the input data whenever possible but may go nuts
+ for some forms of corrupted input.
+ */
+
+/*
+ * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
+ * than 64k bytes at a time (needed on systems with 16-bit int).
+ * Compile with -DUNALIGNED_OK if it is OK to access shorts or ints
+ * at addresses which are not a multiple of their size.
+ * Under DOS, -DFAR=far or -DFAR=__far may be needed.
+ */
+
+#ifndef STDC
+# if defined(MSDOS) || defined(__STDC__) || defined(__cplusplus)
+# define STDC
+# endif
+#endif
+
+#ifdef __MWERKS__ /* Metrowerks CodeWarrior declares fileno() in unix.h */
+# include <unix.h>
+#endif
+
+/* Maximum value for memLevel in deflateInit2 */
+#ifndef MAX_MEM_LEVEL
+# ifdef MAXSEG_64K
+# define MAX_MEM_LEVEL 8
+# else
+# define MAX_MEM_LEVEL 9
+# endif
+#endif
+
+#ifndef FAR
+# define FAR
+#endif
+
+/* Maximum value for windowBits in deflateInit2 and inflateInit2 */
+#ifndef MAX_WBITS
+# define MAX_WBITS 15 /* 32K LZ77 window */
+#endif
+
+/* The memory requirements for deflate are (in bytes):
+ 1 << (windowBits+2) + 1 << (memLevel+9)
+ that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
+ plus a few kilobytes for small objects. For example, if you want to reduce
+ the default memory requirements from 256K to 128K, compile with
+ make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
+ Of course this will generally degrade compression (there's no free lunch).
+
+ The memory requirements for inflate are (in bytes) 1 << windowBits
+ that is, 32K for windowBits=15 (default value) plus a few kilobytes
+ for small objects.
+*/
+
+ /* Type declarations */
+
+#ifndef OF /* function prototypes */
+# ifdef STDC
+# define OF(args) args
+# else
+# define OF(args) ()
+# endif
+#endif
+
+typedef unsigned char Byte; /* 8 bits */
+typedef unsigned int uInt; /* 16 bits or more */
+typedef unsigned long uLong; /* 32 bits or more */
+
+typedef Byte FAR Bytef;
+typedef char FAR charf;
+typedef int FAR intf;
+typedef uInt FAR uIntf;
+typedef uLong FAR uLongf;
+
+#ifdef STDC
+ typedef void FAR *voidpf;
+ typedef void *voidp;
+#else
+ typedef Byte FAR *voidpf;
+ typedef Byte *voidp;
+#endif
+
+/* end of original zconf.h */
+
+#define ZLIB_VERSION "0.95P"
+
+/*
+ The 'zlib' compression library provides in-memory compression and
+ decompression functions, including integrity checks of the uncompressed
+ data. This version of the library supports only one compression method
+ (deflation) but other algorithms may be added later and will have the same
+ stream interface.
+
+ For compression the application must provide the output buffer and
+ may optionally provide the input buffer for optimization. For decompression,
+ the application must provide the input buffer and may optionally provide
+ the output buffer for optimization.
+
+ Compression can be done in a single step if the buffers are large
+ enough (for example if an input file is mmap'ed), or can be done by
+ repeated calls of the compression function. In the latter case, the
+ application must provide more input and/or consume the output
+ (providing more output space) before each call.
+*/
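+
+/*
+   For example, a minimal single-step decompression sketch (comp_buf,
+   comp_len, out_buf and out_len are placeholders supplied by the
+   application, and my_alloc/my_free stand for whatever allocator pair
+   the application provides, as described below):
+
+     z_stream s;
+
+     s.zalloc = my_alloc; s.zfree = my_free; s.opaque = Z_NULL;
+     s.next_in = comp_buf; s.avail_in = comp_len;
+     s.next_out = out_buf; s.avail_out = out_len;
+     if (inflateInit(&s) == Z_OK
+         && inflate(&s, Z_FINISH) == Z_STREAM_END)
+         ; /* s.total_out bytes of uncompressed data are in out_buf */
+     inflateEnd(&s);
+*/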
+
+typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
+typedef void (*free_func) OF((voidpf opaque, voidpf address, uInt nbytes));
+
+struct internal_state;
+
+typedef struct z_stream_s {
+ Bytef *next_in; /* next input byte */
+ uInt avail_in; /* number of bytes available at next_in */
+ uLong total_in; /* total nb of input bytes read so far */
+
+ Bytef *next_out; /* next output byte should be put there */
+ uInt avail_out; /* remaining free space at next_out */
+ uLong total_out; /* total nb of bytes output so far */
+
+ char *msg; /* last error message, NULL if no error */
+ struct internal_state FAR *state; /* not visible by applications */
+
+ alloc_func zalloc; /* used to allocate the internal state */
+ free_func zfree; /* used to free the internal state */
+ voidp opaque; /* private data object passed to zalloc and zfree */
+
+ Byte data_type; /* best guess about the data type: ascii or binary */
+
+} z_stream;
+
+/*
+ The application must update next_in and avail_in when avail_in has
+ dropped to zero. It must update next_out and avail_out when avail_out
+ has dropped to zero. The application must initialize zalloc, zfree and
+ opaque before calling the init function. All other fields are set by the
+ compression library and must not be updated by the application.
+
+ The opaque value provided by the application will be passed as the first
+ parameter for calls of zalloc and zfree. This can be useful for custom
+ memory management. The compression library attaches no meaning to the
+ opaque value.
+
+ zalloc must return Z_NULL if there is not enough memory for the object.
+ On 16-bit systems, the functions zalloc and zfree must be able to allocate
+ exactly 65536 bytes, but will not be required to allocate more than this
+ if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
+ pointers returned by zalloc for objects of exactly 65536 bytes *must*
+ have their offset normalized to zero. The default allocation function
+ provided by this library ensures this (see zutil.c). To reduce memory
+ requirements and avoid any allocation of 64K objects, at the expense of
+ compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
+
+ The fields total_in and total_out can be used for statistics or
+ progress reports. After compression, total_in holds the total size of
+ the uncompressed data and may be saved for use in the decompressor
+ (particularly if the decompressor wants to decompress everything in
+ a single step).
+*/
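+
+/*
+   In a freestanding boot environment like this one there are no default
+   allocators, so the application must supply the pair itself. A minimal
+   bump-pointer sketch (assuming the heap bounds avail_ram/end_avail set
+   up by the boot code; freeing is a no-op since the loader never reuses
+   memory):
+
+     extern unsigned char *avail_ram, *end_avail;
+
+     voidpf my_alloc(voidpf opaque, uInt items, uInt size)
+     {
+         voidpf p = avail_ram;
+         avail_ram += (items * size + 7) & ~7;  // keep 8-byte alignment
+         return avail_ram > end_avail ? Z_NULL : p;
+     }
+
+     void my_free(voidpf opaque, voidpf addr, uInt nbytes) { }
+*/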
+
+ /* constants */
+
+#define Z_NO_FLUSH 0
+#define Z_PARTIAL_FLUSH 1
+#define Z_FULL_FLUSH 2
+#define Z_SYNC_FLUSH 3 /* experimental: partial_flush + byte align */
+#define Z_FINISH 4
+#define Z_PACKET_FLUSH 5
+/* See deflate() below for the usage of these constants */
+
+#define Z_OK 0
+#define Z_STREAM_END 1
+#define Z_ERRNO (-1)
+#define Z_STREAM_ERROR (-2)
+#define Z_DATA_ERROR (-3)
+#define Z_MEM_ERROR (-4)
+#define Z_BUF_ERROR (-5)
+/* error codes for the compression/decompression functions */
+
+#define Z_BEST_SPEED 1
+#define Z_BEST_COMPRESSION 9
+#define Z_DEFAULT_COMPRESSION (-1)
+/* compression levels */
+
+#define Z_FILTERED 1
+#define Z_HUFFMAN_ONLY 2
+#define Z_DEFAULT_STRATEGY 0
+
+#define Z_BINARY 0
+#define Z_ASCII 1
+#define Z_UNKNOWN 2
+/* Used to set the data_type field */
+
+#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
+
+extern char *zlib_version;
+/* The application can compare zlib_version and ZLIB_VERSION for consistency.
+ If the first character differs, the library code actually used is
+ not compatible with the zlib.h header file used by the application.
+ */
+
+ /* basic functions */
+
+extern int inflateInit OF((z_stream *strm));
+/*
+ Initializes the internal stream state for decompression. The fields
+ zalloc and zfree must be initialized before by the caller. If zalloc and
+ zfree are set to Z_NULL, inflateInit updates them to use default allocation
+ functions.
+
+ inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory. msg is set to null if there is no error message.
+ inflateInit does not perform any decompression: this will be done by
+ inflate().
+*/
+
+
+extern int inflate OF((z_stream *strm, int flush));
+/*
+ Performs one or both of the following actions:
+
+ - Decompress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), next_in is updated and processing
+ will resume at this point for the next call of inflate().
+
+ - Provide more output starting at next_out and update next_out and avail_out
+ accordingly. inflate() always provides as much output as possible
+ (until there is no more input data or no more space in the output buffer).
+
+ Before the call of inflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming
+ more output, and updating the next_* and avail_* values accordingly.
+ The application can consume the uncompressed output when it wants, for
+ example when the output buffer is full (avail_out == 0), or after each
+ call of inflate().
+
+ If the parameter flush is set to Z_PARTIAL_FLUSH or Z_PACKET_FLUSH,
+ inflate flushes as much output as possible to the output buffer. The
+ flushing behavior of inflate is not specified for values of the flush
+ parameter other than Z_PARTIAL_FLUSH, Z_PACKET_FLUSH or Z_FINISH, but the
+ current implementation actually flushes as much output as possible
+ anyway. For Z_PACKET_FLUSH, inflate checks that once all the input data
+ has been consumed, it is expecting to see the length field of a stored
+ block; if not, it returns Z_DATA_ERROR.
+
+ inflate() should normally be called until it returns Z_STREAM_END or an
+ error. However if all decompression is to be performed in a single step
+ (a single call of inflate), the parameter flush should be set to
+ Z_FINISH. In this case all pending input is processed and all pending
+ output is flushed; avail_out must be large enough to hold all the
+ uncompressed data. (The size of the uncompressed data may have been saved
+ by the compressor for this purpose.) The next operation on this stream must
+ be inflateEnd to deallocate the decompression state. The use of Z_FINISH
+ is never required, but can be used to inform inflate that a faster routine
+ may be used for the single inflate() call.
+
+ inflate() returns Z_OK if some progress has been made (more input
+ processed or more output produced), Z_STREAM_END if the end of the
+ compressed data has been reached and all uncompressed output has been
+ produced, Z_DATA_ERROR if the input data was corrupted, Z_STREAM_ERROR if
+ the stream structure was inconsistent (for example if next_in or next_out
+ was NULL), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if no
+ progress is possible or if there was not enough room in the output buffer
+ when Z_FINISH is used. In the Z_DATA_ERROR case, the application may then
+ call inflateSync to look for a good compression block. */
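+
+/*
+   A typical incremental call pattern looks roughly like this (a sketch;
+   in_buf and out_buf are application buffers, and refill_input() and
+   consume_output() are placeholder application routines):
+
+     int r = Z_OK;
+
+     while (r == Z_OK) {
+         if (s.avail_in == 0) {
+             s.next_in = in_buf;
+             s.avail_in = refill_input(in_buf, sizeof(in_buf));
+         }
+         r = inflate(&s, Z_NO_FLUSH);
+         if (s.avail_out == 0) {
+             consume_output(out_buf, sizeof(out_buf));
+             s.next_out = out_buf;
+             s.avail_out = sizeof(out_buf);
+         }
+     }
+     // consume any remaining output; r should be Z_STREAM_END
+*/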
+
+
+extern int inflateEnd OF((z_stream *strm));
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any
+ pending output.
+
+ inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
+ was inconsistent. In the error case, msg may be set but then points to a
+ static string (which must not be deallocated).
+*/
+
+ /* advanced functions */
+
+extern int inflateInit2 OF((z_stream *strm,
+ int windowBits));
+/*
+ This is another version of inflateInit with more compression options. The
+ fields next_out, zalloc and zfree must be initialized before by the caller.
+
+ The windowBits parameter is the base two logarithm of the maximum window
+ size (the size of the history buffer). It should be in the range 8..15 for
+ this version of the library (the value 16 will be allowed soon). The
+ default value is 15 if inflateInit is used instead. If a compressed stream
+ with a larger window size is given as input, inflate() will return with
+ the error code Z_DATA_ERROR instead of trying to allocate a larger window.
+
+ If next_out is not null, the library will use this buffer for the history
+ buffer; the buffer must either be large enough to hold the entire output
+ data, or have at least 1<<windowBits bytes. If next_out is null, the
+ library will allocate its own buffer (and leave next_out null). next_in
+ need not be provided here but must be provided by the application for the
+ next call of inflate().
+
+ If the history buffer is provided by the application, next_out must
+ never be changed by the application since the decompressor maintains
+ history information inside this buffer from call to call; the application
+ can only reset next_out to the beginning of the history buffer when
+ avail_out is zero and all output has been consumed.
+
+ inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
+ not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
+ windowBits < 8). msg is set to null if there is no error message.
+ inflateInit2 does not perform any decompression: this will be done by
+ inflate().
+*/
+
+extern int inflateSync OF((z_stream *strm));
+/*
+ Skips invalid compressed data until the special marker (see deflate()
+ above) can be found, or until all available input is skipped. No output
+ is provided.
+
+ inflateSync returns Z_OK if the special marker has been found, Z_BUF_ERROR
+ if no more input was provided, Z_DATA_ERROR if no marker has been found,
+ or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
+ case, the application may save the current value of total_in which
+ indicates where valid compressed data was found. In the error case, the
+ application may repeatedly call inflateSync, providing more input each time,
+ until success or end of the input data.
+*/
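+
+/*
+   Sketch of the recovery loop described above (refill_input() again
+   stands for the application's input routine):
+
+     while (inflateSync(&s) != Z_OK) {
+         s.next_in = in_buf;
+         if ((s.avail_in = refill_input(in_buf, sizeof(in_buf))) == 0)
+             break;  // end of input, no marker found
+     }
+*/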
+
+extern int inflateReset OF((z_stream *strm));
+/*
+ This function is equivalent to inflateEnd followed by inflateInit,
+ but does not free and reallocate all the internal decompression state.
+ The stream will keep attributes that may have been set by inflateInit2.
+
+ inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being NULL).
+*/
+
+extern int inflateIncomp OF((z_stream *strm));
+/*
+ This function adds the data at next_in (avail_in bytes) to the output
+ history without performing any output. There must be no pending output,
+ and the decompressor must be expecting to see the start of a block.
+ Calling this function is equivalent to decompressing a stored block
+ containing the data at next_in (except that the data is not output).
+*/
+
+ /* checksum functions */
+
+/*
+ This function is not related to compression but is exported
+ anyway because it might be useful in applications using the
+ compression library.
+*/
+
+extern uLong adler32 OF((uLong adler, Bytef *buf, uInt len));
+
+/*
+ Update a running Adler-32 checksum with the bytes buf[0..len-1] and
+ return the updated checksum. If buf is NULL, this function returns
+ the required initial value for the checksum.
+ An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
+ much faster. Usage example:
+
+ uLong adler = adler32(0L, Z_NULL, 0);
+
+ while (read_buffer(buffer, length) != EOF) {
+ adler = adler32(adler, buffer, length);
+ }
+ if (adler != original_adler) error();
+*/
+
+#ifndef _Z_UTIL_H
+ struct internal_state {int dummy;}; /* hack for buggy compilers */
+#endif
+
+#endif /* _ZLIB_H */
diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile
new file mode 100644
index 00000000000..c0a74dc3a0d
--- /dev/null
+++ b/arch/xtensa/boot/lib/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for some libs needed by zImage.
+#
+
+
+lib-y := zlib.o zmem.o
diff --git a/arch/xtensa/boot/lib/memcpy.S b/arch/xtensa/boot/lib/memcpy.S
new file mode 100644
index 00000000000..a029f5df2d5
--- /dev/null
+++ b/arch/xtensa/boot/lib/memcpy.S
@@ -0,0 +1,36 @@
+/*
+ * arch/xtensa/boot/lib/memcpy.S
+ *
+ * ANSI C standard library function memcpy
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+#define _ASMLANGUAGE
+#include <xtensa/config/core.h>
+
+.text
+.align 4
+.global bcopy
+.type bcopy,@function
+bcopy:
+ movi a14, xthal_bcopy // a14 safe to use regardless of whether caller
+ // used call4 or call8 (can't have used call12)
+ jx a14 // let the Core HAL do the work
+
+.text
+.align 4
+.global memcpy
+.type memcpy,@function
+memcpy:
+.global memmove
+.type memmove,@function
+memmove:
+ movi a14, xthal_memcpy // a14 safe to use regardless of whether caller
+ // used call4 or call8 (can't have used call12)
+ jx a14 // let the Core HAL do the work
+
diff --git a/arch/xtensa/boot/lib/zlib.c b/arch/xtensa/boot/lib/zlib.c
new file mode 100644
index 00000000000..e3859f63107
--- /dev/null
+++ b/arch/xtensa/boot/lib/zlib.c
@@ -0,0 +1,2150 @@
+/*
+ * BK Id: SCCS/s.zlib.c 1.8 05/18/01 15:17:24 cort
+ */
+/*
+ * This file is derived from various .h and .c files from the zlib-0.95
+ * distribution by Jean-loup Gailly and Mark Adler, with some additions
+ * by Paul Mackerras to aid in implementing Deflate compression and
+ * decompression for PPP packets. See zlib.h for conditions of
+ * distribution and use.
+ *
+ * Changes that have been made include:
+ * - changed functions not used outside this file to "local"
+ * - added minCompression parameter to deflateInit2
+ * - added Z_PACKET_FLUSH (see zlib.h for details)
+ * - added inflateIncomp
+ *
+ */
+
+/*+++++*/
+/* zutil.h -- internal interface and configuration of the compression library
+ * Copyright (C) 1995 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* From: zutil.h,v 1.9 1995/05/03 17:27:12 jloup Exp */
+
+#define _Z_UTIL_H
+
+#include "zlib.h"
+
+#ifndef local
+# define local static
+#endif
+/* compile with -Dlocal if your debugger can't find static symbols */
+
+#define FAR
+
+typedef unsigned char uch;
+typedef uch FAR uchf;
+typedef unsigned short ush;
+typedef ush FAR ushf;
+typedef unsigned long ulg;
+
+extern char *z_errmsg[]; /* indexed by 1-zlib_error */
+
+#define ERR_RETURN(strm,err) return (strm->msg=z_errmsg[1-err], err)
+/* To be used only when the state is known to be valid */
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+ /* common constants */
+
+#define DEFLATED 8
+
+#ifndef DEF_WBITS
+# define DEF_WBITS MAX_WBITS
+#endif
+/* default windowBits for decompression. MAX_WBITS is for compression only */
+
+#if MAX_MEM_LEVEL >= 8
+# define DEF_MEM_LEVEL 8
+#else
+# define DEF_MEM_LEVEL MAX_MEM_LEVEL
+#endif
+/* default memLevel */
+
+#define STORED_BLOCK 0
+#define STATIC_TREES 1
+#define DYN_TREES 2
+/* The three kinds of block type */
+
+#define MIN_MATCH 3
+#define MAX_MATCH 258
+/* The minimum and maximum match lengths */
+
+ /* functions */
+
+#include <linux/string.h>
+#define zmemcpy memcpy
+#define zmemzero(dest, len) memset(dest, 0, len)
+
+/* Diagnostic functions */
+#ifdef DEBUG_ZLIB
+# include <stdio.h>
+# ifndef verbose
+# define verbose 0
+# endif
+# define Assert(cond,msg) {if(!(cond)) z_error(msg);}
+# define Trace(x) fprintf x
+# define Tracev(x) {if (verbose) fprintf x ;}
+# define Tracevv(x) {if (verbose>1) fprintf x ;}
+# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+#else
+# define Assert(cond,msg)
+# define Trace(x)
+# define Tracev(x)
+# define Tracevv(x)
+# define Tracec(c,x)
+# define Tracecv(c,x)
+#endif
+
+
+typedef uLong (*check_func) OF((uLong check, Bytef *buf, uInt len));
+
+/* voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size)); */
+/* void zcfree OF((voidpf opaque, voidpf ptr)); */
+
+#define ZALLOC(strm, items, size) \
+ (*((strm)->zalloc))((strm)->opaque, (items), (size))
+#define ZFREE(strm, addr, size) \
+ (*((strm)->zfree))((strm)->opaque, (voidpf)(addr), (size))
+#define TRY_FREE(s, p, n) {if (p) ZFREE(s, p, n);}
+
+/* deflate.h -- internal compression state
+ * Copyright (C) 1995 Jean-loup Gailly
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/*+++++*/
+/* infblock.h -- header to use infblock.c
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+struct inflate_blocks_state;
+typedef struct inflate_blocks_state FAR inflate_blocks_statef;
+
+local inflate_blocks_statef * inflate_blocks_new OF((
+ z_stream *z,
+ check_func c, /* check function */
+ uInt w)); /* window size */
+
+local int inflate_blocks OF((
+ inflate_blocks_statef *,
+ z_stream *,
+ int)); /* initial return code */
+
+local void inflate_blocks_reset OF((
+ inflate_blocks_statef *,
+ z_stream *,
+ uLongf *)); /* check value on output */
+
+local int inflate_blocks_free OF((
+ inflate_blocks_statef *,
+ z_stream *,
+ uLongf *)); /* check value on output */
+
+local int inflate_addhistory OF((
+ inflate_blocks_statef *,
+ z_stream *));
+
+local int inflate_packet_flush OF((
+ inflate_blocks_statef *));
+
+/*+++++*/
+/* inftrees.h -- header to use inftrees.c
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* Huffman code lookup table entry--this entry is four bytes for machines
+ that have 16-bit pointers (e.g. PC's in the small or medium model). */
+
+typedef struct inflate_huft_s FAR inflate_huft;
+
+struct inflate_huft_s {
+ union {
+ struct {
+ Byte Exop; /* number of extra bits or operation */
+ Byte Bits; /* number of bits in this code or subcode */
+ } what;
+ uInt Nalloc; /* number of these allocated here */
+ Bytef *pad; /* pad structure to a power of 2 (4 bytes for */
+ } word; /* 16-bit, 8 bytes for 32-bit machines) */
+ union {
+ uInt Base; /* literal, length base, or distance base */
+ inflate_huft *Next; /* pointer to next level of table */
+ } more;
+};
+
+#ifdef DEBUG_ZLIB
+ local uInt inflate_hufts;
+#endif
+
+local int inflate_trees_bits OF((
+ uIntf *, /* 19 code lengths */
+ uIntf *, /* bits tree desired/actual depth */
+ inflate_huft * FAR *, /* bits tree result */
+ z_stream *)); /* for zalloc, zfree functions */
+
+local int inflate_trees_dynamic OF((
+ uInt, /* number of literal/length codes */
+ uInt, /* number of distance codes */
+ uIntf *, /* that many (total) code lengths */
+ uIntf *, /* literal desired/actual bit depth */
+ uIntf *, /* distance desired/actual bit depth */
+ inflate_huft * FAR *, /* literal/length tree result */
+ inflate_huft * FAR *, /* distance tree result */
+ z_stream *)); /* for zalloc, zfree functions */
+
+local int inflate_trees_fixed OF((
+ uIntf *, /* literal desired/actual bit depth */
+ uIntf *, /* distance desired/actual bit depth */
+ inflate_huft * FAR *, /* literal/length tree result */
+ inflate_huft * FAR *)); /* distance tree result */
+
+local int inflate_trees_free OF((
+ inflate_huft *, /* tables to free */
+ z_stream *)); /* for zfree function */
+
+
+/*+++++*/
+/* infcodes.h -- header to use infcodes.c
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+struct inflate_codes_state;
+typedef struct inflate_codes_state FAR inflate_codes_statef;
+
+local inflate_codes_statef *inflate_codes_new OF((
+ uInt, uInt,
+ inflate_huft *, inflate_huft *,
+ z_stream *));
+
+local int inflate_codes OF((
+ inflate_blocks_statef *,
+ z_stream *,
+ int));
+
+local void inflate_codes_free OF((
+ inflate_codes_statef *,
+ z_stream *));
+
+
+/*+++++*/
+/* inflate.c -- zlib interface to inflate modules
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* inflate private state */
+struct internal_state {
+
+ /* mode */
+ enum {
+ METHOD, /* waiting for method byte */
+ FLAG, /* waiting for flag byte */
+ BLOCKS, /* decompressing blocks */
+ CHECK4, /* four check bytes to go */
+ CHECK3, /* three check bytes to go */
+ CHECK2, /* two check bytes to go */
+ CHECK1, /* one check byte to go */
+ DONE, /* finished check, done */
+ BAD} /* got an error--stay here */
+ mode; /* current inflate mode */
+
+ /* mode dependent information */
+ union {
+ uInt method; /* if FLAGS, method byte */
+ struct {
+ uLong was; /* computed check value */
+ uLong need; /* stream check value */
+ } check; /* if CHECK, check values to compare */
+ uInt marker; /* if BAD, inflateSync's marker bytes count */
+ } sub; /* submode */
+
+ /* mode independent information */
+ int nowrap; /* flag for no wrapper */
+ uInt wbits; /* log2(window size) (8..15, defaults to 15) */
+ inflate_blocks_statef
+ *blocks; /* current inflate_blocks state */
+
+};
+
+
+int inflateReset(z)
+z_stream *z;
+{
+ uLong c;
+
+ if (z == Z_NULL || z->state == Z_NULL)
+ return Z_STREAM_ERROR;
+ z->total_in = z->total_out = 0;
+ z->msg = Z_NULL;
+ z->state->mode = z->state->nowrap ? BLOCKS : METHOD;
+ inflate_blocks_reset(z->state->blocks, z, &c);
+ Trace((stderr, "inflate: reset\n"));
+ return Z_OK;
+}
+
+
+int inflateEnd(z)
+z_stream *z;
+{
+ uLong c;
+
+ if (z == Z_NULL || z->state == Z_NULL || z->zfree == Z_NULL)
+ return Z_STREAM_ERROR;
+ if (z->state->blocks != Z_NULL)
+ inflate_blocks_free(z->state->blocks, z, &c);
+ ZFREE(z, z->state, sizeof(struct internal_state));
+ z->state = Z_NULL;
+ Trace((stderr, "inflate: end\n"));
+ return Z_OK;
+}
+
+
+int inflateInit2(z, w)
+z_stream *z;
+int w;
+{
+ /* initialize state */
+ if (z == Z_NULL)
+ return Z_STREAM_ERROR;
+/* if (z->zalloc == Z_NULL) z->zalloc = zcalloc; */
+/* if (z->zfree == Z_NULL) z->zfree = zcfree; */
+ if ((z->state = (struct internal_state FAR *)
+ ZALLOC(z,1,sizeof(struct internal_state))) == Z_NULL)
+ return Z_MEM_ERROR;
+ z->state->blocks = Z_NULL;
+
+ /* handle undocumented nowrap option (no zlib header or check) */
+ z->state->nowrap = 0;
+ if (w < 0)
+ {
+ w = - w;
+ z->state->nowrap = 1;
+ }
+
+ /* set window size */
+ if (w < 8 || w > 15)
+ {
+ inflateEnd(z);
+ return Z_STREAM_ERROR;
+ }
+ z->state->wbits = (uInt)w;
+
+ /* create inflate_blocks state */
+ if ((z->state->blocks =
+ inflate_blocks_new(z, z->state->nowrap ? Z_NULL : adler32, 1 << w))
+ == Z_NULL)
+ {
+ inflateEnd(z);
+ return Z_MEM_ERROR;
+ }
+ Trace((stderr, "inflate: allocated\n"));
+
+ /* reset state */
+ inflateReset(z);
+ return Z_OK;
+}
+
+
+int inflateInit(z)
+z_stream *z;
+{
+ return inflateInit2(z, DEF_WBITS);
+}
+
+
+#define NEEDBYTE {if(z->avail_in==0)goto empty;r=Z_OK;}
+#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
+
+int inflate(z, f)
+z_stream *z;
+int f;
+{
+ int r;
+ uInt b;
+
+ if (z == Z_NULL || z->next_in == Z_NULL)
+ return Z_STREAM_ERROR;
+ r = Z_BUF_ERROR;
+ while (1) switch (z->state->mode)
+ {
+ case METHOD:
+ NEEDBYTE
+ if (((z->state->sub.method = NEXTBYTE) & 0xf) != DEFLATED)
+ {
+ z->state->mode = BAD;
+ z->msg = "unknown compression method";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ if ((z->state->sub.method >> 4) + 8 > z->state->wbits)
+ {
+ z->state->mode = BAD;
+ z->msg = "invalid window size";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ z->state->mode = FLAG;
+ case FLAG:
+ NEEDBYTE
+ if ((b = NEXTBYTE) & 0x20)
+ {
+ z->state->mode = BAD;
+ z->msg = "invalid reserved bit";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ if (((z->state->sub.method << 8) + b) % 31)
+ {
+ z->state->mode = BAD;
+ z->msg = "incorrect header check";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ Trace((stderr, "inflate: zlib header ok\n"));
+ z->state->mode = BLOCKS;
+ case BLOCKS:
+ r = inflate_blocks(z->state->blocks, z, r);
+ if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0)
+ r = inflate_packet_flush(z->state->blocks);
+ if (r == Z_DATA_ERROR)
+ {
+ z->state->mode = BAD;
+ z->state->sub.marker = 0; /* can try inflateSync */
+ break;
+ }
+ if (r != Z_STREAM_END)
+ return r;
+ r = Z_OK;
+ inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was);
+ if (z->state->nowrap)
+ {
+ z->state->mode = DONE;
+ break;
+ }
+ z->state->mode = CHECK4;
+ case CHECK4:
+ NEEDBYTE
+ z->state->sub.check.need = (uLong)NEXTBYTE << 24;
+ z->state->mode = CHECK3;
+ case CHECK3:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 16;
+ z->state->mode = CHECK2;
+ case CHECK2:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 8;
+ z->state->mode = CHECK1;
+ case CHECK1:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE;
+
+ if (z->state->sub.check.was != z->state->sub.check.need)
+ {
+ z->state->mode = BAD;
+ z->msg = "incorrect data check";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ Trace((stderr, "inflate: zlib check ok\n"));
+ z->state->mode = DONE;
+ case DONE:
+ return Z_STREAM_END;
+ case BAD:
+ return Z_DATA_ERROR;
+ default:
+ return Z_STREAM_ERROR;
+ }
+
+ empty:
+ if (f != Z_PACKET_FLUSH)
+ return r;
+ z->state->mode = BAD;
+ z->state->sub.marker = 0; /* can try inflateSync */
+ return Z_DATA_ERROR;
+}
+
+/*
+ * This subroutine adds the data at next_in/avail_in to the output history
+ * without performing any output. The output buffer must be "caught up";
+ * i.e. no pending output (hence s->read equals s->write), and the state must
+ * be BLOCKS (i.e. we should be willing to see the start of a series of
+ * BLOCKS). On exit, the output will also be caught up, and the checksum
+ * will have been updated if need be.
+ */
+
+int inflateIncomp(z)
+z_stream *z;
+{
+ if (z->state->mode != BLOCKS)
+ return Z_DATA_ERROR;
+ return inflate_addhistory(z->state->blocks, z);
+}
+
+
+int inflateSync(z)
+z_stream *z;
+{
+ uInt n; /* number of bytes to look at */
+ Bytef *p; /* pointer to bytes */
+ uInt m; /* number of marker bytes found in a row */
+ uLong r, w; /* temporaries to save total_in and total_out */
+
+ /* set up */
+ if (z == Z_NULL || z->state == Z_NULL)
+ return Z_STREAM_ERROR;
+ if (z->state->mode != BAD)
+ {
+ z->state->mode = BAD;
+ z->state->sub.marker = 0;
+ }
+ if ((n = z->avail_in) == 0)
+ return Z_BUF_ERROR;
+ p = z->next_in;
+ m = z->state->sub.marker;
+
+ /* search */
+ while (n && m < 4)
+ {
+ if (*p == (Byte)(m < 2 ? 0 : 0xff))
+ m++;
+ else if (*p)
+ m = 0;
+ else
+ m = 4 - m;
+ p++, n--;
+ }
+
+ /* restore */
+ z->total_in += p - z->next_in;
+ z->next_in = p;
+ z->avail_in = n;
+ z->state->sub.marker = m;
+
+ /* return no joy or set up to restart on a new block */
+ if (m != 4)
+ return Z_DATA_ERROR;
+ r = z->total_in; w = z->total_out;
+ inflateReset(z);
+ z->total_in = r; z->total_out = w;
+ z->state->mode = BLOCKS;
+ return Z_OK;
+}
+
+#undef NEEDBYTE
+#undef NEXTBYTE
+
+/*+++++*/
+/* infutil.h -- types and macros common to blocks and codes
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* inflate blocks semi-private state */
+struct inflate_blocks_state {
+
+ /* mode */
+ enum {
+ TYPE, /* get type bits (3, including end bit) */
+ LENS, /* get lengths for stored */
+ STORED, /* processing stored block */
+ TABLE, /* get table lengths */
+ BTREE, /* get bit lengths tree for a dynamic block */
+ DTREE, /* get length, distance trees for a dynamic block */
+ CODES, /* processing fixed or dynamic block */
+ DRY, /* output remaining window bytes */
+ DONEB, /* finished last block, done */
+ BADB} /* got a data error--stuck here */
+ mode; /* current inflate_block mode */
+
+ /* mode dependent information */
+ union {
+ uInt left; /* if STORED, bytes left to copy */
+ struct {
+ uInt table; /* table lengths (14 bits) */
+ uInt index; /* index into blens (or border) */
+ uIntf *blens; /* bit lengths of codes */
+ uInt bb; /* bit length tree depth */
+ inflate_huft *tb; /* bit length decoding tree */
+ int nblens; /* # elements allocated at blens */
+ } trees; /* if DTREE, decoding info for trees */
+ struct {
+ inflate_huft *tl, *td; /* trees to free */
+ inflate_codes_statef
+ *codes;
+ } decode; /* if CODES, current state */
+ } sub; /* submode */
+ uInt last; /* true if this block is the last block */
+
+ /* mode independent information */
+ uInt bitk; /* bits in bit buffer */
+ uLong bitb; /* bit buffer */
+ Bytef *window; /* sliding window */
+ Bytef *end; /* one byte after sliding window */
+ Bytef *read; /* window read pointer */
+ Bytef *write; /* window write pointer */
+ check_func checkfn; /* check function */
+ uLong check; /* check on output */
+
+};
+
+
+/* defines for inflate input/output */
+/* update pointers and return */
+#define UPDBITS {s->bitb=b;s->bitk=k;}
+#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
+#define UPDOUT {s->write=q;}
+#define UPDATE {UPDBITS UPDIN UPDOUT}
+#define LEAVE {UPDATE return inflate_flush(s,z,r);}
+/* get bytes and bits */
+#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
+#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
+#define NEXTBYTE (n--,*p++)
+#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
+#define DUMPBITS(j) {b>>=(j);k-=(j);}
+/* output bytes */
+#define WAVAIL (q<s->read?s->read-q-1:s->end-q)
+#define LOADOUT {q=s->write;m=WAVAIL;}
+#define WRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=WAVAIL;}}
+#define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT}
+#define NEEDOUT {if(m==0){WRAP if(m==0){FLUSH WRAP if(m==0) LEAVE}}r=Z_OK;}
+#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
+/* load local pointers */
+#define LOAD {LOADIN LOADOUT}
+
+/*
+ * The IBM 150 firmware munges the data right after _etext[]. This
+ * protects it. -- Cort
+ */
+local uInt protect_mask[] = {0, 0, 0, 0, 0, 0, 0, 0, 0 ,0 ,0 ,0};
+/* And'ing with mask[n] masks the lower n bits */
+local uInt inflate_mask[] = {
+ 0x0000,
+ 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
+ 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
+};
+
+/* copy as much as possible from the sliding window to the output area */
+local int inflate_flush OF((
+ inflate_blocks_statef *,
+ z_stream *,
+ int));
+
+/*+++++*/
+/* inffast.h -- header to use inffast.c
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+local int inflate_fast OF((
+ uInt,
+ uInt,
+ inflate_huft *,
+ inflate_huft *,
+ inflate_blocks_statef *,
+ z_stream *));
+
+
+/*+++++*/
+/* infblock.c -- interpret and process block types to last block
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* Table for deflate from PKZIP's appnote.txt. */
+local uInt border[] = { /* Order of the bit length code lengths */
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+/*
+ Notes beyond the 1.93a appnote.txt:
+
+ 1. Distance pointers never point before the beginning of the output
+ stream.
+ 2. Distance pointers can point back across blocks, up to 32k away.
+ 3. There is an implied maximum of 7 bits for the bit length table and
+ 15 bits for the actual data.
+ 4. If only one code exists, then it is encoded using one bit. (Zero
+ would be more efficient, but perhaps a little confusing.) If two
+ codes exist, they are coded using one bit each (0 and 1).
+ 5. There is no way of sending zero distance codes--a dummy must be
+ sent if there are none. (History: a pre 2.0 version of PKZIP would
+ store blocks with no distance codes, but this was discovered to be
+ too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
+ zero distance codes, which is sent as one code of zero bits in
+ length.
+ 6. There are up to 286 literal/length codes. Code 256 represents the
+ end-of-block. Note however that the static length tree defines
+ 288 codes just to fill out the Huffman codes. Codes 286 and 287
+ cannot be used though, since there is no length base or extra bits
+ defined for them. Similarly, there are up to 30 distance codes.
+ However, static trees define 32 codes (all 5 bits) to fill out the
+ Huffman codes, but the last two had better not show up in the data.
+ 7. Unzip can check dynamic Huffman blocks for complete code sets.
+ The exception is that a single code would not be complete (see #4).
+ 8. The five bits following the block type is really the number of
+ literal codes sent minus 257.
+ 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
+ (1+6+6). Therefore, to output three times the length, you output
+ three codes (1+1+1), whereas to output four times the same length,
+ you only need two codes (1+3). Hmm.
+ 10. In the tree reconstruction algorithm, Code = Code + Increment
+ only if BitLength(i) is not zero. (Pretty obvious.)
+ 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
+ 12. Note: length code 284 can represent 227-258, but length code 285
+ really is 258. The last length deserves its own, short code
+ since it gets used a lot in very redundant files. The length
+ 258 is special since 258 - 3 (the min match length) is 255.
+ 13. The literal/length and distance code bit lengths are read as a
+ single stream of lengths. It is possible (and advantageous) for
+ a repeat code (16, 17, or 18) to go across the boundary between
+ the two sets of lengths.
+ */
+
+
+local void inflate_blocks_reset(s, z, c)
+inflate_blocks_statef *s;
+z_stream *z;
+uLongf *c;
+{
+ if (s->checkfn != Z_NULL)
+ *c = s->check;
+ if (s->mode == BTREE || s->mode == DTREE)
+ ZFREE(z, s->sub.trees.blens, s->sub.trees.nblens * sizeof(uInt));
+ if (s->mode == CODES)
+ {
+ inflate_codes_free(s->sub.decode.codes, z);
+ inflate_trees_free(s->sub.decode.td, z);
+ inflate_trees_free(s->sub.decode.tl, z);
+ }
+ s->mode = TYPE;
+ s->bitk = 0;
+ s->bitb = 0;
+ s->read = s->write = s->window;
+ if (s->checkfn != Z_NULL)
+ s->check = (*s->checkfn)(0L, Z_NULL, 0);
+ Trace((stderr, "inflate: blocks reset\n"));
+}
+
+
+local inflate_blocks_statef *inflate_blocks_new(z, c, w)
+z_stream *z;
+check_func c;
+uInt w;
+{
+ inflate_blocks_statef *s;
+
+ if ((s = (inflate_blocks_statef *)ZALLOC
+ (z,1,sizeof(struct inflate_blocks_state))) == Z_NULL)
+ return s;
+ if ((s->window = (Bytef *)ZALLOC(z, 1, w)) == Z_NULL)
+ {
+ ZFREE(z, s, sizeof(struct inflate_blocks_state));
+ return Z_NULL;
+ }
+ s->end = s->window + w;
+ s->checkfn = c;
+ s->mode = TYPE;
+ Trace((stderr, "inflate: blocks allocated\n"));
+ inflate_blocks_reset(s, z, &s->check);
+ return s;
+}
+
+
+local int inflate_blocks(s, z, r)
+inflate_blocks_statef *s;
+z_stream *z;
+int r;
+{
+ uInt t; /* temporary storage */
+ uLong b; /* bit buffer */
+ uInt k; /* bits in bit buffer */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+
+ /* copy input/output information to locals (UPDATE macro restores) */
+ LOAD
+
+ /* process input based on current state */
+ while (1) switch (s->mode)
+ {
+ case TYPE:
+ NEEDBITS(3)
+ t = (uInt)b & 7;
+ s->last = t & 1;
+ switch (t >> 1)
+ {
+ case 0: /* stored */
+ Trace((stderr, "inflate: stored block%s\n",
+ s->last ? " (last)" : ""));
+ DUMPBITS(3)
+ t = k & 7; /* go to byte boundary */
+ DUMPBITS(t)
+ s->mode = LENS; /* get length of stored block */
+ break;
+ case 1: /* fixed */
+ Trace((stderr, "inflate: fixed codes block%s\n",
+ s->last ? " (last)" : ""));
+ {
+ uInt bl, bd;
+ inflate_huft *tl, *td;
+
+ inflate_trees_fixed(&bl, &bd, &tl, &td);
+ s->sub.decode.codes = inflate_codes_new(bl, bd, tl, td, z);
+ if (s->sub.decode.codes == Z_NULL)
+ {
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ s->sub.decode.tl = Z_NULL; /* don't try to free these */
+ s->sub.decode.td = Z_NULL;
+ }
+ DUMPBITS(3)
+ s->mode = CODES;
+ break;
+ case 2: /* dynamic */
+ Trace((stderr, "inflate: dynamic codes block%s\n",
+ s->last ? " (last)" : ""));
+ DUMPBITS(3)
+ s->mode = TABLE;
+ break;
+ case 3: /* illegal */
+ DUMPBITS(3)
+ s->mode = BADB;
+ z->msg = "invalid block type";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ break;
+ case LENS:
+ NEEDBITS(32)
+ if (((~b) >> 16) != (b & 0xffff))
+ {
+ s->mode = BADB;
+ z->msg = "invalid stored block lengths";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ s->sub.left = (uInt)b & 0xffff;
+ b = k = 0; /* dump bits */
+ Tracev((stderr, "inflate: stored length %u\n", s->sub.left));
+ s->mode = s->sub.left ? STORED : TYPE;
+ break;
+ case STORED:
+ if (n == 0)
+ LEAVE
+ NEEDOUT
+ t = s->sub.left;
+ if (t > n) t = n;
+ if (t > m) t = m;
+ zmemcpy(q, p, t);
+ p += t; n -= t;
+ q += t; m -= t;
+ if ((s->sub.left -= t) != 0)
+ break;
+ Tracev((stderr, "inflate: stored end, %lu total out\n",
+ z->total_out + (q >= s->read ? q - s->read :
+ (s->end - s->read) + (q - s->window))));
+ s->mode = s->last ? DRY : TYPE;
+ break;
+ case TABLE:
+ NEEDBITS(14)
+ s->sub.trees.table = t = (uInt)b & 0x3fff;
+#ifndef PKZIP_BUG_WORKAROUND
+ if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
+ {
+ s->mode = BADB;
+ z->msg = "too many length or distance symbols";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+#endif
+ t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
+ if (t < 19)
+ t = 19;
+ if ((s->sub.trees.blens = (uIntf*)ZALLOC(z, t, sizeof(uInt))) == Z_NULL)
+ {
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ s->sub.trees.nblens = t;
+ DUMPBITS(14)
+ s->sub.trees.index = 0;
+ Tracev((stderr, "inflate: table sizes ok\n"));
+ s->mode = BTREE;
+ case BTREE:
+ while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10))
+ {
+ NEEDBITS(3)
+ s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7;
+ DUMPBITS(3)
+ }
+ while (s->sub.trees.index < 19)
+ s->sub.trees.blens[border[s->sub.trees.index++]] = 0;
+ s->sub.trees.bb = 7;
+ t = inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb,
+ &s->sub.trees.tb, z);
+ if (t != Z_OK)
+ {
+ r = t;
+ if (r == Z_DATA_ERROR)
+ s->mode = BADB;
+ LEAVE
+ }
+ s->sub.trees.index = 0;
+ Tracev((stderr, "inflate: bits tree ok\n"));
+ s->mode = DTREE;
+ case DTREE:
+ while (t = s->sub.trees.table,
+ s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))
+ {
+ inflate_huft *h;
+ uInt i, j, c;
+
+ t = s->sub.trees.bb;
+ NEEDBITS(t)
+ h = s->sub.trees.tb + ((uInt)b & inflate_mask[t]);
+ t = h->word.what.Bits;
+ c = h->more.Base;
+ if (c < 16)
+ {
+ DUMPBITS(t)
+ s->sub.trees.blens[s->sub.trees.index++] = c;
+ }
+ else /* c == 16..18 */
+ {
+ i = c == 18 ? 7 : c - 14;
+ j = c == 18 ? 11 : 3;
+ NEEDBITS(t + i)
+ DUMPBITS(t)
+ j += (uInt)b & inflate_mask[i];
+ DUMPBITS(i)
+ i = s->sub.trees.index;
+ t = s->sub.trees.table;
+ if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) ||
+ (c == 16 && i < 1))
+ {
+ s->mode = BADB;
+ z->msg = "invalid bit length repeat";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ c = c == 16 ? s->sub.trees.blens[i - 1] : 0;
+ do {
+ s->sub.trees.blens[i++] = c;
+ } while (--j);
+ s->sub.trees.index = i;
+ }
+ }
+ inflate_trees_free(s->sub.trees.tb, z);
+ s->sub.trees.tb = Z_NULL;
+ {
+ uInt bl, bd;
+ inflate_huft *tl, *td;
+ inflate_codes_statef *c;
+
+ bl = 9; /* must be <= 9 for lookahead assumptions */
+ bd = 6; /* must be <= 9 for lookahead assumptions */
+ t = s->sub.trees.table;
+ t = inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f),
+ s->sub.trees.blens, &bl, &bd, &tl, &td, z);
+ if (t != Z_OK)
+ {
+ if (t == (uInt)Z_DATA_ERROR)
+ s->mode = BADB;
+ r = t;
+ LEAVE
+ }
+ Tracev((stderr, "inflate: trees ok\n"));
+ if ((c = inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL)
+ {
+ inflate_trees_free(td, z);
+ inflate_trees_free(tl, z);
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ ZFREE(z, s->sub.trees.blens, s->sub.trees.nblens * sizeof(uInt));
+ s->sub.decode.codes = c;
+ s->sub.decode.tl = tl;
+ s->sub.decode.td = td;
+ }
+ s->mode = CODES;
+ case CODES:
+ UPDATE
+ if ((r = inflate_codes(s, z, r)) != Z_STREAM_END)
+ return inflate_flush(s, z, r);
+ r = Z_OK;
+ inflate_codes_free(s->sub.decode.codes, z);
+ inflate_trees_free(s->sub.decode.td, z);
+ inflate_trees_free(s->sub.decode.tl, z);
+ LOAD
+ Tracev((stderr, "inflate: codes end, %lu total out\n",
+ z->total_out + (q >= s->read ? q - s->read :
+ (s->end - s->read) + (q - s->window))));
+ if (!s->last)
+ {
+ s->mode = TYPE;
+ break;
+ }
+ if (k > 7) /* return unused byte, if any */
+ {
+ Assert(k < 16, "inflate_codes grabbed too many bytes")
+ k -= 8;
+ n++;
+ p--; /* can always return one */
+ }
+ s->mode = DRY;
+ case DRY:
+ FLUSH
+ if (s->read != s->write)
+ LEAVE
+ s->mode = DONEB;
+ case DONEB:
+ r = Z_STREAM_END;
+ LEAVE
+ case BADB:
+ r = Z_DATA_ERROR;
+ LEAVE
+ default:
+ r = Z_STREAM_ERROR;
+ LEAVE
+ }
+}
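
The state machine above leans entirely on the LOAD/NEEDBITS/DUMPBITS/LEAVE
macros defined earlier in this file.  The following self-contained sketch
paraphrases the bit-buffer discipline they implement rather than quoting the
macro text; struct bitreader and the function names are invented for
illustration:

#include <stddef.h>

/* (b,k) is a little-endian bit buffer: b holds input bytes spliced in
 * LSB-first, k counts the valid bits currently in b. */
struct bitreader {
    unsigned long b;         /* bit buffer */
    unsigned int k;          /* bits in bit buffer */
    const unsigned char *p;  /* next input byte */
    size_t n;                /* input bytes left */
};

/* NEEDBITS(j): top the buffer up until at least j bits are available.
 * Returns 0 on success, -1 where the real macro would LEAVE (save all
 * state and suspend until more input arrives). */
static int needbits(struct bitreader *br, unsigned int j)
{
    while (br->k < j) {
        if (br->n == 0)
            return -1;
        br->b |= (unsigned long)(*br->p++) << br->k;
        br->n--;
        br->k += 8;
    }
    return 0;
}

/* DUMPBITS(j): consume j bits after they have been examined with
 * b & inflate_mask[j]. */
static void dumpbits(struct bitreader *br, unsigned int j)
{
    br->b >>= j;
    br->k -= j;
}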
+
+
+local int inflate_blocks_free(s, z, c)
+inflate_blocks_statef *s;
+z_stream *z;
+uLongf *c;
+{
+ inflate_blocks_reset(s, z, c);
+ ZFREE(z, s->window, s->end - s->window);
+ ZFREE(z, s, sizeof(struct inflate_blocks_state));
+ Trace((stderr, "inflate: blocks freed\n"));
+ return Z_OK;
+}
+
+/*
+ * This subroutine adds the data at next_in/avail_in to the output history
+ * without performing any output. The output buffer must be "caught up";
+ * i.e. no pending output (hence s->read equals s->write), and the state
+ * must be TYPE, i.e. between blocks and ready to see the start of a new
+ * one (which is what the checks below enforce).  On exit, the output
+ * will also be caught up, and the checksum will have been updated if
+ * need be.
+ */
+local int inflate_addhistory(s, z)
+inflate_blocks_statef *s;
+z_stream *z;
+{
+ uLong b; /* bit buffer */ /* NOT USED HERE */
+ uInt k; /* bits in bit buffer */ /* NOT USED HERE */
+ uInt t; /* temporary storage */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+
+ if (s->read != s->write)
+ return Z_STREAM_ERROR;
+ if (s->mode != TYPE)
+ return Z_DATA_ERROR;
+
+ /* we're ready to rock */
+ LOAD
+ /* while there is input ready, copy to output buffer, moving
+ * pointers as needed.
+ */
+ while (n) {
+ t = n; /* how many to do */
+ /* is there room until end of buffer? */
+ if (t > m) t = m;
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ s->check = (*s->checkfn)(s->check, q, t);
+ zmemcpy(q, p, t);
+ q += t;
+ p += t;
+ n -= t;
+ z->total_out += t;
+ s->read = q; /* drag read pointer forward */
+/* WRAP */ /* expand WRAP macro by hand to handle s->read */
+ if (q == s->end) {
+ s->read = q = s->window;
+ m = WAVAIL;
+ }
+ }
+ UPDATE
+ return Z_OK;
+}
+
+
+/*
+ * At the end of a Deflate-compressed PPP packet, we expect to have seen
+ * a `stored' block type value but not the (zero) length bytes.
+ */
+local int inflate_packet_flush(s)
+ inflate_blocks_statef *s;
+{
+ if (s->mode != LENS)
+ return Z_DATA_ERROR;
+ s->mode = TYPE;
+ return Z_OK;
+}
+
+
+/*+++++*/
+/* inftrees.c -- generate Huffman trees for efficient decoding
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* simplify the use of the inflate_huft type with some defines */
+#define base more.Base
+#define next more.Next
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+
+local int huft_build OF((
+ uIntf *, /* code lengths in bits */
+ uInt, /* number of codes */
+ uInt, /* number of "simple" codes */
+ uIntf *, /* list of base values for non-simple codes */
+ uIntf *, /* list of extra bits for non-simple codes */
+ inflate_huft * FAR*,/* result: starting table */
+ uIntf *, /* maximum lookup bits (returns actual) */
+ z_stream *)); /* for zalloc function */
+
+local voidpf falloc OF((
+ voidpf, /* opaque pointer (not used) */
+ uInt, /* number of items */
+ uInt)); /* size of item */
+
+local void ffree OF((
+ voidpf q, /* opaque pointer (not used) */
+ voidpf p, /* what to free (not used) */
+ uInt n)); /* number of bytes (not used) */
+
+/* Tables for deflate from PKZIP's appnote.txt. */
+local uInt cplens[] = { /* Copy lengths for literal codes 257..285 */
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
+ 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
+ /* actually lengths - 2; also see note #13 above about 258 */
+local uInt cplext[] = { /* Extra bits for literal codes 257..285 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 192, 192}; /* 192==invalid */
+local uInt cpdist[] = { /* Copy offsets for distance codes 0..29 */
+ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
+ 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
+ 8193, 12289, 16385, 24577};
+local uInt cpdext[] = { /* Extra bits for distance codes */
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+ 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
+ 12, 12, 13, 13};
+
+/*
+ Huffman code decoding is performed using a multi-level table lookup.
+ The fastest way to decode is to simply build a lookup table whose
+ size is determined by the longest code. However, the time it takes
+ to build this table can also be a factor if the data being decoded
+ is not very long. The most common codes are necessarily the
+ shortest codes, so those codes dominate the decoding time, and hence
+ the speed. The idea is you can have a shorter table that decodes the
+ shorter, more probable codes, and then point to subsidiary tables for
+ the longer codes. The time it costs to decode the longer codes is
+ then traded against the time it takes to make longer tables.
+
+   The results of this trade-off are in the variables lbits and dbits
+ below. lbits is the number of bits the first level table for literal/
+ length codes can decode in one step, and dbits is the same thing for
+ the distance codes. Subsequent tables are also less than or equal to
+ those sizes. These values may be adjusted either when all of the
+ codes are shorter than that, in which case the longest code length in
+ bits is used, or when the shortest code is *longer* than the requested
+ table size, in which case the length of the shortest code in bits is
+ used.
+
+ There are two different values for the two tables, since they code a
+ different number of possibilities each. The literal/length table
+ codes 286 possible values, or in a flat code, a little over eight
+ bits. The distance table codes 30 possible values, or a little less
+ than five bits, flat. The optimum values for speed end up being
+ about one bit more than those, so lbits is 8+1 and dbits is 5+1.
+ The optimum values may differ though from machine to machine, and
+ possibly even between compilers. Your mileage may vary.
+ */
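
A minimal version of the two-level lookup just described, with an invented
entry layout (not part of the patch); the real inflate_huft packs the same
information into its exop/bits/base fields, and deflate's LSB-first bit order
lets the low bits of the bit buffer index the table directly:

/* Leaf entries carry a symbol and its total code length; link entries
 * carry a subtable pointer and the bit width that subtable is indexed
 * by. */
struct entry {
    unsigned char is_leaf;
    unsigned char len;    /* leaf: full code length; link: subtable bits */
    unsigned short sym;   /* leaf only */
    struct entry *sub;    /* link only */
};

/* Decode one symbol from bit buffer b using a root table indexed by the
 * low lbits bits; *consumed reports how many bits the code used. */
static unsigned int decode(const struct entry *root, unsigned int lbits,
                           unsigned long b, unsigned int *consumed)
{
    const struct entry *e = &root[b & ((1UL << lbits) - 1)];

    if (!e->is_leaf)  /* long code: the next e->len bits pick the entry */
        e = &e->sub[(b >> lbits) & ((1UL << e->len) - 1)];
    *consumed = e->len;
    return e->sym;
}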
+
+
+/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
+#define BMAX 15 /* maximum bit length of any code */
+#define N_MAX 288 /* maximum number of codes in any set */
+
+#ifdef DEBUG_ZLIB
+ uInt inflate_hufts;
+#endif
+
+local int huft_build(b, n, s, d, e, t, m, zs)
+uIntf *b; /* code lengths in bits (all assumed <= BMAX) */
+uInt n; /* number of codes (assumed <= N_MAX) */
+uInt s; /* number of simple-valued codes (0..s-1) */
+uIntf *d; /* list of base values for non-simple codes */
+uIntf *e; /* list of extra bits for non-simple codes */
+inflate_huft * FAR *t; /* result: starting table */
+uIntf *m; /* maximum lookup bits, returns actual */
+z_stream *zs; /* for zalloc function */
+/* Given a list of code lengths and a maximum table size, make a set of
+ tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR
+ if the given code set is incomplete (the tables are still built in this
+ case), Z_DATA_ERROR if the input is invalid (all zero length codes or an
+ over-subscribed set of lengths), or Z_MEM_ERROR if not enough memory. */
+{
+
+ uInt a; /* counter for codes of length k */
+ uInt c[BMAX+1]; /* bit length count table */
+ uInt f; /* i repeats in table every f entries */
+ int g; /* maximum code length */
+ int h; /* table level */
+ register uInt i; /* counter, current code */
+ register uInt j; /* counter */
+ register int k; /* number of bits in current code */
+ int l; /* bits per table (returned in m) */
+ register uIntf *p; /* pointer into c[], b[], or v[] */
+ inflate_huft *q; /* points to current table */
+ struct inflate_huft_s r; /* table entry for structure assignment */
+ inflate_huft *u[BMAX]; /* table stack */
+ uInt v[N_MAX]; /* values in order of bit length */
+ register int w; /* bits before this table == (l * h) */
+ uInt x[BMAX+1]; /* bit offsets, then code stack */
+ uIntf *xp; /* pointer into x */
+ int y; /* number of dummy codes added */
+ uInt z; /* number of entries in current table */
+
+
+ /* Generate counts for each bit length */
+ p = c;
+#define C0 *p++ = 0;
+#define C2 C0 C0 C0 C0
+#define C4 C2 C2 C2 C2
+ C4 /* clear c[]--assume BMAX+1 is 16 */
+ p = b; i = n;
+ do {
+ c[*p++]++; /* assume all entries <= BMAX */
+ } while (--i);
+ if (c[0] == n) /* null input--all zero length codes */
+ {
+ *t = (inflate_huft *)Z_NULL;
+ *m = 0;
+ return Z_OK;
+ }
+
+
+ /* Find minimum and maximum length, bound *m by those */
+ l = *m;
+ for (j = 1; j <= BMAX; j++)
+ if (c[j])
+ break;
+ k = j; /* minimum code length */
+ if ((uInt)l < j)
+ l = j;
+ for (i = BMAX; i; i--)
+ if (c[i])
+ break;
+ g = i; /* maximum code length */
+ if ((uInt)l > i)
+ l = i;
+ *m = l;
+
+
+ /* Adjust last length count to fill out codes, if needed */
+ for (y = 1 << j; j < i; j++, y <<= 1)
+ if ((y -= c[j]) < 0)
+ return Z_DATA_ERROR;
+ if ((y -= c[i]) < 0)
+ return Z_DATA_ERROR;
+ c[i] += y;
+
+
+ /* Generate starting offsets into the value table for each length */
+ x[1] = j = 0;
+ p = c + 1; xp = x + 2;
+ while (--i) { /* note that i == g from above */
+ *xp++ = (j += *p++);
+ }
+
+
+ /* Make a table of values in order of bit lengths */
+ p = b; i = 0;
+ do {
+ if ((j = *p++) != 0)
+ v[x[j]++] = i;
+ } while (++i < n);
+
+
+ /* Generate the Huffman codes and for each, make the table entries */
+ x[0] = i = 0; /* first Huffman code is zero */
+ p = v; /* grab values in bit order */
+ h = -1; /* no tables yet--level -1 */
+ w = -l; /* bits decoded == (l * h) */
+ u[0] = (inflate_huft *)Z_NULL; /* just to keep compilers happy */
+ q = (inflate_huft *)Z_NULL; /* ditto */
+ z = 0; /* ditto */
+
+ /* go through the bit lengths (k already is bits in shortest code) */
+ for (; k <= g; k++)
+ {
+ a = c[k];
+ while (a--)
+ {
+ /* here i is the Huffman code of length k bits for value *p */
+ /* make tables up to required level */
+ while (k > w + l)
+ {
+ h++;
+ w += l; /* previous table always l bits */
+
+ /* compute minimum size table less than or equal to l bits */
+ z = (z = g - w) > (uInt)l ? l : z; /* table size upper limit */
+ if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
+ { /* too few codes for k-w bit table */
+ f -= a + 1; /* deduct codes from patterns left */
+ xp = c + k;
+ if (j < z)
+ while (++j < z) /* try smaller tables up to z bits */
+ {
+ if ((f <<= 1) <= *++xp)
+ break; /* enough codes to use up j bits */
+ f -= *xp; /* else deduct codes from patterns */
+ }
+ }
+ z = 1 << j; /* table entries for j-bit table */
+
+ /* allocate and link in new table */
+ if ((q = (inflate_huft *)ZALLOC
+ (zs,z + 1,sizeof(inflate_huft))) == Z_NULL)
+ {
+ if (h)
+ inflate_trees_free(u[0], zs);
+ return Z_MEM_ERROR; /* not enough memory */
+ }
+ q->word.Nalloc = z + 1;
+#ifdef DEBUG_ZLIB
+ inflate_hufts += z + 1;
+#endif
+ *t = q + 1; /* link to list for huft_free() */
+ *(t = &(q->next)) = Z_NULL;
+ u[h] = ++q; /* table starts after link */
+
+ /* connect to last table, if there is one */
+ if (h)
+ {
+ x[h] = i; /* save pattern for backing up */
+ r.bits = (Byte)l; /* bits to dump before this table */
+ r.exop = (Byte)j; /* bits in this table */
+ r.next = q; /* pointer to this table */
+ j = i >> (w - l); /* (get around Turbo C bug) */
+ u[h-1][j] = r; /* connect to last table */
+ }
+ }
+
+ /* set up table entry in r */
+ r.bits = (Byte)(k - w);
+ if (p >= v + n)
+ r.exop = 128 + 64; /* out of values--invalid code */
+ else if (*p < s)
+ {
+ r.exop = (Byte)(*p < 256 ? 0 : 32 + 64); /* 256 is end-of-block */
+ r.base = *p++; /* simple code is just the value */
+ }
+ else
+ {
+ r.exop = (Byte)e[*p - s] + 16 + 64; /* non-simple--look up in lists */
+ r.base = d[*p++ - s];
+ }
+
+ /* fill code-like entries with r */
+ f = 1 << (k - w);
+ for (j = i >> w; j < z; j += f)
+ q[j] = r;
+
+ /* backwards increment the k-bit code i */
+ for (j = 1 << (k - 1); i & j; j >>= 1)
+ i ^= j;
+ i ^= j;
+
+ /* backup over finished tables */
+ while ((i & ((1 << w) - 1)) != x[h])
+ {
+ h--; /* don't need to update q */
+ w -= l;
+ }
+ }
+ }
+
+
+ /* Return Z_BUF_ERROR if we were given an incomplete table */
+ return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
+}
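
The counting, offset and value-sorting passes above are the canonical Huffman
construction of RFC 1951, section 3.2.2.  Stated on its own, with invented
names (a sketch, not part of the patch):

#define MAXBITS 15

/* Given code lengths len[0..n-1], assign the canonical code values:
 * count codes per length, derive the first code of each length, then
 * hand out consecutive codes within a length in symbol order. */
static void canonical_codes(const unsigned int *len, unsigned int n,
                            unsigned int *code)
{
    unsigned int count[MAXBITS + 1] = {0};
    unsigned int first[MAXBITS + 1];
    unsigned int bits, i, c = 0;

    for (i = 0; i < n; i++)
        count[len[i]]++;
    count[0] = 0;                        /* zero-length codes don't exist */
    for (bits = 1; bits <= MAXBITS; bits++) {
        c = (c + count[bits - 1]) << 1;  /* first code of this length */
        first[bits] = c;
    }
    for (i = 0; i < n; i++)
        if (len[i] != 0)
            code[i] = first[len[i]]++;
}

huft_build() never materializes the codes this way; because deflate transmits
them LSB-first it instead steps through table indices with the bit-reversed
increment seen near the end of its main loop.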
+
+
+local int inflate_trees_bits(c, bb, tb, z)
+uIntf *c; /* 19 code lengths */
+uIntf *bb; /* bits tree desired/actual depth */
+inflate_huft * FAR *tb; /* bits tree result */
+z_stream *z; /* for zfree function */
+{
+ int r;
+
+ r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL, tb, bb, z);
+ if (r == Z_DATA_ERROR)
+ z->msg = "oversubscribed dynamic bit lengths tree";
+ else if (r == Z_BUF_ERROR)
+ {
+ inflate_trees_free(*tb, z);
+ z->msg = "incomplete dynamic bit lengths tree";
+ r = Z_DATA_ERROR;
+ }
+ return r;
+}
+
+
+local int inflate_trees_dynamic(nl, nd, c, bl, bd, tl, td, z)
+uInt nl; /* number of literal/length codes */
+uInt nd; /* number of distance codes */
+uIntf *c; /* that many (total) code lengths */
+uIntf *bl; /* literal desired/actual bit depth */
+uIntf *bd; /* distance desired/actual bit depth */
+inflate_huft * FAR *tl; /* literal/length tree result */
+inflate_huft * FAR *td; /* distance tree result */
+z_stream *z; /* for zfree function */
+{
+ int r;
+
+ /* build literal/length tree */
+ if ((r = huft_build(c, nl, 257, cplens, cplext, tl, bl, z)) != Z_OK)
+ {
+ if (r == Z_DATA_ERROR)
+ z->msg = "oversubscribed literal/length tree";
+ else if (r == Z_BUF_ERROR)
+ {
+ inflate_trees_free(*tl, z);
+ z->msg = "incomplete literal/length tree";
+ r = Z_DATA_ERROR;
+ }
+ return r;
+ }
+
+ /* build distance tree */
+ if ((r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, z)) != Z_OK)
+ {
+ if (r == Z_DATA_ERROR)
+      z->msg = "oversubscribed distance tree";
+ else if (r == Z_BUF_ERROR) {
+#ifdef PKZIP_BUG_WORKAROUND
+ r = Z_OK;
+ }
+#else
+ inflate_trees_free(*td, z);
+      z->msg = "incomplete distance tree";
+ r = Z_DATA_ERROR;
+ }
+ inflate_trees_free(*tl, z);
+ return r;
+#endif
+ }
+
+ /* done */
+ return Z_OK;
+}
+
+
+/* build fixed tables only once--keep them here */
+local int fixed_lock = 0;
+local int fixed_built = 0;
+#define FIXEDH 530 /* number of hufts used by fixed tables */
+local uInt fixed_left = FIXEDH;
+local inflate_huft fixed_mem[FIXEDH];
+local uInt fixed_bl;
+local uInt fixed_bd;
+local inflate_huft *fixed_tl;
+local inflate_huft *fixed_td;
+
+
+local voidpf falloc(q, n, s)
+voidpf q; /* opaque pointer (not used) */
+uInt n; /* number of items */
+uInt s; /* size of item */
+{
+ Assert(s == sizeof(inflate_huft) && n <= fixed_left,
+ "inflate_trees falloc overflow");
+ if (q) s++; /* to make some compilers happy */
+ fixed_left -= n;
+ return (voidpf)(fixed_mem + fixed_left);
+}
+
+
+local void ffree(q, p, n)
+voidpf q;
+voidpf p;
+uInt n;
+{
+ Assert(0, "inflate_trees ffree called!");
+ if (q) q = p; /* to make some compilers happy */
+}
+
+
+local int inflate_trees_fixed(bl, bd, tl, td)
+uIntf *bl; /* literal desired/actual bit depth */
+uIntf *bd; /* distance desired/actual bit depth */
+inflate_huft * FAR *tl; /* literal/length tree result */
+inflate_huft * FAR *td; /* distance tree result */
+{
+ /* build fixed tables if not built already--lock out other instances */
+ while (++fixed_lock > 1)
+ fixed_lock--;
+ if (!fixed_built)
+ {
+ int k; /* temporary variable */
+ unsigned c[288]; /* length list for huft_build */
+ z_stream z; /* for falloc function */
+
+ /* set up fake z_stream for memory routines */
+ z.zalloc = falloc;
+ z.zfree = ffree;
+ z.opaque = Z_NULL;
+
+ /* literal table */
+ for (k = 0; k < 144; k++)
+ c[k] = 8;
+ for (; k < 256; k++)
+ c[k] = 9;
+ for (; k < 280; k++)
+ c[k] = 7;
+ for (; k < 288; k++)
+ c[k] = 8;
+ fixed_bl = 7;
+ huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl, &z);
+
+ /* distance table */
+ for (k = 0; k < 30; k++)
+ c[k] = 5;
+ fixed_bd = 5;
+ huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd, &z);
+
+ /* done */
+ fixed_built = 1;
+ }
+ fixed_lock--;
+ *bl = fixed_bl;
+ *bd = fixed_bd;
+ *tl = fixed_tl;
+ *td = fixed_td;
+ return Z_OK;
+}
+
+
+local int inflate_trees_free(t, z)
+inflate_huft *t; /* table to free */
+z_stream *z; /* for zfree function */
+/* Free the malloc'ed tables built by huft_build(), which makes a linked
+ list of the tables it made, with the links in a dummy first entry of
+ each table. */
+{
+ register inflate_huft *p, *q;
+
+ /* Go through linked list, freeing from the malloced (t[-1]) address. */
+ p = t;
+ while (p != Z_NULL)
+ {
+ q = (--p)->next;
+ ZFREE(z, p, p->word.Nalloc * sizeof(inflate_huft));
+ p = q;
+ }
+ return Z_OK;
+}
+
+/*+++++*/
+/* infcodes.c -- process literals and length/distance pairs
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* simplify the use of the inflate_huft type with some defines */
+#define base more.Base
+#define next more.Next
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+/* inflate codes private state */
+struct inflate_codes_state {
+
+ /* mode */
+ enum { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
+ START, /* x: set up for LEN */
+ LEN, /* i: get length/literal/eob next */
+ LENEXT, /* i: getting length extra (have base) */
+ DIST, /* i: get distance next */
+ DISTEXT, /* i: getting distance extra */
+ COPY, /* o: copying bytes in window, waiting for space */
+ LIT, /* o: got literal, waiting for output space */
+ WASH, /* o: got eob, possibly still output waiting */
+ END, /* x: got eob and all data flushed */
+ BADCODE} /* x: got error */
+ mode; /* current inflate_codes mode */
+
+ /* mode dependent information */
+ uInt len;
+ union {
+ struct {
+ inflate_huft *tree; /* pointer into tree */
+ uInt need; /* bits needed */
+ } code; /* if LEN or DIST, where in tree */
+ uInt lit; /* if LIT, literal */
+ struct {
+ uInt get; /* bits to get for extra */
+ uInt dist; /* distance back to copy from */
+ } copy; /* if EXT or COPY, where and how much */
+ } sub; /* submode */
+
+ /* mode independent information */
+ Byte lbits; /* ltree bits decoded per branch */
+  Byte dbits;           /* dtree bits decoded per branch */
+ inflate_huft *ltree; /* literal/length/eob tree */
+ inflate_huft *dtree; /* distance tree */
+
+};
+
+
+local inflate_codes_statef *inflate_codes_new(bl, bd, tl, td, z)
+uInt bl, bd;
+inflate_huft *tl, *td;
+z_stream *z;
+{
+ inflate_codes_statef *c;
+
+ if ((c = (inflate_codes_statef *)
+ ZALLOC(z,1,sizeof(struct inflate_codes_state))) != Z_NULL)
+ {
+ c->mode = START;
+ c->lbits = (Byte)bl;
+ c->dbits = (Byte)bd;
+ c->ltree = tl;
+ c->dtree = td;
+ Tracev((stderr, "inflate: codes new\n"));
+ }
+ return c;
+}
+
+
+local int inflate_codes(s, z, r)
+inflate_blocks_statef *s;
+z_stream *z;
+int r;
+{
+ uInt j; /* temporary storage */
+ inflate_huft *t; /* temporary pointer */
+ uInt e; /* extra bits or operation */
+ uLong b; /* bit buffer */
+ uInt k; /* bits in bit buffer */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+ Bytef *f; /* pointer to copy strings from */
+ inflate_codes_statef *c = s->sub.decode.codes; /* codes state */
+
+ /* copy input/output information to locals (UPDATE macro restores) */
+ LOAD
+
+ /* process input and output based on current state */
+ while (1) switch (c->mode)
+ { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
+ case START: /* x: set up for LEN */
+#ifndef SLOW
+ if (m >= 258 && n >= 10)
+ {
+ UPDATE
+ r = inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z);
+ LOAD
+ if (r != Z_OK)
+ {
+ c->mode = r == Z_STREAM_END ? WASH : BADCODE;
+ break;
+ }
+ }
+#endif /* !SLOW */
+ c->sub.code.need = c->lbits;
+ c->sub.code.tree = c->ltree;
+ c->mode = LEN;
+ case LEN: /* i: get length/literal/eob next */
+ j = c->sub.code.need;
+ NEEDBITS(j)
+ t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
+ DUMPBITS(t->bits)
+ e = (uInt)(t->exop);
+ if (e == 0) /* literal */
+ {
+ c->sub.lit = t->base;
+ Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
+ "inflate: literal '%c'\n" :
+ "inflate: literal 0x%02x\n", t->base));
+ c->mode = LIT;
+ break;
+ }
+ if (e & 16) /* length */
+ {
+ c->sub.copy.get = e & 15;
+ c->len = t->base;
+ c->mode = LENEXT;
+ break;
+ }
+ if ((e & 64) == 0) /* next table */
+ {
+ c->sub.code.need = e;
+ c->sub.code.tree = t->next;
+ break;
+ }
+ if (e & 32) /* end of block */
+ {
+ Tracevv((stderr, "inflate: end of block\n"));
+ c->mode = WASH;
+ break;
+ }
+ c->mode = BADCODE; /* invalid code */
+ z->msg = "invalid literal/length code";
+ r = Z_DATA_ERROR;
+ LEAVE
+ case LENEXT: /* i: getting length extra (have base) */
+ j = c->sub.copy.get;
+ NEEDBITS(j)
+ c->len += (uInt)b & inflate_mask[j];
+ DUMPBITS(j)
+ c->sub.code.need = c->dbits;
+ c->sub.code.tree = c->dtree;
+ Tracevv((stderr, "inflate: length %u\n", c->len));
+ c->mode = DIST;
+ case DIST: /* i: get distance next */
+ j = c->sub.code.need;
+ NEEDBITS(j)
+ t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
+ DUMPBITS(t->bits)
+ e = (uInt)(t->exop);
+ if (e & 16) /* distance */
+ {
+ c->sub.copy.get = e & 15;
+ c->sub.copy.dist = t->base;
+ c->mode = DISTEXT;
+ break;
+ }
+ if ((e & 64) == 0) /* next table */
+ {
+ c->sub.code.need = e;
+ c->sub.code.tree = t->next;
+ break;
+ }
+ c->mode = BADCODE; /* invalid code */
+ z->msg = "invalid distance code";
+ r = Z_DATA_ERROR;
+ LEAVE
+ case DISTEXT: /* i: getting distance extra */
+ j = c->sub.copy.get;
+ NEEDBITS(j)
+ c->sub.copy.dist += (uInt)b & inflate_mask[j];
+ DUMPBITS(j)
+ Tracevv((stderr, "inflate: distance %u\n", c->sub.copy.dist));
+ c->mode = COPY;
+ case COPY: /* o: copying bytes in window, waiting for space */
+#ifndef __TURBOC__ /* Turbo C bug for following expression */
+ f = (uInt)(q - s->window) < c->sub.copy.dist ?
+ s->end - (c->sub.copy.dist - (q - s->window)) :
+ q - c->sub.copy.dist;
+#else
+ f = q - c->sub.copy.dist;
+ if ((uInt)(q - s->window) < c->sub.copy.dist)
+ f = s->end - (c->sub.copy.dist - (q - s->window));
+#endif
+ while (c->len)
+ {
+ NEEDOUT
+ OUTBYTE(*f++)
+ if (f == s->end)
+ f = s->window;
+ c->len--;
+ }
+ c->mode = START;
+ break;
+ case LIT: /* o: got literal, waiting for output space */
+ NEEDOUT
+ OUTBYTE(c->sub.lit)
+ c->mode = START;
+ break;
+ case WASH: /* o: got eob, possibly more output */
+ FLUSH
+ if (s->read != s->write)
+ LEAVE
+ c->mode = END;
+ case END:
+ r = Z_STREAM_END;
+ LEAVE
+ case BADCODE: /* x: got error */
+ r = Z_DATA_ERROR;
+ LEAVE
+ default:
+ r = Z_STREAM_ERROR;
+ LEAVE
+ }
+}
+
+
+local void inflate_codes_free(c, z)
+inflate_codes_statef *c;
+z_stream *z;
+{
+ ZFREE(z, c, sizeof(struct inflate_codes_state));
+ Tracev((stderr, "inflate: codes free\n"));
+}
+
+/*+++++*/
+/* inflate_util.c -- data and routines common to blocks and codes
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* copy as much as possible from the sliding window to the output area */
+local int inflate_flush(s, z, r)
+inflate_blocks_statef *s;
+z_stream *z;
+int r;
+{
+ uInt n;
+ Bytef *p, *q;
+
+ /* local copies of source and destination pointers */
+ p = z->next_out;
+ q = s->read;
+
+ /* compute number of bytes to copy as far as end of window */
+ n = (uInt)((q <= s->write ? s->write : s->end) - q);
+ if (n > z->avail_out) n = z->avail_out;
+ if (n && r == Z_BUF_ERROR) r = Z_OK;
+
+ /* update counters */
+ z->avail_out -= n;
+ z->total_out += n;
+
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ s->check = (*s->checkfn)(s->check, q, n);
+
+ /* copy as far as end of window */
+ zmemcpy(p, q, n);
+ p += n;
+ q += n;
+
+ /* see if more to copy at beginning of window */
+ if (q == s->end)
+ {
+ /* wrap pointers */
+ q = s->window;
+ if (s->write == s->end)
+ s->write = s->window;
+
+ /* compute bytes to copy */
+ n = (uInt)(s->write - q);
+ if (n > z->avail_out) n = z->avail_out;
+ if (n && r == Z_BUF_ERROR) r = Z_OK;
+
+ /* update counters */
+ z->avail_out -= n;
+ z->total_out += n;
+
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ s->check = (*s->checkfn)(s->check, q, n);
+
+ /* copy */
+ zmemcpy(p, q, n);
+ p += n;
+ q += n;
+ }
+
+ /* update pointers */
+ z->next_out = p;
+ s->read = q;
+
+ /* done */
+ return r;
+}
+
+
+/*+++++*/
+/* inffast.c -- process literals and length/distance pairs fast
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* simplify the use of the inflate_huft type with some defines */
+#define base more.Base
+#define next more.Next
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+/* macros for bit input with no checking and for returning unused bytes */
+#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
+#define UNGRAB {n+=(c=k>>3);p-=c;k&=7;}
+
+/* Called with number of bytes left to write in window at least 258
+ (the maximum string length) and number of input bytes available
+ at least ten. The ten bytes are six bytes for the longest length/
+ distance pair plus four bytes for overloading the bit buffer. */
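
For the record, the six-byte figure is the worst case of 15 bits for a
literal/length code, 5 extra length bits, 15 bits for a distance code and
13 extra distance bits, i.e. 48 bits.  The remaining four bytes are
presumably slack so that GRABBITS() can fill the 32-bit buffer a whole byte
at a time without per-byte bounds checks; UNGRAB then hands back whichever
whole bytes were not actually consumed.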
+
+local int inflate_fast(bl, bd, tl, td, s, z)
+uInt bl, bd;
+inflate_huft *tl, *td;
+inflate_blocks_statef *s;
+z_stream *z;
+{
+ inflate_huft *t; /* temporary pointer */
+ uInt e; /* extra bits or operation */
+ uLong b; /* bit buffer */
+ uInt k; /* bits in bit buffer */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+ uInt ml; /* mask for literal/length tree */
+ uInt md; /* mask for distance tree */
+ uInt c; /* bytes to copy */
+ uInt d; /* distance back to copy from */
+ Bytef *r; /* copy source pointer */
+
+ /* load input, output, bit values */
+ LOAD
+
+ /* initialize masks */
+ ml = inflate_mask[bl];
+ md = inflate_mask[bd];
+
+ /* do until not enough input or output space for fast loop */
+ do { /* assume called with m >= 258 && n >= 10 */
+ /* get literal/length code */
+ GRABBITS(20) /* max bits for literal/length code */
+ if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
+ {
+ DUMPBITS(t->bits)
+ Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
+ "inflate: * literal '%c'\n" :
+ "inflate: * literal 0x%02x\n", t->base));
+ *q++ = (Byte)t->base;
+ m--;
+ continue;
+ }
+ do {
+ DUMPBITS(t->bits)
+ if (e & 16)
+ {
+ /* get extra bits for length */
+ e &= 15;
+ c = t->base + ((uInt)b & inflate_mask[e]);
+ DUMPBITS(e)
+ Tracevv((stderr, "inflate: * length %u\n", c));
+
+ /* decode distance base of block to copy */
+ GRABBITS(15); /* max bits for distance code */
+ e = (t = td + ((uInt)b & md))->exop;
+ do {
+ DUMPBITS(t->bits)
+ if (e & 16)
+ {
+ /* get extra bits to add to distance base */
+ e &= 15;
+ GRABBITS(e) /* get extra bits (up to 13) */
+ d = t->base + ((uInt)b & inflate_mask[e]);
+ DUMPBITS(e)
+ Tracevv((stderr, "inflate: * distance %u\n", d));
+
+ /* do the copy */
+ m -= c;
+ if ((uInt)(q - s->window) >= d) /* offset before dest */
+ { /* just copy */
+ r = q - d;
+ *q++ = *r++; c--; /* minimum count is three, */
+ *q++ = *r++; c--; /* so unroll loop a little */
+ }
+ else /* else offset after destination */
+ {
+ e = d - (q - s->window); /* bytes from offset to end */
+ r = s->end - e; /* pointer to offset */
+ if (c > e) /* if source crosses, */
+ {
+ c -= e; /* copy to end of window */
+ do {
+ *q++ = *r++;
+ } while (--e);
+ r = s->window; /* copy rest from start of window */
+ }
+ }
+ do { /* copy all or what's left */
+ *q++ = *r++;
+ } while (--c);
+ break;
+ }
+ else if ((e & 64) == 0)
+ e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop;
+ else
+ {
+ z->msg = "invalid distance code";
+ UNGRAB
+ UPDATE
+ return Z_DATA_ERROR;
+ }
+ } while (1);
+ break;
+ }
+ if ((e & 64) == 0)
+ {
+ if ((e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop) == 0)
+ {
+ DUMPBITS(t->bits)
+ Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
+ "inflate: * literal '%c'\n" :
+ "inflate: * literal 0x%02x\n", t->base));
+ *q++ = (Byte)t->base;
+ m--;
+ break;
+ }
+ }
+ else if (e & 32)
+ {
+ Tracevv((stderr, "inflate: * end of block\n"));
+ UNGRAB
+ UPDATE
+ return Z_STREAM_END;
+ }
+ else
+ {
+ z->msg = "invalid literal/length code";
+ UNGRAB
+ UPDATE
+ return Z_DATA_ERROR;
+ }
+ } while (1);
+ } while (m >= 258 && n >= 10);
+
+ /* not enough input or output--restore pointers and return */
+ UNGRAB
+ UPDATE
+ return Z_OK;
+}
+
+
+/*+++++*/
+/* zutil.c -- target dependent utility functions for the compression library
+ * Copyright (C) 1995 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* From: zutil.c,v 1.8 1995/05/03 17:27:12 jloup Exp */
+
+char *zlib_version = ZLIB_VERSION;
+
+char *z_errmsg[] = {
+"stream end", /* Z_STREAM_END 1 */
+"", /* Z_OK 0 */
+"file error", /* Z_ERRNO (-1) */
+"stream error", /* Z_STREAM_ERROR (-2) */
+"data error", /* Z_DATA_ERROR (-3) */
+"insufficient memory", /* Z_MEM_ERROR (-4) */
+"buffer error", /* Z_BUF_ERROR (-5) */
+""};
+
+
+/*+++++*/
+/* adler32.c -- compute the Adler-32 checksum of a data stream
+ * Copyright (C) 1995 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* From: adler32.c,v 1.6 1995/05/03 17:27:08 jloup Exp */
+
+#define BASE 65521L /* largest prime smaller than 65536 */
+#define NMAX 5552
+/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
+
+#define DO1(buf) {s1 += *buf++; s2 += s1;}
+#define DO2(buf) DO1(buf); DO1(buf);
+#define DO4(buf) DO2(buf); DO2(buf);
+#define DO8(buf) DO4(buf); DO4(buf);
+#define DO16(buf) DO8(buf); DO8(buf);
+
+/* ========================================================================= */
+uLong adler32(adler, buf, len)
+ uLong adler;
+ Bytef *buf;
+ uInt len;
+{
+ unsigned long s1 = adler & 0xffff;
+ unsigned long s2 = (adler >> 16) & 0xffff;
+ int k;
+
+ if (buf == Z_NULL) return 1L;
+
+ while (len > 0) {
+ k = len < NMAX ? len : NMAX;
+ len -= k;
+ while (k >= 16) {
+ DO16(buf);
+ k -= 16;
+ }
+ if (k != 0) do {
+ DO1(buf);
+ } while (--k);
+ s1 %= BASE;
+ s2 %= BASE;
+ }
+ return (s2 << 16) | s1;
+}
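
The checksum is designed to be carried incrementally, which is exactly how
s->checkfn is driven from inflate_blocks_reset() and inflate_flush() above:
seed with adler32(0L, Z_NULL, 0), which returns the initial value 1, then
fold in each output chunk as it is produced.  A small sketch with two
hypothetical buffers (not part of the patch):

/* checksum of the concatenation buf1||buf2, computed piecewise */
static uLong adler32_cat(Bytef *buf1, uInt len1, Bytef *buf2, uInt len2)
{
    uLong check = adler32(0L, Z_NULL, 0);  /* initial value: 1 */

    check = adler32(check, buf1, len1);
    return adler32(check, buf2, len2);
}

The NMAX bound checks out: for n = 5552, 255*n*(n+1)/2 + (n+1)*(BASE-1) is
4,294,690,200, which still fits in 32 bits, while n = 5553 does not; that is
why s1 and s2 only need the modulo reduction once per NMAX-byte chunk.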
diff --git a/arch/xtensa/boot/lib/zmem.c b/arch/xtensa/boot/lib/zmem.c
new file mode 100644
index 00000000000..7848f126d67
--- /dev/null
+++ b/arch/xtensa/boot/lib/zmem.c
@@ -0,0 +1,87 @@
+#include "zlib.h"
+
+/* bits taken from ppc */
+
+extern void *avail_ram, *end_avail;
+
+void exit (void)
+{
+ for (;;);
+}
+
+void *zalloc(void *x, unsigned items, unsigned size)
+{
+ void *p = avail_ram;
+
+ size *= items;
+	size = (size + 7) & -8;	/* round up to a multiple of 8 bytes */
+ avail_ram += size;
+ if (avail_ram > end_avail) {
+ //puts("oops... out of memory\n");
+ //pause();
+ exit ();
+ }
+ return p;
+}
+
+void zfree(void *x, void *addr, unsigned nb)
+{
+}
+
+
+#define HEAD_CRC 2
+#define EXTRA_FIELD 4
+#define ORIG_NAME 8
+#define COMMENT 0x10
+#define RESERVED 0xe0
+
+#define DEFLATED 8
+
+void gunzip (void *dst, int dstlen, unsigned char *src, int *lenp)
+{
+ z_stream s;
+ int r, i, flags;
+
+ /* skip header */
+
+ i = 10;
+ flags = src[3];
+ if (src[2] != DEFLATED || (flags & RESERVED) != 0) {
+ //puts("bad gzipped data\n");
+ exit();
+ }
+ if ((flags & EXTRA_FIELD) != 0)
+ i = 12 + src[10] + (src[11] << 8);
+ if ((flags & ORIG_NAME) != 0)
+ while (src[i++] != 0)
+ ;
+ if ((flags & COMMENT) != 0)
+ while (src[i++] != 0)
+ ;
+ if ((flags & HEAD_CRC) != 0)
+ i += 2;
+ if (i >= *lenp) {
+ //puts("gunzip: ran out of data in header\n");
+ exit();
+ }
+
+ s.zalloc = zalloc;
+ s.zfree = zfree;
+ r = inflateInit2(&s, -MAX_WBITS);
+ if (r != Z_OK) {
+ //puts("inflateInit2 returned "); puthex(r); puts("\n");
+ exit();
+ }
+ s.next_in = src + i;
+ s.avail_in = *lenp - i;
+ s.next_out = dst;
+ s.avail_out = dstlen;
+ r = inflate(&s, Z_FINISH);
+ if (r != Z_OK && r != Z_STREAM_END) {
+ //puts("inflate returned "); puthex(r); puts("\n");
+ exit();
+ }
+ *lenp = s.next_out - (unsigned char *) dst;
+ inflateEnd(&s);
+}
+
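A usage sketch for gunzip(); every symbol below is a hypothetical stand-in
for whatever the boot loader really provides.  On entry *lenp holds the
compressed size and on return the decompressed size; the negative
window-bits argument passed to inflateInit2() above selects a raw deflate
stream, since the gzip wrapper has already been skipped by hand.

#define KERNEL_LOAD_ADDR ((void *)0xd0001000)       /* hypothetical */
#define KERNEL_MAX_SIZE  (4 << 20)                  /* hypothetical */

extern unsigned char _image_start[], _image_end[];  /* hypothetical */

void load_compressed_kernel(void)
{
    int len = (int)(_image_end - _image_start);

    gunzip(KERNEL_LOAD_ADDR, KERNEL_MAX_SIZE, _image_start, &len);
    /* len now holds the number of bytes written to KERNEL_LOAD_ADDR */
}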
diff --git a/arch/xtensa/boot/ramdisk/Makefile b/arch/xtensa/boot/ramdisk/Makefile
new file mode 100644
index 00000000000..b12f7635243
--- /dev/null
+++ b/arch/xtensa/boot/ramdisk/Makefile
@@ -0,0 +1,23 @@
+#
+# Makefile for a ramdisk image
+#
+
+BIG_ENDIAN := $(shell echo -e "\#ifdef __XTENSA_EL__\nint little;\n\#else\nint big;\n\#endif" | $(CC) -E -|grep -c big)
+
+ifeq ($(BIG_ENDIAN),1)
+OBJCOPY_ARGS := -O elf32-xtensa-be
+else
+OBJCOPY_ARGS := -O elf32-xtensa-le
+endif
+
+obj-y = ramdisk.o
+
+RAMDISK_IMAGE = arch/$(ARCH)/boot/ramdisk/$(CONFIG_EMBEDDED_RAMDISK_IMAGE)
+
+arch/$(ARCH)/boot/ramdisk/ramdisk.o:
+ $(Q)echo -e "dummy:" | $(AS) -o $@;
+ $(Q)$(OBJCOPY) $(OBJCOPY_ARGS) \
+ --add-section .initrd=$(RAMDISK_IMAGE) \
+		--set-section-flags .initrd=contents,alloc,load,data \
+ arch/$(ARCH)/boot/ramdisk/ramdisk.o $@
+
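The BIG_ENDIAN test works by running a four-line probe through the
preprocessor and counting occurrences of the identifier "big"; expanded, the
program the echo emits is:

#ifdef __XTENSA_EL__
int little;
#else
int big;
#endif

A compiler configured for a little-endian core defines __XTENSA_EL__, so
"big" never survives preprocessing, grep -c prints 0, and the little-endian
objcopy target is selected.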
diff --git a/arch/xtensa/configs/common_defconfig b/arch/xtensa/configs/common_defconfig
new file mode 100644
index 00000000000..1d230ee081b
--- /dev/null
+++ b/arch/xtensa/configs/common_defconfig
@@ -0,0 +1,662 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.11-rc2
+# Tue Mar 1 16:36:53 2005
+#
+# CONFIG_FRAME_POINTER is not set
+CONFIG_XTENSA=y
+# CONFIG_UID16 is not set
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_HAVE_DEC_LOCK=y
+CONFIG_GENERIC_HARDIRQS=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+# CONFIG_MODULE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Processor type and features
+#
+CONFIG_XTENSA_ARCH_LINUX_BE=y
+# CONFIG_XTENSA_ARCH_LINUX_LE is not set
+# CONFIG_XTENSA_ARCH_LINUX_TEST is not set
+# CONFIG_XTENSA_ARCH_S5 is not set
+# CONFIG_XTENSA_CUSTOM is not set
+CONFIG_MMU=y
+# CONFIG_XTENSA_UNALIGNED_USER is not set
+# CONFIG_PREEMPT is not set
+# CONFIG_MATH_EMULATION is not set
+# CONFIG_HIGHMEM is not set
+
+#
+# Platform options
+#
+# CONFIG_XTENSA_PLATFORM_ISS is not set
+CONFIG_XTENSA_PLATFORM_XT2000=y
+CONFIG_XTENSA_CALIBRATE_CCOUNT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,38400 ip=bootp root=nfs nfsroot=/opt/montavista/pro/devkit/xtensa/linux_be/target"
+
+#
+# Bus options
+#
+CONFIG_PCI=y
+# CONFIG_PCI_LEGACY_PROC is not set
+# CONFIG_PCI_NAMES is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PC-card bridges
+#
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Executable file formats
+#
+CONFIG_KCORE_ELF=y
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_MISC=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_RAM is not set
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CDROM_PKTCDVD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_PACKET is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_IP_TCPDIAG is not set
+# CONFIG_IP_TCPDIAG_IPV6 is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CLK_JIFFIES=y
+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
+# CONFIG_NET_SCH_CLK_CPU is not set
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+# CONFIG_NET_SCH_HFSC is not set
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+# CONFIG_NET_SCH_NETEM is not set
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_QOS=y
+CONFIG_NET_ESTIMATOR=y
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_ROUTE=y
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+# CONFIG_CLS_U32_PERF is not set
+# CONFIG_NET_CLS_IND is not set
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+# CONFIG_NET_CLS_ACT is not set
+CONFIG_NET_CLS_POLICE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+CONFIG_XT2000_SONIC=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_NET_VENDOR_3COM is not set
+
+#
+# Tulip family network device support
+#
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_PCI is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_TIGON3 is not set
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+CONFIG_NET_RADIO=y
+
+#
+# Obsolete Wireless cards support (pre-802.11)
+#
+CONFIG_STRIP=m
+
+#
+# Wireless 802.11b ISA/PCI cards support
+#
+CONFIG_HERMES=m
+# CONFIG_PLX_HERMES is not set
+# CONFIG_TMD_HERMES is not set
+# CONFIG_PCI_HERMES is not set
+# CONFIG_ATMEL is not set
+
+#
+# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
+#
+# CONFIG_PRISM54 is not set
+CONFIG_NET_WIRELESS=y
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_RTC is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB is not set
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_JBD is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_SYSFS=y
+CONFIG_DEVFS_FS=y
+# CONFIG_DEVFS_MOUNT is not set
+# CONFIG_DEVFS_DEBUG is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+# CONFIG_TMPFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+# CONFIG_EXPORTFS is not set
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_KGDB is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC32 is not set
+# CONFIG_LIBCRC32C is not set
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
new file mode 100644
index 00000000000..802621dd486
--- /dev/null
+++ b/arch/xtensa/configs/iss_defconfig
@@ -0,0 +1,531 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.11-rc2
+# Fri Feb 25 19:21:24 2005
+#
+CONFIG_FRAME_POINTER=y
+CONFIG_XTENSA=y
+# CONFIG_UID16 is not set
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_HAVE_DEC_LOCK=y
+CONFIG_GENERIC_HARDIRQS=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_HOTPLUG is not set
+# CONFIG_KOBJECT_UEVENT is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_EMBEDDED=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+# CONFIG_MODULES is not set
+
+#
+# Processor type and features
+#
+CONFIG_XTENSA_ARCH_LINUX_BE=y
+# CONFIG_XTENSA_ARCH_LINUX_LE is not set
+# CONFIG_XTENSA_ARCH_LINUX_TEST is not set
+# CONFIG_XTENSA_ARCH_S5 is not set
+# CONFIG_XTENSA_CUSTOM is not set
+CONFIG_MMU=y
+# CONFIG_XTENSA_UNALIGNED_USER is not set
+# CONFIG_PREEMPT is not set
+# CONFIG_MATH_EMULATION is not set
+# CONFIG_HIGHMEM is not set
+
+#
+# Platform options
+#
+CONFIG_XTENSA_PLATFORM_ISS=y
+# CONFIG_XTENSA_PLATFORM_XT2000 is not set
+# CONFIG_XTENSA_PLATFORM_ARUBA is not set
+# CONFIG_XTENSA_CALIBRATE_CCOUNT is not set
+CONFIG_XTENSA_CPU_CLOCK=10
+# CONFIG_GENERIC_CALIBRATE_DELAY is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,38400 eth0=tuntap,,tap0 ip=192.168.168.5:192.168.168.1 root=nfs nfsroot=192.168.168.1:/opt/montavista/pro/devkit/xtensa/linux_be/target"
+CONFIG_SERIAL_CONSOLE=y
+CONFIG_XTENSA_ISS_NETWORK=y
+
+#
+# Bus options
+#
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PC-card bridges
+#
+
+#
+# PCI Hotplug Support
+#
+
+#
+# Executable file formats
+#
+CONFIG_KCORE_ELF=y
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+# CONFIG_STANDALONE is not set
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_RAM is not set
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CDROM_PKTCDVD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_IP_TCPDIAG is not set
+# CONFIG_IP_TCPDIAG_IPV6 is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+# CONFIG_SCTP_HMAC_MD5 is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_SCH_CLK_JIFFIES is not set
+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
+# CONFIG_NET_SCH_CLK_CPU is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_NETDEVICES is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+# CONFIG_SERIO is not set
+# CONFIG_SERIO_I8042 is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+
+#
+# Watchdog Device Drivers
+#
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_RTC is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_JBD is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_DEVFS_FS=y
+CONFIG_DEVFS_MOUNT=y
+# CONFIG_DEVFS_DEBUG is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V4 is not set
+CONFIG_NFS_DIRECTIO=y
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+# CONFIG_EXPORTFS is not set
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_KGDB is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC32 is not set
+# CONFIG_LIBCRC32C is not set
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
new file mode 100644
index 00000000000..d573017a5dd
--- /dev/null
+++ b/arch/xtensa/kernel/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the Linux/Xtensa kernel.
+#
+
+extra-y := head.o vmlinux.lds
+
+
+obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
+ setup.o signal.o syscalls.o time.o traps.o vectors.o platform.o \
+ pci-dma.o
+
+## windowspill.o
+
+obj-$(CONFIG_KGDB) += xtensa-stub.o
+obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
+
+
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
new file mode 100644
index 00000000000..74b1e90ef08
--- /dev/null
+++ b/arch/xtensa/kernel/align.S
@@ -0,0 +1,459 @@
+/*
+ * arch/xtensa/kernel/align.S
+ *
+ * Handle unalignment exceptions in kernel space.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica, Inc.
+ *
+ * Rewritten by Chris Zankel <chris@zankel.net>
+ *
+ * Based on work from Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * and Marc Gauthier <marc@tensilica.com, marc@alimni.uwaterloo.ca>
+ */
+
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/offsets.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+
+/* First-level exception handler for unaligned exceptions.
+ *
+ * Note: This handler works only for kernel exceptions. Unaligned user
+ * access should get a seg fault.
+ */
+
+/* Big and little endian 16-bit values are located in
+ * different halves of a register. HWORD_START helps to
+ * abstract the notion of extracting a 16-bit value from a
+ * register.
+ * We also have to define new shifting instructions because
+ * lsb and msb are on 'opposite' ends in a register for
+ * different endian machines.
+ *
+ * Assume a memory region in ascending address:
+ * 0 1 2 3|4 5 6 7
+ *
+ * When loading one word into a register, the content of that register is:
+ * LE 3 2 1 0, 7 6 5 4
+ * BE 0 1 2 3, 4 5 6 7
+ *
+ * Masking the bits of the higher/lower address means:
+ * LE X X 0 0, 0 0 X X
+ * BE 0 0 X X, X X 0 0
+ *
+ * Shifting to higher/lower addresses, means:
+ * LE shift left / shift right
+ * BE shift right / shift left
+ *
+ * Extracting 16 bits from a 32 bit reg. value to higher/lower address means:
+ * LE mask 0 0 X X / shift left
+ * BE shift left / mask 0 0 X X
+ */
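+
+/* A worked example (illustrative, not from the ISA manual): assume the
+ * bytes at ascending addresses 0..3 hold 0x00 0x11 0x22 0x33. After an
+ * l32i of that word, the register holds 0x33221100 on a little-endian
+ * core but 0x00112233 on a big-endian one, which is exactly the
+ * "LE 3 2 1 0 / BE 0 1 2 3" layout described above.
+ */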
+
+#define UNALIGNED_USER_EXCEPTION
+
+#if XCHAL_HAVE_BE
+
+#define HWORD_START 16
+#define INSN_OP0 28
+#define INSN_T 24
+#define INSN_OP1 16
+
+.macro __src_b r, w0, w1; src \r, \w0, \w1; .endm
+.macro __ssa8 r; ssa8b \r; .endm
+.macro __ssa8r r; ssa8l \r; .endm
+.macro __sh r, s; srl \r, \s; .endm
+.macro __sl r, s; sll \r, \s; .endm
+.macro __exth r, s; extui \r, \s, 0, 16; .endm
+.macro __extl r, s; slli \r, \s, 16; .endm
+
+#else
+
+#define HWORD_START 0
+#define INSN_OP0 0
+#define INSN_T 4
+#define INSN_OP1 12
+
+.macro __src_b r, w0, w1; src \r, \w1, \w0; .endm
+.macro __ssa8 r; ssa8l \r; .endm
+.macro __ssa8r r; ssa8b \r; .endm
+.macro __sh r, s; sll \r, \s; .endm
+.macro __sl r, s; srl \r, \s; .endm
+.macro __exth r, s; slli \r, \s, 16; .endm
+.macro __extl r, s; extui \r, \s, 0, 16; .endm
+
+#endif
+
+/*
+ * xxxx xxxx = imm8 field
+ * yyyy = imm4 field
+ * ssss = s field
+ * tttt = t field
+ *
+ * 16 0
+ * -------------------
+ * L32I.N yyyy ssss tttt 1000
+ * S32I.N yyyy ssss tttt 1001
+ *
+ * 23 0
+ * -----------------------------
+ * res 0000 0010
+ * L16UI xxxx xxxx 0001 ssss tttt 0010
+ * L32I xxxx xxxx 0010 ssss tttt 0010
+ * XXX 0011 ssss tttt 0010
+ * XXX 0100 ssss tttt 0010
+ * S16I xxxx xxxx 0101 ssss tttt 0010
+ * S32I xxxx xxxx 0110 ssss tttt 0010
+ * XXX 0111 ssss tttt 0010
+ * XXX 1000 ssss tttt 0010
+ * L16SI xxxx xxxx 1001 ssss tttt 0010
+ * XXX 1010 0010
+ * **L32AI xxxx xxxx 1011 ssss tttt 0010 unsupported
+ * XXX 1100 0010
+ * XXX 1101 0010
+ * XXX 1110 0010
+ * **S32RI xxxx xxxx 1111 ssss tttt 0010 unsupported
+ * -----------------------------
+ * ^ ^ ^
+ * sub-opcode (NIBBLE_R) -+ | |
+ * t field (NIBBLE_T) -----------+ |
+ * major opcode (NIBBLE_OP0) --------------+
+ */
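+
+/* Decoding example (illustrative, derived from the table above): on a
+ * little-endian core the 24-bit word 0x002342 has OP0 = 2 (bits 0..3),
+ * t = 4, s = 3, OP1 = 2, and imm8 = 0x00, i.e. it is "l32i a4, a3, 0".
+ */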
+
+#define OP0_L32I_N 0x8 /* load immediate narrow */
+#define OP0_S32I_N 0x9 /* store immediate narrow */
+#define OP1_SI_MASK 0x4 /* OP1 bit set for stores */
+#define OP1_SI_BIT 2 /* OP1 bit number for stores */
+
+#define OP1_L32I 0x2
+#define OP1_L16UI 0x1
+#define OP1_L16SI 0x9
+#define OP1_L32AI 0xb
+
+#define OP1_S32I 0x6
+#define OP1_S16I 0x5
+#define OP1_S32RI 0xf
+
+/*
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: dispatch table
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: a3
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+
+ENTRY(fast_unaligned)
+
+	/* Note: We don't expect the address to be aligned on a word
+	 * boundary; after all, the processor raised this exception because
+	 * the access was unaligned, so an aligned address here would
+	 * indicate a hardware fault.
+ */
+
+ /* Save some working register */
+
+ s32i a4, a2, PT_AREG4
+ s32i a5, a2, PT_AREG5
+ s32i a6, a2, PT_AREG6
+ s32i a7, a2, PT_AREG7
+ s32i a8, a2, PT_AREG8
+
+ rsr a0, DEPC
+ xsr a3, EXCSAVE_1
+ s32i a0, a2, PT_AREG2
+ s32i a3, a2, PT_AREG3
+
+ /* Keep value of SAR in a0 */
+
+ rsr a0, SAR
+ rsr a8, EXCVADDR # load unaligned memory address
+
+ /* Now, identify one of the following load/store instructions.
+ *
+ * The only possible danger of a double exception on the
+ * following l32i instructions is kernel code in vmalloc
+ * memory. The processor was just executing at the EPC_1
+ * address, and indeed, already fetched the instruction. That
+ * guarantees a TLB mapping, which hasn't been replaced by
+ * this unaligned exception handler that uses only static TLB
+ * mappings. However, high-level interrupt handlers might
+ * modify TLB entries, so for the generic case, we register a
+ * TABLE_FIXUP handler here, too.
+ */
+
+ /* a3...a6 saved on stack, a2 = SP */
+
+ /* Extract the instruction that caused the unaligned access. */
+
+ rsr a7, EPC_1 # load exception address
+ movi a3, ~3
+ and a3, a3, a7 # mask lower bits
+
+ l32i a4, a3, 0 # load 2 words
+ l32i a5, a3, 4
+
+ __ssa8 a7
+ __src_b a4, a4, a5 # a4 has the instruction
+
+ /* Analyze the instruction (load or store?). */
+
+ extui a5, a4, INSN_OP0, 4 # get insn.op0 nibble
+
+#if XCHAL_HAVE_NARROW
+ _beqi a5, OP0_L32I_N, .Lload # L32I.N, jump
+ addi a6, a5, -OP0_S32I_N
+ _beqz a6, .Lstore # S32I.N, do a store
+#endif
+ /* 'store indicator bit' not set, jump */
+ _bbci.l a4, OP1_SI_BIT + INSN_OP1, .Lload
+
+ /* Store: Jump to table entry to get the value in the source register.*/
+
+.Lstore:movi a5, .Lstore_table # table
+ extui a6, a4, INSN_T, 4 # get source register
+ addx8 a5, a6, a5
+ jx a5 # jump into table
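+
+	/* Note: each entry in .Lstore_table below is padded to 8 bytes
+	 * (.align 8), so the addx8 above (a5 = 8 * a6 + a5) lands directly
+	 * on the entry for source register t.
+	 */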
+
+ /* Invalid instruction, CRITICAL! */
+.Linvalid_instruction_load:
+ j .Linvalid_instruction
+
+ /* Load: Load memory address. */
+
+.Lload: movi a3, ~3
+ and a3, a3, a8 # align memory address
+
+ __ssa8 a8
+#ifdef UNALIGNED_USER_EXCEPTION
+ addi a3, a3, 8
+ l32e a5, a3, -8
+ l32e a6, a3, -4
+#else
+ l32i a5, a3, 0
+ l32i a6, a3, 4
+#endif
+ __src_b a3, a5, a6 # a3 has the data word
+
+#if XCHAL_HAVE_NARROW
+ addi a7, a7, 2 # increment PC (assume 16-bit insn)
+
+ extui a5, a4, INSN_OP0, 4
+ _beqi a5, OP0_L32I_N, 1f # l32i.n: jump
+
+ addi a7, a7, 1
+#else
+ addi a7, a7, 3
+#endif
+
+ extui a5, a4, INSN_OP1, 4
+ _beqi a5, OP1_L32I, 1f # l32i: jump
+
+ extui a3, a3, 0, 16 # extract lower 16 bits
+ _beqi a5, OP1_L16UI, 1f
+ addi a5, a5, -OP1_L16SI
+ _bnez a5, .Linvalid_instruction_load
+
+ /* sign extend value */
+
+ slli a3, a3, 16
+ srai a3, a3, 16
+
+ /* Set target register. */
+
+1:
+
+#if XCHAL_HAVE_LOOP
+ rsr a3, LEND # check if we reached LEND
+ bne a7, a3, 1f
+ rsr a3, LCOUNT # and LCOUNT != 0
+ beqz a3, 1f
+ addi a3, a3, -1 # decrement LCOUNT and set
+ rsr a7, LBEG # set PC to LBEGIN
+ wsr a3, LCOUNT
+#endif
+
+1: wsr a7, EPC_1 # skip load instruction
+ extui a4, a4, INSN_T, 4 # extract target register
+ movi a5, .Lload_table
+ addx8 a4, a4, a5
+ jx a4 # jump to entry for target register
+
+ .align 8
+.Lload_table:
+ s32i a3, a2, PT_AREG0; _j .Lexit; .align 8
+ mov a1, a3; _j .Lexit; .align 8 # fishy??
+ s32i a3, a2, PT_AREG2; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG3; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG4; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG5; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG6; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG7; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG8; _j .Lexit; .align 8
+ mov a9, a3 ; _j .Lexit; .align 8
+ mov a10, a3 ; _j .Lexit; .align 8
+ mov a11, a3 ; _j .Lexit; .align 8
+ mov a12, a3 ; _j .Lexit; .align 8
+ mov a13, a3 ; _j .Lexit; .align 8
+ mov a14, a3 ; _j .Lexit; .align 8
+ mov a15, a3 ; _j .Lexit; .align 8
+
+.Lstore_table:
+ l32i a3, a2, PT_AREG0; _j 1f; .align 8
+ mov a3, a1; _j 1f; .align 8 # fishy??
+ l32i a3, a2, PT_AREG2; _j 1f; .align 8
+ l32i a3, a2, PT_AREG3; _j 1f; .align 8
+ l32i a3, a2, PT_AREG4; _j 1f; .align 8
+ l32i a3, a2, PT_AREG5; _j 1f; .align 8
+ l32i a3, a2, PT_AREG6; _j 1f; .align 8
+ l32i a3, a2, PT_AREG7; _j 1f; .align 8
+ l32i a3, a2, PT_AREG8; _j 1f; .align 8
+ mov a3, a9 ; _j 1f; .align 8
+ mov a3, a10 ; _j 1f; .align 8
+ mov a3, a11 ; _j 1f; .align 8
+ mov a3, a12 ; _j 1f; .align 8
+ mov a3, a13 ; _j 1f; .align 8
+ mov a3, a14 ; _j 1f; .align 8
+ mov a3, a15 ; _j 1f; .align 8
+
+1: # a7: instruction pointer, a4: instruction, a3: value
+
+ movi a6, 0 # mask: ffffffff:00000000
+
+#if XCHAL_HAVE_NARROW
+	addi	a7, a7, 2		# increment PC, assume 16-bit insn
+
+ extui a5, a4, INSN_OP0, 4 # extract OP0
+ addi a5, a5, -OP0_S32I_N
+ _beqz a5, 1f # s32i.n: jump
+
+ addi a7, a7, 1 # increment PC, 32-bit instruction
+#else
+ addi a7, a7, 3 # increment PC, 32-bit instruction
+#endif
+
+ extui a5, a4, INSN_OP1, 4 # extract OP1
+ _beqi a5, OP1_S32I, 1f # jump if 32 bit store
+ _bnei a5, OP1_S16I, .Linvalid_instruction_store
+
+ movi a5, -1
+ __extl a3, a3 # get 16-bit value
+ __exth a6, a5 # get 16-bit mask ffffffff:ffff0000
+
+ /* Get memory address */
+
+1:
+#if XCHAL_HAVE_LOOP
+ rsr a3, LEND # check if we reached LEND
+ bne a7, a3, 1f
+ rsr a3, LCOUNT # and LCOUNT != 0
+ beqz a3, 1f
+ addi a3, a3, -1 # decrement LCOUNT and set
+ rsr a7, LBEG # set PC to LBEGIN
+ wsr a3, LCOUNT
+#endif
+
+1: wsr a7, EPC_1 # skip store instruction
+ movi a4, ~3
+ and a4, a4, a8 # align memory address
+
+ /* Insert value into memory */
+
+ movi a5, -1 # mask: ffffffff:XXXX0000
+#ifdef UNALIGNED_USER_EXCEPTION
+ addi a4, a4, 8
+#endif
+
+ __ssa8r a8
+ __src_b a7, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE)
+ __src_b a6, a6, a5 # hi-mask 0..0F..F (BE) F..F0..0 (LE)
+#ifdef UNALIGNED_USER_EXCEPTION
+ l32e a5, a4, -8
+#else
+ l32i a5, a4, 0 # load lower address word
+#endif
+ and a5, a5, a7 # mask
+ __sh a7, a3 # shift value
+ or a5, a5, a7 # or with original value
+#ifdef UNALIGNED_USER_EXCEPTION
+ s32e a5, a4, -8
+ l32e a7, a4, -4
+#else
+ s32i a5, a4, 0 # store
+ l32i a7, a4, 4 # same for upper address word
+#endif
+ __sl a5, a3
+ and a6, a7, a6
+ or a6, a6, a5
+#ifdef UNALIGNED_USER_EXCEPTION
+ s32e a6, a4, -4
+#else
+ s32i a6, a4, 4
+#endif
+
+ /* Done. restore stack and return */
+
+.Lexit:
+ movi a4, 0
+ rsr a3, EXCSAVE_1
+ s32i a4, a3, EXC_TABLE_FIXUP
+
+ /* Restore working register */
+
+ l32i a7, a2, PT_AREG7
+ l32i a6, a2, PT_AREG6
+ l32i a5, a2, PT_AREG5
+ l32i a4, a2, PT_AREG4
+ l32i a3, a2, PT_AREG3
+
+ /* restore SAR and return */
+
+ wsr a0, SAR
+ l32i a0, a2, PT_AREG0
+ l32i a2, a2, PT_AREG2
+ rfe
+
+ /* We cannot handle this exception. */
+
+ .extern _kernel_exception
+.Linvalid_instruction_store:
+.Linvalid_instruction:
+
+ /* Restore a4...a8 and SAR, set SP, and jump to default exception. */
+
+ l32i a8, a2, PT_AREG8
+ l32i a7, a2, PT_AREG7
+ l32i a6, a2, PT_AREG6
+ l32i a5, a2, PT_AREG5
+ l32i a4, a2, PT_AREG4
+ wsr a0, SAR
+ mov a1, a2
+
+ rsr a0, PS
+ bbsi.l a2, PS_UM_SHIFT, 1f # jump if user mode
+
+ movi a0, _kernel_exception
+ jx a0
+
+1: movi a0, _user_exception
+ jx a0
+
+
+#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
+
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
new file mode 100644
index 00000000000..840cd9a1d3d
--- /dev/null
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -0,0 +1,94 @@
+/*
+ * arch/xtensa/kernel/asm-offsets.c
+ *
+ * Generates definitions from C structures used by assembly sources.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/stddef.h>
+#include <linux/thread_info.h>
+#include <linux/ptrace.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+
+#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+#define BLANK() asm volatile("\n->" : : )
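+
+/* Neither asm body above is ever executed; when kbuild compiles this file
+ * to assembly, each DEFINE() leaves a marker line such as
+ * "->PT_PC 0 offsetof (struct pt_regs, pc)" in the compiler output, which
+ * the build then post-processes (typically with a sed rule in the kbuild
+ * makefiles) into #define lines of the generated offsets header.
+ */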
+
+int main(void)
+{
+ /* struct pt_regs */
+ DEFINE(PT_PC, offsetof (struct pt_regs, pc));
+ DEFINE(PT_PS, offsetof (struct pt_regs, ps));
+ DEFINE(PT_DEPC, offsetof (struct pt_regs, depc));
+ DEFINE(PT_EXCCAUSE, offsetof (struct pt_regs, exccause));
+ DEFINE(PT_EXCVADDR, offsetof (struct pt_regs, excvaddr));
+ DEFINE(PT_DEBUGCAUSE, offsetof (struct pt_regs, debugcause));
+ DEFINE(PT_WMASK, offsetof (struct pt_regs, wmask));
+ DEFINE(PT_LBEG, offsetof (struct pt_regs, lbeg));
+ DEFINE(PT_LEND, offsetof (struct pt_regs, lend));
+ DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount));
+ DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
+ DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
+ DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
+ DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
+ DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
+ DEFINE(PT_AREG2, offsetof (struct pt_regs, areg[2]));
+ DEFINE(PT_AREG3, offsetof (struct pt_regs, areg[3]));
+ DEFINE(PT_AREG4, offsetof (struct pt_regs, areg[4]));
+ DEFINE(PT_AREG5, offsetof (struct pt_regs, areg[5]));
+ DEFINE(PT_AREG6, offsetof (struct pt_regs, areg[6]));
+ DEFINE(PT_AREG7, offsetof (struct pt_regs, areg[7]));
+ DEFINE(PT_AREG8, offsetof (struct pt_regs, areg[8]));
+ DEFINE(PT_AREG9, offsetof (struct pt_regs, areg[9]));
+ DEFINE(PT_AREG10, offsetof (struct pt_regs, areg[10]));
+ DEFINE(PT_AREG11, offsetof (struct pt_regs, areg[11]));
+ DEFINE(PT_AREG12, offsetof (struct pt_regs, areg[12]));
+ DEFINE(PT_AREG13, offsetof (struct pt_regs, areg[13]));
+ DEFINE(PT_AREG14, offsetof (struct pt_regs, areg[14]));
+ DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15]));
+ DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase));
+ DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart));
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
+ DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
+ BLANK();
+
+ /* struct task_struct */
+ DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
+ DEFINE(TASK_MM, offsetof (struct task_struct, mm));
+ DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm));
+ DEFINE(TASK_PID, offsetof (struct task_struct, pid));
+ DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
+ DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, thread_info));
+ DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
+ BLANK();
+
+	/* struct thread_struct (offsets are relative to struct task_struct) */
+ DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
+ DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
+ DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save));
+ DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
+ BLANK();
+
+ /* struct mm_struct */
+ DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
+ DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
+ DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
+ BLANK();
+ DEFINE(PT_SINGLESTEP_BIT, PT_SINGLESTEP_BIT);
+ return 0;
+}
+
+
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
new file mode 100644
index 00000000000..356192a4d39
--- /dev/null
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -0,0 +1,201 @@
+/*
+ * arch/xtensa/kernel/coprocessor.S
+ *
+ * Xtensa processor configuration-specific table of coprocessor and
+ * other custom register layout information.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ *
+ * Marc Gauthier <marc@tensilica.com> <marc@alumni.uwaterloo.ca>
+ */
+
+/*
+ * This module contains a table that describes the layout of the various
+ * custom registers and states associated with each coprocessor, as well
+ * as those not associated with any coprocessor ("extra state").
+ * This table is included with core dumps and is available via the ptrace
+ * interface, allowing the layout of such register/state information to
+ * be modified in the kernel without affecting the debugger. Each
+ * register or state is identified using a 32-bit "libdb target number"
+ * assigned when the Xtensa processor is generated.
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <asm/processor.h>
+
+#if XCHAL_HAVE_CP
+
+#define CP_LAST ((XCHAL_CP_MAX - 1) * COPROCESSOR_INFO_SIZE)
+
+ENTRY(release_coprocessors)
+
+ entry a1, 16
+ # a2: task
+ movi a3, 1 << XCHAL_CP_MAX # a3: coprocessor-bit
+ movi a4, coprocessor_info+CP_LAST # a4: owner-table
+ # a5: tmp
+ movi a6, 0 # a6: 0
+ rsil a7, LOCKLEVEL # a7: PS
+
+1: /* Check if task is coprocessor owner of coprocessor[i]. */
+
+ l32i a5, a4, COPROCESSOR_INFO_OWNER
+ srli a3, a3, 1
+ beqz a3, 1f
+ addi a4, a4, -8
+ beq a2, a5, 1b
+
+ /* Found an entry: Clear entry CPENABLE bit to disable CP. */
+
+ rsr a5, CPENABLE
+ s32i a6, a4, COPROCESSOR_INFO_OWNER
+ xor a5, a3, a5
+ wsr a5, CPENABLE
+
+ bnez a3, 1b
+
+1: wsr a7, PS
+ rsync
+ retw
+
+
+ENTRY(disable_coprocessor)
+ entry sp, 16
+ rsil a7, LOCKLEVEL
+ rsr a3, CPENABLE
+ movi a4, 1
+ ssl a2
+ sll a4, a4
+ and a4, a3, a4
+ xor a3, a3, a4
+ wsr a3, CPENABLE
+ wsr a7, PS
+ rsync
+ retw
+
+ENTRY(enable_coprocessor)
+ entry sp, 16
+ rsil a7, LOCKLEVEL
+ rsr a3, CPENABLE
+ movi a4, 1
+ ssl a2
+ sll a4, a4
+ or a3, a3, a4
+ wsr a3, CPENABLE
+ wsr a7, PS
+ rsync
+ retw
+
+#endif
+
+ENTRY(save_coprocessor_extra)
+ entry sp, 16
+ xchal_extra_store_funcbody
+ retw
+
+ENTRY(restore_coprocessor_extra)
+ entry sp, 16
+ xchal_extra_load_funcbody
+ retw
+
+ENTRY(save_coprocessor_registers)
+ entry sp, 16
+ xchal_cpi_store_funcbody
+ retw
+
+ENTRY(restore_coprocessor_registers)
+ entry sp, 16
+ xchal_cpi_load_funcbody
+ retw
+
+
+/*
+ * The Xtensa compile-time HAL (core.h) XCHAL_*_SA_CONTENTS_LIBDB macros
+ * describe the contents of coprocessor & extra save areas in terms of
+ * undefined CONTENTS_LIBDB_{SREG,UREG,REGF} macros. We define these
+ * latter macros here; they expand into a table of the format we want.
+ * The general format is:
+ *
+ * CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum,
+ * bitmask, rsv2, rsv3)
+ * CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum,
+ * bitmask, rsv2, rsv3)
+ * CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index,
+ * numentries, contentsize, regname_base,
+ * regfile_name, rsv2, rsv3)
+ *
+ * For this table, we only care about the <libdbnum>, <offset> and <size>
+ * fields.
+ */
+
+/* Map all XCHAL CONTENTS macros to the reg_entry asm macro defined below: */
+
+#define CONTENTS_LIBDB_SREG(libdbnum,offset,size,align,rsv1,name,sregnum, \
+ bitmask, rsv2, rsv3) \
+ reg_entry libdbnum, offset, size ;
+#define CONTENTS_LIBDB_UREG(libdbnum,offset,size,align,rsv1,name,uregnum, \
+ bitmask, rsv2, rsv3) \
+ reg_entry libdbnum, offset, size ;
+#define CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, \
+ numentries, contentsize, regname_base, \
+ regfile_name, rsv2, rsv3) \
+ reg_entry libdbnum, offset, size ;
+
+/* A single table entry: */
+ .macro reg_entry libdbnum, offset, size
+ .ifne (__last_offset-(__last_group_offset+\offset))
+ /* padding entry */
+ .word (0xFC000000+__last_offset-(__last_group_offset+\offset))
+ .endif
+ .word \libdbnum /* actual entry */
+ .set __last_offset, __last_group_offset+\offset+\size
+ .endm /* reg_entry */
+
+
+/* Table entry that marks the beginning of a group (coprocessor or "extra"): */
+ .macro reg_group cpnum, num_entries, align
+	.set	__last_group_offset, (__last_offset + \align - 1) & -\align
+ .ifne \num_entries
+ .word 0xFD000000+(\cpnum<<16)+\num_entries
+ .endif
+ .endm /* reg_group */
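+
+/* Illustrative expansion (hypothetical libdb numbers): a group for
+ * coprocessor 1 with two contiguous 4-byte registers at save-area
+ * offsets 0 and 4 and libdb numbers 0x300 and 0x301 would emit
+ *
+ *	.word 0xFD010002	# group marker: cp 1, 2 entries
+ *	.word 0x00000300	# entry at offset 0
+ *	.word 0x00000301	# entry at offset 4
+ *
+ * A 0xFC000000-based padding word is only emitted when an entry does not
+ * start where the previous one ended.
+ */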
+
+/*
+ * Register info tables.
+ */
+
+ .section .rodata, "a"
+ .globl _xtensa_reginfo_tables
+ .globl _xtensa_reginfo_table_size
+ .align 4
+_xtensa_reginfo_table_size:
+ .word _xtensa_reginfo_table_end - _xtensa_reginfo_tables
+
+_xtensa_reginfo_tables:
+ .set __last_offset, 0
+ reg_group 0xFF, XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM, XCHAL_EXTRA_SA_ALIGN
+ XCHAL_EXTRA_SA_CONTENTS_LIBDB
+ reg_group 0, XCHAL_CP0_SA_CONTENTS_LIBDB_NUM, XCHAL_CP0_SA_ALIGN
+ XCHAL_CP0_SA_CONTENTS_LIBDB
+ reg_group 1, XCHAL_CP1_SA_CONTENTS_LIBDB_NUM, XCHAL_CP1_SA_ALIGN
+ XCHAL_CP1_SA_CONTENTS_LIBDB
+ reg_group 2, XCHAL_CP2_SA_CONTENTS_LIBDB_NUM, XCHAL_CP2_SA_ALIGN
+ XCHAL_CP2_SA_CONTENTS_LIBDB
+ reg_group 3, XCHAL_CP3_SA_CONTENTS_LIBDB_NUM, XCHAL_CP3_SA_ALIGN
+ XCHAL_CP3_SA_CONTENTS_LIBDB
+ reg_group 4, XCHAL_CP4_SA_CONTENTS_LIBDB_NUM, XCHAL_CP4_SA_ALIGN
+ XCHAL_CP4_SA_CONTENTS_LIBDB
+ reg_group 5, XCHAL_CP5_SA_CONTENTS_LIBDB_NUM, XCHAL_CP5_SA_ALIGN
+ XCHAL_CP5_SA_CONTENTS_LIBDB
+ reg_group 6, XCHAL_CP6_SA_CONTENTS_LIBDB_NUM, XCHAL_CP6_SA_ALIGN
+ XCHAL_CP6_SA_CONTENTS_LIBDB
+ reg_group 7, XCHAL_CP7_SA_CONTENTS_LIBDB_NUM, XCHAL_CP7_SA_ALIGN
+ XCHAL_CP7_SA_CONTENTS_LIBDB
+	.word	0xFC000000	/* invalid register number, marks end of table */
+_xtensa_reginfo_table_end:
+
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
new file mode 100644
index 00000000000..c64a01f71de
--- /dev/null
+++ b/arch/xtensa/kernel/entry.S
@@ -0,0 +1,1996 @@
+/*
+ * arch/xtensa/kernel/entry.S
+ *
+ * Low-level exception handling
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2005 by Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/offsets.h>
+#include <asm/processor.h>
+#include <asm/thread_info.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/signal.h>
+#include <xtensa/coreasm.h>
+
+/* Unimplemented features. */
+
+#undef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
+#undef KERNEL_STACK_OVERFLOW_CHECK
+#undef PREEMPTIBLE_KERNEL
+#undef ALLOCA_EXCEPTION_IN_IRAM
+
+/* Not well tested.
+ *
+ * - fast_coprocessor
+ */
+
+/*
+ * Macro to find first bit set in WINDOWBASE from the left + 1
+ *
+ * 100....0 -> 1
+ * 010....0 -> 2
+ * 000....1 -> WSBITS
+ */
+
+ .macro ffs_ws bit mask
+
+#if XCHAL_HAVE_NSA
+ nsau \bit, \mask # 32-WSBITS ... 31 (32 iff 0)
+	addi	\bit, \bit, WSBITS - 32 + 1	# uppermost bit set -> return 1
+#else
+ movi \bit, WSBITS
+#if WSBITS > 16
+ _bltui \mask, 0x10000, 99f
+ addi \bit, \bit, -16
+ extui \mask, \mask, 16, 16
+#endif
+#if WSBITS > 8
+99: _bltui \mask, 0x100, 99f
+ addi \bit, \bit, -8
+ srli \mask, \mask, 8
+#endif
+99: _bltui \mask, 0x10, 99f
+ addi \bit, \bit, -4
+ srli \mask, \mask, 4
+99: _bltui \mask, 0x4, 99f
+ addi \bit, \bit, -2
+ srli \mask, \mask, 2
+99: _bltui \mask, 0x2, 99f
+ addi \bit, \bit, -1
+99:
+
+#endif
+ .endm
+
+/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
+
+/*
+ * First-level exception handler for user exceptions.
+ * Save some special registers, extra states and all registers in the AR
+ * register file that were in use in the user task, and jump to the common
+ * exception code.
+ * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
+ * save them for kernel exceptions).
+ *
+ * Entry condition for user_exception:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original value in depc
+ * a3: dispatch table
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave1: a3
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Entry condition for _user_exception:
+ *
+ * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
+ * excsave has been restored, and
+ * stack pointer (a1) has been set.
+ *
+ * Note: _user_exception might be at an odd address. Don't use call0..call12
+ */
+
+ENTRY(user_exception)
+
+ /* Save a2, a3, and depc, restore excsave_1 and set SP. */
+
+ xsr a3, EXCSAVE_1
+ rsr a0, DEPC
+ s32i a1, a2, PT_AREG1
+ s32i a0, a2, PT_AREG2
+ s32i a3, a2, PT_AREG3
+ mov a1, a2
+
+ .globl _user_exception
+_user_exception:
+
+ /* Save SAR and turn off single stepping */
+
+ movi a2, 0
+ rsr a3, SAR
+ wsr a2, ICOUNTLEVEL
+ s32i a3, a1, PT_SAR
+
+ /* Rotate ws so that the current windowbase is at bit0. */
+ /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
+
+ rsr a2, WINDOWBASE
+ rsr a3, WINDOWSTART
+ ssr a2
+ s32i a2, a1, PT_WINDOWBASE
+ s32i a3, a1, PT_WINDOWSTART
+ slli a2, a3, 32-WSBITS
+ src a2, a3, a2
+ srli a2, a2, 32-WSBITS
+ s32i a2, a1, PT_WMASK # needed for restoring registers
+
+ /* Save only live registers. */
+
+ _bbsi.l a2, 1, 1f
+ s32i a4, a1, PT_AREG4
+ s32i a5, a1, PT_AREG5
+ s32i a6, a1, PT_AREG6
+ s32i a7, a1, PT_AREG7
+ _bbsi.l a2, 2, 1f
+ s32i a8, a1, PT_AREG8
+ s32i a9, a1, PT_AREG9
+ s32i a10, a1, PT_AREG10
+ s32i a11, a1, PT_AREG11
+ _bbsi.l a2, 3, 1f
+ s32i a12, a1, PT_AREG12
+ s32i a13, a1, PT_AREG13
+ s32i a14, a1, PT_AREG14
+ s32i a15, a1, PT_AREG15
+ _bnei a2, 1, 1f # only one valid frame?
+
+ /* Only one valid frame, skip saving regs. */
+
+ j 2f
+
+ /* Save the remaining registers.
+ * We have to save all registers up to the first '1' from
+ * the right, except the current frame (bit 0).
+ * Assume a2 is: 001001000110001
+ * All regiser frames starting from the top fiel to the marked '1'
+ * must be saved.
+ */
+
+1: addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0
+ neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1
+ and a3, a3, a2 # max. only one bit is set
+
+ /* Find number of frames to save */
+
+ ffs_ws a0, a3 # number of frames to the '1' from left
+
+ /* Store information into WMASK:
+ * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
+ * bits 4...: number of valid 4-register frames
+ */
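+	/* Example (illustrative): wmask = (3 << 4) | 0b0001 records that the
+	 * low nibble of the rotated WINDOWSTART was 0001 and that three
+	 * 4-register frames were saved on the stack below. */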
+
+ slli a3, a0, 4 # number of frames to save in bits 8..4
+ extui a2, a2, 0, 4 # mask for the first 16 registers
+ or a2, a3, a2
+ s32i a2, a1, PT_WMASK # needed when we restore the reg-file
+
+ /* Save 4 registers at a time */
+
+1: rotw -1
+ s32i a0, a5, PT_AREG_END - 16
+ s32i a1, a5, PT_AREG_END - 12
+ s32i a2, a5, PT_AREG_END - 8
+ s32i a3, a5, PT_AREG_END - 4
+ addi a0, a4, -1
+ addi a1, a5, -16
+ _bnez a0, 1b
+
+ /* WINDOWBASE still in SAR! */
+
+ rsr a2, SAR # original WINDOWBASE
+ movi a3, 1
+ ssl a2
+ sll a3, a3
+ wsr a3, WINDOWSTART # set corresponding WINDOWSTART bit
+ wsr a2, WINDOWBASE # and WINDOWSTART
+ rsync
+
+ /* We are back to the original stack pointer (a1) */
+
+2:
+#if XCHAL_EXTRA_SA_SIZE
+
+ /* For user exceptions, save the extra state into the user's TCB.
+ * Note: We must assume that xchal_extra_store_funcbody destroys a2..a15
+ */
+
+ GET_CURRENT(a2,a1)
+ addi a2, a2, THREAD_CP_SAVE
+ xchal_extra_store_funcbody
+#endif
+
+ /* Now, jump to the common exception handler. */
+
+ j common_exception
+
+
+/*
+ * First-level exit handler for kernel exceptions
+ * Save special registers and the live window frame.
+ * Note: Even though we change the stack pointer, we don't have to do a
+ * MOVSP here, as we do that when we return from the exception.
+ * (See comment in the kernel exception exit code)
+ *
+ * Entry condition for kernel_exception:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: dispatch table
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: a3
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Entry condition for _kernel_exception:
+ *
+ * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
+ * excsave has been restored, and
+ * stack pointer (a1) has been set.
+ *
+ * Note: _kernel_exception might be at an odd address. Don't use call0..call12
+ */
+
+ENTRY(kernel_exception)
+
+ /* Save a0, a2, a3, DEPC and set SP. */
+
+ xsr a3, EXCSAVE_1 # restore a3, excsave_1
+ rsr a0, DEPC # get a2
+ s32i a1, a2, PT_AREG1
+ s32i a0, a2, PT_AREG2
+ s32i a3, a2, PT_AREG3
+ mov a1, a2
+
+ .globl _kernel_exception
+_kernel_exception:
+
+ /* Save SAR and turn off single stepping */
+
+ movi a2, 0
+ rsr a3, SAR
+ wsr a2, ICOUNTLEVEL
+ s32i a3, a1, PT_SAR
+
+ /* Rotate ws so that the current windowbase is at bit0. */
+ /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
+
+ rsr a2, WINDOWBASE # don't need to save these, we only
+ rsr a3, WINDOWSTART # need shifted windowstart: windowmask
+ ssr a2
+ slli a2, a3, 32-WSBITS
+ src a2, a3, a2
+ srli a2, a2, 32-WSBITS
+ s32i a2, a1, PT_WMASK # needed for kernel_exception_exit
+
+ /* Save only the live window-frame */
+
+ _bbsi.l a2, 1, 1f
+ s32i a4, a1, PT_AREG4
+ s32i a5, a1, PT_AREG5
+ s32i a6, a1, PT_AREG6
+ s32i a7, a1, PT_AREG7
+ _bbsi.l a2, 2, 1f
+ s32i a8, a1, PT_AREG8
+ s32i a9, a1, PT_AREG9
+ s32i a10, a1, PT_AREG10
+ s32i a11, a1, PT_AREG11
+ _bbsi.l a2, 3, 1f
+ s32i a12, a1, PT_AREG12
+ s32i a13, a1, PT_AREG13
+ s32i a14, a1, PT_AREG14
+ s32i a15, a1, PT_AREG15
+
+1:
+
+#ifdef KERNEL_STACK_OVERFLOW_CHECK
+
+ /* Stack overflow check, for debugging */
+ extui a2, a1, TASK_SIZE_BITS,XX
+ movi a3, SIZE??
+ _bge a2, a3, out_of_stack_panic
+
+#endif
+
+/*
+ * This is the common exception handler.
+ * We get here from the user exception handler or simply by falling through
+ * from the kernel exception handler.
+ * Save the remaining special registers, switch to kernel mode, and jump
+ * to the second-level exception handler.
+ *
+ */
+
+common_exception:
+
+ /* Save EXCVADDR, DEBUGCAUSE, and PC, and clear LCOUNT */
+
+ rsr a2, DEBUGCAUSE
+ rsr a3, EPC_1
+ s32i a2, a1, PT_DEBUGCAUSE
+ s32i a3, a1, PT_PC
+
+ rsr a3, EXCVADDR
+ movi a2, 0
+ s32i a3, a1, PT_EXCVADDR
+ xsr a2, LCOUNT
+ s32i a2, a1, PT_LCOUNT
+
+	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */
+
+ rsr a0, EXCCAUSE
+ movi a3, 0
+ rsr a2, EXCSAVE_1
+ s32i a0, a1, PT_EXCCAUSE
+ s32i a3, a2, EXC_TABLE_FIXUP
+
+ /* All unrecoverable states are saved on stack, now, and a1 is valid,
+ * so we can allow exceptions and interrupts (*) again.
+ * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
+ *
+ * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before
+ * (interrupts disabled) and if this exception is not an interrupt.
+ */
+
+ rsr a3, PS
+ addi a0, a0, -4
+ movi a2, 1
+ extui a3, a3, 0, 1 # a3 = PS.INTLEVEL[0]
+ moveqz a3, a2, a0 # a3 = 1 iff interrupt exception
+ movi a2, PS_WOE_MASK
+ or a3, a3, a2
+ rsr a0, EXCCAUSE
+ xsr a3, PS
+
+ s32i a3, a1, PT_PS # save ps
+
+ /* Save LBEG, LEND */
+
+ rsr a2, LBEG
+ rsr a3, LEND
+ s32i a2, a1, PT_LBEG
+ s32i a3, a1, PT_LEND
+
+ /* Go to second-level dispatcher. Set up parameters to pass to the
+ * exception handler and call the exception handler.
+ */
+
+ movi a4, exc_table
+ mov a6, a1 # pass stack frame
+ mov a7, a0 # pass EXCCAUSE
+ addx4 a4, a0, a4
+ l32i a4, a4, EXC_TABLE_DEFAULT # load handler
+
+ /* Call the second-level handler */
+
+ callx4 a4
+
+ /* Jump here for exception exit */
+
+common_exception_return:
+
+ /* Jump if we are returning from kernel exceptions. */
+
+1: l32i a3, a1, PT_PS
+ _bbsi.l a3, PS_UM_SHIFT, 2f
+ j kernel_exception_exit
+
+ /* Specific to a user exception exit:
+ * We need to check some flags for signal handling and rescheduling,
+ * and have to restore WB and WS, extra states, and all registers
+ * in the register file that were in use in the user task.
+ */
+
+2: wsr a3, PS /* disable interrupts */
+
+ /* Check for signals (keep interrupts disabled while we read TI_FLAGS)
+ * Note: PS.INTLEVEL = 0, PS.EXCM = 1
+ */
+
+ GET_THREAD_INFO(a2,a1)
+ l32i a4, a2, TI_FLAGS
+
+ /* Enable interrupts again.
+ * Note: When we get here, we certainly have handled any interrupts.
+ * (Hint: There is only one user exception frame on stack)
+ */
+
+ movi a3, PS_WOE_MASK
+
+ _bbsi.l a4, TIF_NEED_RESCHED, 3f
+ _bbci.l a4, TIF_SIGPENDING, 4f
+
+#ifndef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
+ l32i a4, a1, PT_DEPC
+ bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
+#endif
+
+ /* Reenable interrupts and call do_signal() */
+
+ wsr a3, PS
+ movi a4, do_signal # int do_signal(struct pt_regs*, sigset_t*)
+ mov a6, a1
+ movi a7, 0
+ callx4 a4
+ j 1b
+
+3: /* Reenable interrupts and reschedule */
+
+ wsr a3, PS
+ movi a4, schedule # void schedule (void)
+ callx4 a4
+ j 1b
+
+ /* Restore the state of the task and return from the exception. */
+
+
+ /* If we are returning from a user exception, and the process
+ * to run next has PT_SINGLESTEP set, we want to setup
+ * ICOUNT and ICOUNTLEVEL to step one instruction.
+ * PT_SINGLESTEP is set by sys_ptrace (ptrace.c)
+ */
+
+4: /* a2 holds GET_CURRENT(a2,a1) */
+
+ l32i a3, a2, TI_TASK
+ l32i a3, a3, TASK_PTRACE
+ bbci.l a3, PT_SINGLESTEP_BIT, 1f # jump if single-step flag is not set
+
+ movi a3, -2 # PT_SINGLESTEP flag is set,
+ movi a4, 1 # icountlevel of 1 means it won't
+ wsr a3, ICOUNT # start counting until after rfe
+ wsr a4, ICOUNTLEVEL # so setup icount & icountlevel.
+ isync
+
+1:
+
+#if XCHAL_EXTRA_SA_SIZE
+
+ /* For user exceptions, restore the extra state from the user's TCB. */
+
+ /* Note: a2 still contains GET_CURRENT(a2,a1) */
+ addi a2, a2, THREAD_CP_SAVE
+ xchal_extra_load_funcbody
+
+	/* We must assume that xchal_extra_load_funcbody destroys
+ * registers a2..a15. FIXME, this list can eventually be
+ * reduced once real register requirements of the macro are
+ * finalized. */
+
+#endif /* XCHAL_EXTRA_SA_SIZE */
+
+
+ /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
+
+ l32i a2, a1, PT_WINDOWBASE
+ l32i a3, a1, PT_WINDOWSTART
+ wsr a1, DEPC # use DEPC as temp storage
+ wsr a3, WINDOWSTART # restore WINDOWSTART
+ ssr a2 # preserve user's WB in the SAR
+ wsr a2, WINDOWBASE # switch to user's saved WB
+ rsync
+ rsr a1, DEPC # restore stack pointer
+ l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9)
+ rotw -1 # we restore a4..a7
+ _bltui a6, 16, 1f # only have to restore current window?
+
+ /* The working registers are a0 and a3. We are restoring to
+ * a4..a7. Be careful not to destroy what we have just restored.
+ * Note: wmask has the format YYYYM:
+ * Y: number of registers saved in groups of 4
+ * M: 4 bit mask of first 16 registers
+ */
+
+ mov a2, a6
+ mov a3, a5
+
+2: rotw -1 # a0..a3 become a4..a7
+ addi a3, a7, -4*4 # next iteration
+ addi a2, a6, -16 # decrementing Y in WMASK
+ l32i a4, a3, PT_AREG_END + 0
+ l32i a5, a3, PT_AREG_END + 4
+ l32i a6, a3, PT_AREG_END + 8
+ l32i a7, a3, PT_AREG_END + 12
+ _bgeui a2, 16, 2b
+
+	/* Clear unrestored registers (don't leak anything to user-land) */
+
+1: rsr a0, WINDOWBASE
+ rsr a3, SAR
+ sub a3, a0, a3
+ beqz a3, 2f
+ extui a3, a3, 0, WBBITS
+
+1: rotw -1
+ addi a3, a7, -1
+ movi a4, 0
+ movi a5, 0
+ movi a6, 0
+ movi a7, 0
+ bgei a3, 1, 1b
+
+	/* We are back where we were when we started.
+ * Note: a2 still contains WMASK (if we've returned to the original
+ * frame where we had loaded a2), or at least the lower 4 bits
+ * (if we have restored WSBITS-1 frames).
+ */
+
+2: j common_exception_exit
+
+ /* This is the kernel exception exit.
+ * We avoided to do a MOVSP when we entered the exception, but we
+ * have to do it here.
+ */
+
+kernel_exception_exit:
+
+ /* Disable interrupts (a3 holds PT_PS) */
+
+ wsr a3, PS
+
+#ifdef PREEMPTIBLE_KERNEL
+
+#ifdef CONFIG_PREEMPT
+
+ /*
+ * Note: We've just returned from a call4, so we have
+ * at least 4 addt'l regs.
+ */
+
+ /* Check current_thread_info->preempt_count */
+
+ GET_THREAD_INFO(a2)
+ l32i a3, a2, TI_PREEMPT
+ bnez a3, 1f
+
+ l32i a2, a2, TI_FLAGS
+
+1:
+
+#endif
+
+#endif
+
+ /* Check if we have to do a movsp.
+ *
+ * We only have to do a movsp if the previous window-frame has
+ * been spilled to the *temporary* exception stack instead of the
+ * task's stack. This is the case if the corresponding bit in
+ * WINDOWSTART for the previous window-frame was set before
+ * (not spilled) but is zero now (spilled).
+ * If this bit is zero, all other bits except the one for the
+ * current window frame are also zero. So, we can use a simple test:
+ * 'and' WINDOWSTART and WINDOWSTART-1:
+ *
+ * (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
+ *
+ * The result is zero only if one bit was set.
+ *
+ * (Note: We might have gone through several task switches before
+ * we come back to the current task, so WINDOWBASE might be
+ * different from the time the exception occurred.)
+ */
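+
+	/* Numeric example (illustrative): WINDOWSTART = 001000 gives
+	 * 001000 AND 000111 = 0, i.e. only the current frame's bit is set,
+	 * so the previous frame was spilled and the movsp below is done.
+	 */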
+
+ /* Test WINDOWSTART before and after the exception.
+ * We actually have WMASK, so we only have to test if it is 1 or not.
+ */
+
+ l32i a2, a1, PT_WMASK
+	_beqi	a2, 1, common_exception_exit	# spilled before exception, jump
+
+ /* Test WINDOWSTART now. If spilled, do the movsp */
+
+ rsr a3, WINDOWSTART
+ addi a0, a3, -1
+ and a3, a3, a0
+ _bnez a3, common_exception_exit
+
+ /* Do a movsp (we returned from a call4, so we have at least a0..a7) */
+
+ addi a0, a1, -16
+ l32i a3, a0, 0
+ l32i a4, a0, 4
+ s32i a3, a1, PT_SIZE+0
+ s32i a4, a1, PT_SIZE+4
+ l32i a3, a0, 8
+ l32i a4, a0, 12
+ s32i a3, a1, PT_SIZE+8
+ s32i a4, a1, PT_SIZE+12
+
+ /* Common exception exit.
+ * We restore the special register and the current window frame, and
+ * return from the exception.
+ *
+ * Note: We expect a2 to hold PT_WMASK
+ */
+
+common_exception_exit:
+
+ _bbsi.l a2, 1, 1f
+ l32i a4, a1, PT_AREG4
+ l32i a5, a1, PT_AREG5
+ l32i a6, a1, PT_AREG6
+ l32i a7, a1, PT_AREG7
+ _bbsi.l a2, 2, 1f
+ l32i a8, a1, PT_AREG8
+ l32i a9, a1, PT_AREG9
+ l32i a10, a1, PT_AREG10
+ l32i a11, a1, PT_AREG11
+ _bbsi.l a2, 3, 1f
+ l32i a12, a1, PT_AREG12
+ l32i a13, a1, PT_AREG13
+ l32i a14, a1, PT_AREG14
+ l32i a15, a1, PT_AREG15
+
+ /* Restore PC, SAR */
+
+1: l32i a2, a1, PT_PC
+ l32i a3, a1, PT_SAR
+ wsr a2, EPC_1
+ wsr a3, SAR
+
+ /* Restore LBEG, LEND, LCOUNT */
+
+ l32i a2, a1, PT_LBEG
+ l32i a3, a1, PT_LEND
+ wsr a2, LBEG
+ l32i a2, a1, PT_LCOUNT
+ wsr a3, LEND
+ wsr a2, LCOUNT
+
+ /* Check if it was double exception. */
+
+ l32i a0, a1, PT_DEPC
+ l32i a3, a1, PT_AREG3
+ l32i a2, a1, PT_AREG2
+ _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+ /* Restore a0...a3 and return */
+
+ l32i a0, a1, PT_AREG0
+ l32i a1, a1, PT_AREG1
+ rfe
+
+1: wsr a0, DEPC
+ l32i a0, a1, PT_AREG0
+ l32i a1, a1, PT_AREG1
+ rfde
+
+/*
+ * Debug exception handler.
+ *
+ * Currently, we don't support KGDB, so only user application can be debugged.
+ *
+ * When we get here, a0 is trashed and saved to excsave[debuglevel]
+ */
+
+ENTRY(debug_exception)
+
+ rsr a0, EPS + XCHAL_DEBUGLEVEL
+ bbsi.l a0, PS_EXCM_SHIFT, 1f # exception mode
+
+ /* Set EPC_1 and EXCCAUSE */
+
+ wsr a2, DEPC # save a2 temporarily
+ rsr a2, EPC + XCHAL_DEBUGLEVEL
+ wsr a2, EPC_1
+
+ movi a2, EXCCAUSE_MAPPED_DEBUG
+ wsr a2, EXCCAUSE
+
+ /* Restore PS to the value before the debug exc but with PS.EXCM set.*/
+
+ movi a2, 1 << PS_EXCM_SHIFT
+ or a2, a0, a2
+ movi a0, debug_exception # restore a3, debug jump vector
+ wsr a2, PS
+ xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL
+
+ /* Switch to kernel/user stack, restore jump vector, and save a0 */
+
+ bbsi.l a2, PS_UM_SHIFT, 2f # jump if user mode
+
+ addi a2, a1, -16-PT_SIZE # assume kernel stack
+ s32i a0, a2, PT_AREG0
+ movi a0, 0
+ s32i a1, a2, PT_AREG1
+ s32i a0, a2, PT_DEPC # mark it as a regular exception
+ xsr a0, DEPC
+ s32i a3, a2, PT_AREG3
+ s32i a0, a2, PT_AREG2
+ mov a1, a2
+ j _kernel_exception
+
+2: rsr a2, EXCSAVE_1
+ l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer
+ s32i a0, a2, PT_AREG0
+ movi a0, 0
+ s32i a1, a2, PT_AREG1
+ s32i a0, a2, PT_DEPC
+ xsr a0, DEPC
+ s32i a3, a2, PT_AREG3
+ s32i a0, a2, PT_AREG2
+ mov a1, a2
+ j _user_exception
+
+ /* Debug exception while in exception mode. */
+1: j 1b // FIXME!!
+
+
+/*
+ * We get here in case of an unrecoverable exception.
+ * The only thing we can do is to be nice and print a panic message.
+ * We only produce a single stack frame for panic, so ???
+ *
+ *
+ * Entry conditions:
+ *
+ * - a0 contains the caller address; original value saved in excsave1.
+ * - the original a0 contains a valid return address (backtrace) or 0.
+ * - a2 contains a valid stackpointer
+ *
+ * Notes:
+ *
+ * - If the stack pointer could be invalid, the caller has to setup a
+ * dummy stack pointer (e.g. the stack of the init_task)
+ *
+ * - If the return address could be invalid, the caller has to set it
+ * to 0, so the backtrace would stop.
+ *
+ */
+ .align 4
+unrecoverable_text:
+ .ascii "Unrecoverable error in exception handler\0"
+
+ENTRY(unrecoverable_exception)
+
+ movi a0, 1
+ movi a1, 0
+
+ wsr a0, WINDOWSTART
+ wsr a1, WINDOWBASE
+ rsync
+
+ movi a1, PS_WOE_MASK | 1
+ wsr a1, PS
+ rsync
+
+ movi a1, init_task
+ movi a0, 0
+ addi a1, a1, PT_REGS_OFFSET
+
+ movi a4, panic
+ movi a6, unrecoverable_text
+
+ callx4 a4
+
+1: j 1b
+
+
+/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
+
+/*
+ * Fast-handler for alloca exceptions
+ *
+ * The ALLOCA handler is entered when user code executes the MOVSP
+ * instruction and the caller's frame is not in the register file.
+ * In this case, the caller frame's a0..a3 are on the stack just
+ * below sp (a1), and this handler moves them.
+ *
+ * For "MOVSP <ar>,<as>" without destination register a1, this routine
+ * simply moves the value from <as> to <ar> without moving the save area.
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: dispatch table
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: a3
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+#if XCHAL_HAVE_BE
+#define _EXTUI_MOVSP_SRC(ar) extui ar, ar, 4, 4
+#define _EXTUI_MOVSP_DST(ar) extui ar, ar, 0, 4
+#else
+#define _EXTUI_MOVSP_SRC(ar) extui ar, ar, 0, 4
+#define _EXTUI_MOVSP_DST(ar) extui ar, ar, 4, 4
+#endif
+
+ENTRY(fast_alloca)
+
+ /* We shouldn't be in a double exception. */
+
+ l32i a0, a2, PT_DEPC
+ _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double
+
+ rsr a0, DEPC # get a2
+ s32i a4, a2, PT_AREG4 # save a4 and
+ s32i a0, a2, PT_AREG2 # a2 to stack
+
+ /* Exit critical section. */
+
+ movi a0, 0
+ s32i a0, a3, EXC_TABLE_FIXUP
+
+ /* Restore a3, excsave_1 */
+
+ xsr a3, EXCSAVE_1 # make sure excsave_1 is valid for dbl.
+ rsr a4, EPC_1 # get exception address
+ s32i a3, a2, PT_AREG3 # save a3 to stack
+
+#ifdef ALLOCA_EXCEPTION_IN_IRAM
+#error iram not supported
+#else
+ /* Note: l8ui not allowed in IRAM/IROM!! */
+ l8ui a0, a4, 1 # read as(src) from MOVSP instruction
+#endif
+ movi a3, .Lmovsp_src
+ _EXTUI_MOVSP_SRC(a0) # extract source register number
+ addx8 a3, a0, a3
+ jx a3
+
+.Lunhandled_double:
+ wsr a0, EXCSAVE_1
+ movi a0, unrecoverable_exception
+ callx0 a0
+
+ .align 8
+.Lmovsp_src:
+ l32i a3, a2, PT_AREG0; _j 1f; .align 8
+ mov a3, a1; _j 1f; .align 8
+ l32i a3, a2, PT_AREG2; _j 1f; .align 8
+ l32i a3, a2, PT_AREG3; _j 1f; .align 8
+ l32i a3, a2, PT_AREG4; _j 1f; .align 8
+ mov a3, a5; _j 1f; .align 8
+ mov a3, a6; _j 1f; .align 8
+ mov a3, a7; _j 1f; .align 8
+ mov a3, a8; _j 1f; .align 8
+ mov a3, a9; _j 1f; .align 8
+ mov a3, a10; _j 1f; .align 8
+ mov a3, a11; _j 1f; .align 8
+ mov a3, a12; _j 1f; .align 8
+ mov a3, a13; _j 1f; .align 8
+ mov a3, a14; _j 1f; .align 8
+ mov a3, a15; _j 1f; .align 8
+
+1:
+
+#ifdef ALLOCA_EXCEPTION_IN_IRAM
+#error iram not supported
+#else
+ l8ui a0, a4, 0 # read ar(dst) from MOVSP instruction
+#endif
+ addi a4, a4, 3 # step over movsp
+ _EXTUI_MOVSP_DST(a0) # extract destination register
+ wsr a4, EPC_1 # save new epc_1
+
+ _bnei a0, 1, 1f # no 'movsp a1, ax': jump
+
+ /* Move the save area. This implies the use of the L32E
+ * and S32E instructions, because this move must be done with
+ * the user's PS.RING privilege levels, not with ring 0
+ * (kernel's) privileges currently active with PS.EXCM
+	 * set. Note that we have still registered a fixup routine with the
+ * double exception vector in case a double exception occurs.
+ */
+
+ /* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */
+
+ l32e a0, a1, -16
+ l32e a4, a1, -12
+ s32e a0, a3, -16
+ s32e a4, a3, -12
+ l32e a0, a1, -8
+ l32e a4, a1, -4
+ s32e a0, a3, -8
+ s32e a4, a3, -4
+
+ /* Restore stack-pointer and all the other saved registers. */
+
+ mov a1, a3
+
+ l32i a4, a2, PT_AREG4
+ l32i a3, a2, PT_AREG3
+ l32i a0, a2, PT_AREG0
+ l32i a2, a2, PT_AREG2
+ rfe
+
+ /* MOVSP <at>,<as> was invoked with <at> != a1.
+ * Because the stack pointer is not being modified,
+ * we should be able to just modify the pointer
+ * without moving any save area.
+ * The processor only traps these occurrences if the
+ * caller window isn't live, so unfortunately we can't
+ * use this as an alternate trap mechanism.
+ * So we just do the move. This requires that we
+ * resolve the destination register, not just the source,
+ * so there's some extra work.
+ * (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
+ */
+
+ /* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */
+
+1: movi a4, .Lmovsp_dst
+ addx8 a4, a0, a4
+ jx a4
+
+ .align 8
+.Lmovsp_dst:
+ s32i a3, a2, PT_AREG0; _j 1f; .align 8
+ mov a1, a3; _j 1f; .align 8
+ s32i a3, a2, PT_AREG2; _j 1f; .align 8
+ s32i a3, a2, PT_AREG3; _j 1f; .align 8
+ s32i a3, a2, PT_AREG4; _j 1f; .align 8
+ mov a5, a3; _j 1f; .align 8
+ mov a6, a3; _j 1f; .align 8
+ mov a7, a3; _j 1f; .align 8
+ mov a8, a3; _j 1f; .align 8
+ mov a9, a3; _j 1f; .align 8
+ mov a10, a3; _j 1f; .align 8
+ mov a11, a3; _j 1f; .align 8
+ mov a12, a3; _j 1f; .align 8
+ mov a13, a3; _j 1f; .align 8
+ mov a14, a3; _j 1f; .align 8
+ mov a15, a3; _j 1f; .align 8
+
+1: l32i a4, a2, PT_AREG4
+ l32i a3, a2, PT_AREG3
+ l32i a0, a2, PT_AREG0
+ l32i a2, a2, PT_AREG2
+ rfe
+
+
+/*
+ * fast system calls.
+ *
+ * WARNING: The kernel doesn't save the entire user context before
+ * handling a fast system call. These functions are small and short,
+ * usually offering some functionality not available to user tasks.
+ *
+ * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: dispatch table
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: a3
+ */
+
+ENTRY(fast_syscall_kernel)
+
+ /* Skip syscall. */
+
+ rsr a0, EPC_1
+ addi a0, a0, 3
+ wsr a0, EPC_1
+
+ l32i a0, a2, PT_DEPC
+ bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
+
+ rsr a0, DEPC # get syscall-nr
+ _beqz a0, fast_syscall_spill_registers
+
+ addi a0, a0, -__NR_sysxtensa
+ _beqz a0, fast_syscall_sysxtensa
+
+ j kernel_exception
+
+
+ENTRY(fast_syscall_user)
+
+ /* Skip syscall. */
+
+ rsr a0, EPC_1
+ addi a0, a0, 3
+ wsr a0, EPC_1
+
+ l32i a0, a2, PT_DEPC
+ bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
+
+ rsr a0, DEPC # get syscall-nr
+ _beqz a0, fast_syscall_spill_registers
+
+ addi a0, a0, -__NR_sysxtensa
+ _beqz a0, fast_syscall_sysxtensa
+
+ j user_exception
+
+ENTRY(fast_syscall_unrecoverable)
+
+ /* Restore all states. */
+
+ l32i a0, a2, PT_AREG0 # restore a0
+ xsr a2, DEPC # restore a2, depc
+ rsr a3, EXCSAVE_1
+
+ wsr a0, EXCSAVE_1
+ movi a0, unrecoverable_exception
+ callx0 a0
+
+
+
+/*
+ * sysxtensa syscall handler
+ *
+ * int sysxtensa (XTENSA_ATOMIC_SET, ptr, val, unused);
+ * int sysxtensa (XTENSA_ATOMIC_ADD, ptr, val, unused);
+ * int sysxtensa (XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
+ * int sysxtensa (XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
+ * a2 a6 a3 a4 a5
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: dispatch table
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: a3
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Note: we don't have to save a2; a2 holds the return value
+ *
+ * We use the two macros TRY and CATCH:
+ *
+ * TRY adds an entry to the __ex_table fixup table for the immediately
+ * following instruction.
+ *
+ * CATCH catches any exception that occurred at one of the preceding TRY
+ * statements and continues from there
+ *
+ * Usage TRY l32i a0, a1, 0
+ * <other code>
+ * done: rfe
+ * CATCH <set return code>
+ * j done
+ */
+
+#define TRY \
+ .section __ex_table, "a"; \
+ .word 66f, 67f; \
+ .text; \
+66:
+
+#define CATCH \
+67:
+
+ENTRY(fast_syscall_sysxtensa)
+
+ _beqz a6, 1f
+ _blti a6, SYSXTENSA_COUNT, 2f
+
+1: j user_exception
+
+2: xsr a3, EXCSAVE_1 # restore a3, excsave1
+ s32i a7, a2, PT_AREG7
+
+ movi a7, 4 # sizeof(unsigned int)
+ verify_area a3, a7, a0, a2, .Leac
+
+ _beqi a6, SYSXTENSA_ATOMIC_SET, .Lset
+ _beqi a6, SYSXTENSA_ATOMIC_EXG_ADD, .Lexg
+ _beqi a6, SYSXTENSA_ATOMIC_ADD, .Ladd
+
+ /* Fall through for SYSXTENSA_ATOMIC_CMP_SWP */
+
+.Lswp: /* Atomic compare and swap */
+
+TRY l32i a7, a3, 0 # read old value
+	bne	a7, a4, 1f		# not the expected old value? jump
+	s32i	a5, a3, 0		# matches, store the new value
+ movi a7, 1 # and return 1
+ j .Lret
+
+1:	movi	a7, 0			# values differ: return 0
+ j .Lret
+
+.Ladd: /* Atomic add */
+.Lexg: /* Atomic (exchange) add */
+
+TRY l32i a7, a3, 0
+ add a4, a4, a7
+ s32i a4, a3, 0
+ j .Lret
+
+.Lset: /* Atomic set */
+
+TRY l32i a7, a3, 0 # read old value as return value
+ s32i a4, a3, 0 # write new value
+
+.Lret: mov a0, a2
+ mov a2, a7
+ l32i a7, a0, PT_AREG7
+ l32i a3, a0, PT_AREG3
+ l32i a0, a0, PT_AREG0
+ rfe
+
+CATCH
+.Leac: movi a7, -EFAULT
+ j .Lret
+
+
+
+/* fast_syscall_spill_registers.
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: dispatch table
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: a3
+ *
+ * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
+ * Note: We don't need to save a2 in depc (return value)
+ */
+
+ENTRY(fast_syscall_spill_registers)
+
+ /* Register a FIXUP handler (pass current wb as a parameter) */
+
+ movi a0, fast_syscall_spill_registers_fixup
+ s32i a0, a3, EXC_TABLE_FIXUP
+ rsr a0, WINDOWBASE
+ s32i a0, a3, EXC_TABLE_PARAM
+
+ /* Save a3 and SAR on stack. */
+
+ rsr a0, SAR
+ xsr a3, EXCSAVE_1 # restore a3 and excsave_1
+ s32i a0, a2, PT_AREG4 # store SAR to PT_AREG4
+ s32i a3, a2, PT_AREG3
+
+ /* The spill routine might clobber a7, a11, and a15. */
+
+ s32i a7, a2, PT_AREG5
+ s32i a11, a2, PT_AREG6
+ s32i a15, a2, PT_AREG7
+
+ call0 _spill_registers # destroys a3, DEPC, and SAR
+
+ /* Advance PC, restore registers and SAR, and return from exception. */
+
+ l32i a3, a2, PT_AREG4
+ l32i a0, a2, PT_AREG0
+ wsr a3, SAR
+ l32i a3, a2, PT_AREG3
+
+ /* Restore clobbered registers. */
+
+ l32i a7, a2, PT_AREG5
+ l32i a11, a2, PT_AREG6
+ l32i a15, a2, PT_AREG7
+
+ movi a2, 0
+ rfe
+
+/* Fixup handler.
+ *
+ * We get here if the spill routine causes an exception, e.g. tlb miss.
+ * We basically restore WINDOWBASE and WINDOWSTART to the condition when
+ * we entered the spill routine and jump to the user exception handler.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+fast_syscall_spill_registers_fixup:
+
+ rsr a2, WINDOWBASE # get current windowbase (a2 is saved)
+ xsr a0, DEPC # restore depc and a0
+ ssl a2 # set shift (32 - WB)
+
+ /* We need to make sure the current registers (a0-a3) are preserved.
+ * To do this, we simply set the bit for the current window frame
+ * in WS, so that the exception handlers save them to the task stack.
+ */
+
+ rsr a3, EXCSAVE_1 # get spill-mask
+ slli a2, a3, 1 # shift left by one
+
+ slli a3, a2, 32-WSBITS
+	src	a2, a2, a3		# a2 = xxwww1yyxxxwww1yy......
+ wsr a2, WINDOWSTART # set corrected windowstart
+
+ movi a3, exc_table
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE # restore a2
+ l32i a3, a3, EXC_TABLE_PARAM # original WB (in user task)
+
+ /* Return to the original (user task) WINDOWBASE.
+ * We leave the following frame behind:
+ * a0, a1, a2 same
+ * a3: trashed (saved in excsave_1)
+ * depc: depc (we have to return to that address)
+ * excsave_1: a3
+ */
+
+ wsr a3, WINDOWBASE
+ rsync
+
+ /* We are now in the original frame when we entered _spill_registers:
+ * a0: return address
+ * a1: used, stack pointer
+ * a2: kernel stack pointer
+ * a3: available, saved in EXCSAVE_1
+ * depc: exception address
+ * excsave: a3
+ * Note: This frame might be the same as above.
+ */
+
+#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
+	/* Restore the registers we saved as a precaution.
+	 * We have the value of the 'right' a3.
+	 */
+
+ l32i a7, a2, PT_AREG5
+ l32i a11, a2, PT_AREG6
+ l32i a15, a2, PT_AREG7
+#endif
+
+ /* Setup stack pointer. */
+
+ addi a2, a2, -PT_USER_SIZE
+ s32i a0, a2, PT_AREG0
+
+ /* Make sure we return to this fixup handler. */
+
+ movi a3, fast_syscall_spill_registers_fixup_return
+ s32i a3, a2, PT_DEPC # setup depc
+
+ /* Jump to the exception handler. */
+
+ movi a3, exc_table
+ rsr a0, EXCCAUSE
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_USER # load handler
+ jx a0
+
+fast_syscall_spill_registers_fixup_return:
+
+ /* When we return here, all registers have been restored (a2: DEPC) */
+
+ wsr a2, DEPC # exception address
+
+ /* Restore fixup handler. */
+
+ xsr a3, EXCSAVE_1
+ movi a2, fast_syscall_spill_registers_fixup
+ s32i a2, a3, EXC_TABLE_FIXUP
+ rsr a2, WINDOWBASE
+ s32i a2, a3, EXC_TABLE_PARAM
+ l32i a2, a3, EXC_TABLE_KSTK
+
+#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
+ /* Save registers again that might be clobbered. */
+
+ s32i a7, a2, PT_AREG5
+ s32i a11, a2, PT_AREG6
+ s32i a15, a2, PT_AREG7
+#endif
+
+ /* Load WB at the time the exception occurred. */
+
+ rsr a3, SAR # WB is still in SAR
+ neg a3, a3
+ wsr a3, WINDOWBASE
+ rsync
+
+ /* Restore a3 and return. */
+
+ movi a3, exc_table
+ xsr a3, EXCSAVE_1
+
+ rfde
+
+
+/*
+ * spill all registers.
+ *
+ * This is not a real function. The following conditions must be met:
+ *
+ * - must be called with call0.
+ * - uses DEPC, a3 and SAR.
+ * - the last 'valid' register of each frame is clobbered.
+ * - the caller must have registered a fixup handler
+ * (or be inside a critical section)
+ * - PS_EXCM must be set (PS_WOE cleared?)
+ */
+
+ENTRY(_spill_registers)
+
+ /*
+ * Rotate ws so that the current windowbase is at bit 0.
+ * Assume ws = xxxwww1yy (www1 current window frame).
+	 * Rotate ws right so that a3 = yyxxxwww1.
+ */
+
+ wsr a2, DEPC # preserve a2
+ rsr a2, WINDOWBASE
+ rsr a3, WINDOWSTART
+ ssr a2 # holds WB
+ slli a2, a3, WSBITS
+	or	a3, a3, a2		# a3 = xxxwww1yyxxxwww1yy
+ srl a3, a3
+
+ /* We are done if there are no more than the current register frame. */
+
+ extui a3, a3, 1, WSBITS-2 # a3 = 0yyxxxwww
+ movi a2, (1 << (WSBITS-1))
+ _beqz a3, .Lnospill # only one active frame? jump
+
+ /* We want 1 at the top, so that we return to the current windowbase */
+
+ or a3, a3, a2 # 1yyxxxwww
+
+ /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
+
+ wsr a3, WINDOWSTART # save shifted windowstart
+ neg a2, a3
+ and a3, a2, a3 # first bit set from right: 000010000
+
+ ffs_ws a2, a3 # a2: shifts to skip empty frames
+ movi a3, WSBITS
+ sub a2, a3, a2 # WSBITS-a2:number of 0-bits from right
+ ssr a2 # save in SAR for later.
+
+ rsr a3, WINDOWBASE
+ add a3, a3, a2
+ rsr a2, DEPC # restore a2
+ wsr a3, WINDOWBASE
+ rsync
+
+ rsr a3, WINDOWSTART
+ srl a3, a3 # shift windowstart
+
+	/* WB is now just one frame below the oldest frame in the register
+	   window. WS is shifted so the oldest frame is in bit 0; thus, WB
+	   and WS differ by one 4-register frame. */
+
+	/* Save frames. Depending on what call was used (call4, call8, or
+	 * call12), we have to save 4, 8, or 12 registers.
+	 */
+
+ _bbsi.l a3, 1, .Lc4
+ _bbsi.l a3, 2, .Lc8
+
+ /* Special case: we have a call12-frame starting at a4. */
+
+ _bbci.l a3, 3, .Lc12 # bit 3 shouldn't be zero! (Jump to Lc12 first)
+
+ s32e a4, a1, -16 # a1 is valid with an empty spill area
+ l32e a4, a5, -12
+ s32e a8, a4, -48
+ mov a8, a4
+ l32e a4, a1, -16
+ j .Lc12c
+
+.Lloop: _bbsi.l a3, 1, .Lc4
+ _bbci.l a3, 2, .Lc12
+
+.Lc8: s32e a4, a13, -16
+ l32e a4, a5, -12
+ s32e a8, a4, -32
+ s32e a5, a13, -12
+ s32e a6, a13, -8
+ s32e a7, a13, -4
+ s32e a9, a4, -28
+ s32e a10, a4, -24
+ s32e a11, a4, -20
+
+	srli	a11, a3, 2		# shift windowstart mask by 2
+ rotw 2
+ _bnei a3, 1, .Lloop
+
+.Lexit: /* Done. Do the final rotation, set WS, and return. */
+
+ rotw 1
+ rsr a3, WINDOWBASE
+ ssl a3
+ movi a3, 1
+ sll a3, a3
+ wsr a3, WINDOWSTART
+
+.Lnospill:
+ jx a0
+
+.Lc4: s32e a4, a9, -16
+ s32e a5, a9, -12
+ s32e a6, a9, -8
+ s32e a7, a9, -4
+
+ srli a7, a3, 1
+ rotw 1
+ _bnei a3, 1, .Lloop
+ j .Lexit
+
+.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 3 shouldn't be zero!
+
+ /* 12-register frame (call12) */
+
+ l32e a2, a5, -12
+ s32e a8, a2, -48
+ mov a8, a2
+
+.Lc12c: s32e a9, a8, -44
+ s32e a10, a8, -40
+ s32e a11, a8, -36
+ s32e a12, a8, -32
+ s32e a13, a8, -28
+ s32e a14, a8, -24
+ s32e a15, a8, -20
+ srli a15, a3, 3
+
+	/* The stack pointer for a4..a7 is out of reach, so we rotate the
+	 * window, grab the stack pointer, and rotate back.
+ * Alternatively, we could also use the following approach, but that
+ * makes the fixup routine much more complicated:
+ * rotw 1
+ * s32e a0, a13, -16
+ * ...
+ * rotw 2
+ */
+
+ rotw 1
+ mov a5, a13
+ rotw -1
+
+ s32e a4, a9, -16
+ s32e a5, a9, -12
+ s32e a6, a9, -8
+ s32e a7, a9, -4
+
+ rotw 3
+
+ _beqi a3, 1, .Lexit
+ j .Lloop
+
+.Linvalid_mask:
+
+ /* We get here because of an unrecoverable error in the window
+ * registers. If we are in user space, we kill the application,
+ * however, this condition is unrecoverable in kernel space.
+ */
+
+ rsr a0, PS
+ _bbci.l a0, PS_UM_SHIFT, 1f
+
+	/* User space: Set up a dummy frame and kill the application.
+ * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
+ */
+
+ movi a0, 1
+ movi a1, 0
+
+ wsr a0, WINDOWSTART
+ wsr a1, WINDOWBASE
+ rsync
+
+ movi a0, 0
+
+ movi a3, exc_table
+ l32i a1, a3, EXC_TABLE_KSTK
+ wsr a3, EXCSAVE_1
+
+ movi a4, PS_WOE_MASK | 1
+ wsr a4, PS
+ rsync
+
+ movi a6, SIGSEGV
+ movi a4, do_exit
+ callx4 a4
+
+1: /* Kernel space: PANIC! */
+
+ wsr a0, EXCSAVE_1
+ movi a0, unrecoverable_exception
+ callx0 a0 # should not return
+1: j 1b
+
+/*
+ * We should never get here. Bail out!
+ */
+
+ENTRY(fast_second_level_miss_double_kernel)
+
+1: movi a0, unrecoverable_exception
+ callx0 a0 # should not return
+1: j 1b
+
+/* First-level entry handler for user, kernel, and double 2nd-level
+ * TLB miss exceptions. Note that for now, user and kernel miss
+ * exceptions share the same entry point and are handled identically.
+ *
+ * An old, less-efficient C version of this function used to exist.
+ * We include it below, interleaved as comments, for reference.
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: dispatch table
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: a3
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_second_level_miss)
+
+ /* Save a1. Note: we don't expect a double exception. */
+
+ s32i a1, a2, PT_AREG1
+
+ /* We need to map the page of PTEs for the user task. Find
+ * the pointer to that page. Also, it's possible for tsk->mm
+ * to be NULL while tsk->active_mm is nonzero if we faulted on
+ * a vmalloc address. In that rare case, we must use
+ * active_mm instead to avoid a fault in this handler. See
+ *
+ * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
+ * (or search Internet on "mm vs. active_mm")
+ *
+ * if (!mm)
+ * mm = tsk->active_mm;
+ * pgd = pgd_offset (mm, regs->excvaddr);
+ * pmd = pmd_offset (pgd, regs->excvaddr);
+ * pmdval = *pmd;
+ */
+
+ GET_CURRENT(a1,a2)
+ l32i a0, a1, TASK_MM # tsk->mm
+ beqz a0, 9f
+
+8: rsr a1, EXCVADDR # fault address
+ _PGD_OFFSET(a0, a1, a1)
+ l32i a0, a0, 0 # read pmdval
+ //beqi a0, _PAGE_USER, 2f
+ beqz a0, 2f
+
+ /* Read ptevaddr and convert to top of page-table page.
+ *
+ * vpnval = read_ptevaddr_register() & PAGE_MASK;
+ * vpnval += DTLB_WAY_PGTABLE;
+ * pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
+ * write_dtlb_entry (pteval, vpnval);
+ *
+ * The messy computation for 'pteval' above really simplifies
+ * into the following:
+ *
+ * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_KERNEL
+ */
+
+ movi a1, -PAGE_OFFSET
+ add a0, a0, a1 # pmdval - PAGE_OFFSET
+ extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK
+ xor a0, a0, a1
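+	/* The extui/xor pair above implements "& PAGE_MASK": extui copies
+	 * the low PAGE_SHIFT bits of a0 into a1, and the xor then clears
+	 * exactly those bits in a0.
+	 */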
+
+
+ movi a1, PAGE_DIRECTORY
+ or a0, a0, a1 # ... | PAGE_DIRECTORY
+
+ rsr a1, PTEVADDR
+ srli a1, a1, PAGE_SHIFT
+ slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK
+ addi a1, a1, DTLB_WAY_PGTABLE # ... + way_number
+
+ wdtlb a0, a1
+ dsync
+
+ /* Exit critical section. */
+
+ movi a0, 0
+ s32i a0, a3, EXC_TABLE_FIXUP
+
+ /* Restore the working registers, and return. */
+
+ l32i a0, a2, PT_AREG0
+ l32i a1, a2, PT_AREG1
+ l32i a2, a2, PT_DEPC
+ xsr a3, EXCSAVE_1
+
+ bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+ /* Restore excsave1 and return. */
+
+ rsr a2, DEPC
+ rfe
+
+ /* Return from double exception. */
+
+1: xsr a2, DEPC
+ esync
+ rfde
+
+9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
+ j 8b
+
+2: /* Invalid PGD, default exception handling */
+
+ rsr a1, DEPC
+ xsr a3, EXCSAVE_1
+ s32i a1, a2, PT_AREG2
+ s32i a3, a2, PT_AREG3
+ mov a1, a2
+
+ rsr a2, PS
+ bbsi.l a2, PS_UM_SHIFT, 1f
+ j _kernel_exception
+1: j _user_exception
+
+
+/*
+ * StoreProhibitedException
+ *
+ * Update the pte and invalidate the itlb mapping for this pte.
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: dispatch table
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: a3
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_store_prohibited)
+
+ /* Save a1 and a4. */
+
+ s32i a1, a2, PT_AREG1
+ s32i a4, a2, PT_AREG4
+
+ GET_CURRENT(a1,a2)
+ l32i a0, a1, TASK_MM # tsk->mm
+ beqz a0, 9f
+
+8: rsr a1, EXCVADDR # fault address
+ _PGD_OFFSET(a0, a1, a4)
+ l32i a0, a0, 0
+ //beqi a0, _PAGE_USER, 2f # FIXME use _PAGE_INVALID
+ beqz a0, 2f
+
+ _PTE_OFFSET(a0, a1, a4)
+ l32i a4, a0, 0 # read pteval
+ movi a1, _PAGE_VALID | _PAGE_RW
+ bnall a4, a1, 2f
+
+ movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRENABLE
+ or a4, a4, a1
+ rsr a1, EXCVADDR
+ s32i a4, a0, 0
+
+ /* We need to flush the cache if we have page coloring. */
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+ dhwb a0, 0
+#endif
+ pdtlb a0, a1
+ beqz a0, 1f
+ idtlb a0 // FIXME do we need this?
+ wdtlb a4, a0
+1:
+
+ /* Exit critical section. */
+
+ movi a0, 0
+ s32i a0, a3, EXC_TABLE_FIXUP
+
+ /* Restore the working registers, and return. */
+
+ l32i a4, a2, PT_AREG4
+ l32i a1, a2, PT_AREG1
+ l32i a0, a2, PT_AREG0
+ l32i a2, a2, PT_DEPC
+
+ /* Restore excsave1 and a3. */
+
+ xsr a3, EXCSAVE_1
+ bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+ rsr a2, DEPC
+ rfe
+
+ /* Double exception. Restore FIXUP handler and return. */
+
+1: xsr a2, DEPC
+ esync
+ rfde
+
+9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
+ j 8b
+
+2: /* If there was a problem, handle fault in C */
+
+ rsr a4, DEPC # still holds a2
+ xsr a3, EXCSAVE_1
+ s32i a4, a2, PT_AREG2
+ s32i a3, a2, PT_AREG3
+ l32i a4, a2, PT_AREG4
+ mov a1, a2
+
+ rsr a2, PS
+ bbsi.l a2, PS_UM_SHIFT, 1f
+ j _kernel_exception
+1: j _user_exception
+
+
+#if XCHAL_EXTRA_SA_SIZE
+
+#warning fast_coprocessor untested
+
+/*
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: dispatch table
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: a3
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_coprocessor_double)
+ wsr a0, EXCSAVE_1
+ movi a0, unrecoverable_exception
+ callx0 a0
+
+ENTRY(fast_coprocessor)
+
+ /* Fatal if we are in a double exception. */
+
+ l32i a0, a2, PT_DEPC
+ _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_coprocessor_double
+
+	/* Save registers a1, a3, a4, a5, and SAR. */
+
+ xsr a3, EXCSAVE_1
+ s32i a3, a2, PT_AREG3
+ rsr a3, SAR
+ s32i a4, a2, PT_AREG4
+ s32i a1, a2, PT_AREG1
+	s32i	a5, a2, PT_AREG5
+ s32i a3, a2, PT_SAR
+ mov a1, a2
+
+ /* Currently, the HAL macros only guarantee saving a0 and a1.
+ * These can and will be refined in the future, but for now,
+ * just save the remaining registers of a2...a15.
+ */
+ s32i a6, a1, PT_AREG6
+ s32i a7, a1, PT_AREG7
+ s32i a8, a1, PT_AREG8
+ s32i a9, a1, PT_AREG9
+ s32i a10, a1, PT_AREG10
+ s32i a11, a1, PT_AREG11
+ s32i a12, a1, PT_AREG12
+ s32i a13, a1, PT_AREG13
+ s32i a14, a1, PT_AREG14
+ s32i a15, a1, PT_AREG15
+
+ /* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
+
+ rsr a0, EXCCAUSE
+ addi a3, a0, -XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED
+
+ /* Set corresponding CPENABLE bit */
+
+ movi a4, 1
+ ssl a3 # SAR: 32 - coprocessor_number
+ rsr a5, CPENABLE
+ sll a4, a4
+ or a4, a5, a4
+ wsr a4, CPENABLE
+ rsync
+ movi a5, coprocessor_info # list of owner and offset into cp_save
+ addx8 a0, a4, a5 # entry for CP
+
+ bne a4, a5, .Lload # bit wasn't set before, cp not in use
+
+ /* Now compare the current task with the owner of the coprocessor.
+ * If they are the same, there is no reason to save or restore any
+ * coprocessor state. Having already enabled the coprocessor,
+ * branch ahead to return.
+ */
+ GET_CURRENT(a5,a1)
+ l32i a4, a0, COPROCESSOR_INFO_OWNER # a4: current owner for this CP
+ beq a4, a5, .Ldone
+
+ /* Find location to dump current coprocessor state:
+ * task_struct->task_cp_save_offset + coprocessor_offset[coprocessor]
+ *
+ * Note: a0 pointer to the entry in the coprocessor owner table,
+ * a3 coprocessor number,
+ * a4 current owner of coprocessor.
+ */
+ l32i a5, a0, COPROCESSOR_INFO_OFFSET
+ addi a2, a4, THREAD_CP_SAVE
+ add a2, a2, a5
+
+ /* Store current coprocessor states. (a5 still has CP number) */
+
+ xchal_cpi_store_funcbody
+
+ /* The macro might have destroyed a3 (coprocessor number), but
+ * SAR still has 32 - coprocessor_number!
+ */
+ movi a3, 32
+ rsr a4, SAR
+ sub a3, a3, a4
+
+.Lload:	/* A new task now owns the coprocessor. Save its TCB pointer into
+ * the coprocessor owner table.
+ *
+ * Note: a0 pointer to the entry in the coprocessor owner table,
+ * a3 coprocessor number.
+ */
+ GET_CURRENT(a4,a1)
+ s32i a4, a0, 0
+
+ /* Find location from where to restore the current coprocessor state.*/
+
+ l32i a5, a0, COPROCESSOR_INFO_OFFSET
+ addi a2, a4, THREAD_CP_SAVE
+	add	a2, a2, a5		# add the coprocessor save-area offset
+
+ xchal_cpi_load_funcbody
+
+	/* We must assume that the xchal_cpi_load_funcbody macro destroyed
+	 * registers a2..a15.
+	 */
+
+.Ldone: l32i a15, a1, PT_AREG15
+ l32i a14, a1, PT_AREG14
+ l32i a13, a1, PT_AREG13
+ l32i a12, a1, PT_AREG12
+ l32i a11, a1, PT_AREG11
+ l32i a10, a1, PT_AREG10
+ l32i a9, a1, PT_AREG9
+ l32i a8, a1, PT_AREG8
+ l32i a7, a1, PT_AREG7
+ l32i a6, a1, PT_AREG6
+ l32i a5, a1, PT_AREG5
+ l32i a4, a1, PT_AREG4
+ l32i a3, a1, PT_AREG3
+ l32i a2, a1, PT_AREG2
+ l32i a0, a1, PT_AREG0
+ l32i a1, a1, PT_AREG1
+
+ rfe
+
+#endif /* XCHAL_EXTRA_SA_SIZE */
+
+/*
+ * Task switch.
+ *
+ * struct task* _switch_to (struct task* prev, struct task* next)
+ * a2 a2 a3
+ */
+
+ENTRY(_switch_to)
+
+ entry a1, 16
+
+ mov a4, a3 # preserve a3
+
+ s32i a0, a2, THREAD_RA # save return address
+ s32i a1, a2, THREAD_SP # save stack pointer
+
+ /* Disable ints while we manipulate the stack pointer; spill regs. */
+
+ movi a5, PS_EXCM_MASK | LOCKLEVEL
+ xsr a5, PS
+ rsr a3, EXCSAVE_1
+ rsync
+ s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
+
+ call0 _spill_registers
+
+ /* Set kernel stack (and leave critical section)
+	 * Note: It's safe to set it here. The stack will not be overwritten
+ * because the kernel stack will only be loaded again after
+ * we return from kernel space.
+ */
+
+ l32i a0, a4, TASK_THREAD_INFO
+ rsr a3, EXCSAVE_1 # exc_table
+ movi a1, 0
+ addi a0, a0, PT_REGS_OFFSET
+ s32i a1, a3, EXC_TABLE_FIXUP
+ s32i a0, a3, EXC_TABLE_KSTK
+
+ /* restore context of the task that 'next' addresses */
+
+ l32i a0, a4, THREAD_RA /* restore return address */
+ l32i a1, a4, THREAD_SP /* restore stack pointer */
+
+ wsr a5, PS
+ rsync
+
+ retw
+
+
+ENTRY(ret_from_fork)
+
+ /* void schedule_tail (struct task_struct *prev)
+ * Note: prev is still in a6 (return value from fake call4 frame)
+ */
+ movi a4, schedule_tail
+ callx4 a4
+
+ movi a4, do_syscall_trace
+ callx4 a4
+
+ j common_exception_return
+
+
+
+/*
+ * Table of syscalls
+ */
+
+.data
+.align 4
+.global sys_call_table
+sys_call_table:
+
+#define SYSCALL(call, narg) .word call
+#include "syscalls.h"
+
+/*
+ * Number of arguments of each syscall
+ */
+
+.global sys_narg_table
+sys_narg_table:
+
+#undef SYSCALL
+#define SYSCALL(call, narg) .byte narg
+#include "syscalls.h"
+
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
new file mode 100644
index 00000000000..6e9b5225b8f
--- /dev/null
+++ b/arch/xtensa/kernel/head.S
@@ -0,0 +1,237 @@
+/*
+ * arch/xtensa/kernel/head.S
+ *
+ * Xtensa Processor startup code.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Kevin Chea
+ */
+
+#include <xtensa/cacheasm.h>
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+
+/*
+ * This module contains the entry code for kernel images. It performs the
+ * minimal setup needed to call the generic C routines.
+ *
+ * Prerequisites:
+ *
+ * - The kernel image has been loaded to the actual address where it was
+ * compiled to.
+ * - a2 contains either 0 or a pointer to a list of boot parameters.
+ * (see setup.c for more details)
+ *
+ */
+
+	.macro	iterate	from, to, cmd
+ .ifeq ((\to - \from) & ~0xfff)
+ \cmd \from
+ iterate "(\from+1)", \to, \cmd
+ .endif
+ .endm
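+	/* For example, "iterate 0, 2, reset_timer" expands to
+	 * "reset_timer 0; reset_timer 1; reset_timer 2": the .ifeq test
+	 * ends the recursion once \from has passed \to.
+	 */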
+
+/*
+ * _start
+ *
+ * The bootloader passes a pointer to a list of boot parameters in a2.
+ */
+
+ /* The first bytes of the kernel image must be an instruction, so we
+ * manually allocate and define the literal constant we need for a jx
+ * instruction.
+ */
+
+ .section .head.text, "ax"
+ .globl _start
+_start: _j 2f
+ .align 4
+1: .word _startup
+2: l32r a0, 1b
+ jx a0
+
+ .text
+ .align 4
+_startup:
+
+ /* Disable interrupts and exceptions. */
+
+ movi a0, XCHAL_PS_EXCM_MASK
+ wsr a0, PS
+
+ /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
+
+ wsr a2, EXCSAVE_1
+
+ /* Start with a fresh windowbase and windowstart. */
+
+ movi a1, 1
+ movi a0, 0
+ wsr a1, WINDOWSTART
+ wsr a0, WINDOWBASE
+ rsync
+
+ /* Set a0 to 0 for the remaining initialization. */
+
+ movi a0, 0
+
+ /* Clear debugging registers. */
+
+#if XCHAL_HAVE_DEBUG
+ wsr a0, IBREAKENABLE
+ wsr a0, ICOUNT
+ movi a1, 15
+ wsr a0, ICOUNTLEVEL
+
+ .macro reset_dbreak num
+ wsr a0, DBREAKC + \num
+ .endm
+
+ iterate 0, XCHAL_NUM_IBREAK-1, reset_dbreak
+#endif
+
+ /* Clear CCOUNT (not really necessary, but nice) */
+
+	wsr	a0, CCOUNT
+
+ /* Disable zero-loops. */
+
+#if XCHAL_HAVE_LOOPS
+ wsr a0, LCOUNT
+#endif
+
+ /* Disable all timers. */
+
+ .macro reset_timer num
+ wsr a0, CCOMPARE_0 + \num
+ .endm
+ iterate 0, XCHAL_NUM_TIMERS-1, reset_timer
+
+ /* Interrupt initialization. */
+
+ movi a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
+ wsr a0, INTENABLE
+ wsr a2, INTCLEAR
+
+ /* Disable coprocessors. */
+
+#if XCHAL_CP_NUM > 0
+ wsr a0, CPENABLE
+#endif
+
+ /* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
+ *
+ * Note: PS.EXCM must be cleared before using any loop
+ * instructions; otherwise, they are silently disabled, and
+ * at most one iteration of the loop is executed.
+ */
+
+ movi a1, 1
+ wsr a1, PS
+ rsync
+
+ /* Initialize the caches.
+ * Does not include flushing writeback d-cache.
+ * a6, a7 are just working registers (clobbered).
+ */
+
+ icache_reset a2, a3
+ dcache_reset a2, a3
+
+ /* Unpack data sections
+ *
+	 * The linker script used to build the Linux kernel image
+	 * creates a table located at __boot_reloc_table_start
+	 * that contains information about what data needs to be unpacked.
+ *
+ * Uses a2-a7.
+ */
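+
+/* Hedged C equivalent of the copy loop below. The entry layout is taken
+ * from the loads in the loop -- three words per entry: destination start,
+ * destination end, source start; the struct and variable names are
+ * illustrative only:
+ *
+ *	struct reloc { unsigned long *dst, *dst_end, *src; } *r;
+ *	for (r = table_start; r < table_end; r++) {
+ *		unsigned long *dst = r->dst, *src = r->src;
+ *		if (dst == r->dst_end || dst == src)
+ *			continue;		// empty or in-place entry
+ *		while (dst < r->dst_end)
+ *			*dst++ = *src++;	// copy one word at a time
+ *	}
+ */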
+
+ movi a2, __boot_reloc_table_start
+ movi a3, __boot_reloc_table_end
+
+1: beq a2, a3, 3f # no more entries?
+ l32i a4, a2, 0 # start destination (in RAM)
+	l32i	a5, a2, 4	# end destination (in RAM)
+ l32i a6, a2, 8 # start source (in ROM)
+ addi a2, a2, 12 # next entry
+ beq a4, a5, 1b # skip, empty entry
+ beq a4, a6, 1b # skip, source and dest. are the same
+
+2: l32i a7, a6, 0 # load word
+ addi a6, a6, 4
+ s32i a7, a4, 0 # store word
+ addi a4, a4, 4
+ bltu a4, a5, 2b
+ j 1b
+
+3:
+ /* All code and initialized data segments have been copied.
+ * Now clear the BSS segment.
+ */
+
+ movi a2, _bss_start # start of BSS
+ movi a3, _bss_end # end of BSS
+
+1:	s32i	a0, a2, 0		# clear one word
+	addi	a2, a2, 4
+	blt	a2, a3, 1b
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+
+ /* After unpacking, flush the writeback cache to memory so the
+ * instructions/data are available.
+ */
+
+ dcache_writeback_all a2, a3
+#endif
+
+ /* Setup stack and enable window exceptions (keep irqs disabled) */
+
+ movi a1, init_thread_union
+ addi a1, a1, KERNEL_STACK_SIZE
+
+ movi a2, 0x00040001 # WOE=1, INTLEVEL=1, UM=0
+ wsr a2, PS # (enable reg-windows; progmode stack)
+ rsync
+
+ /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
+
+ movi a2, debug_exception
+ wsr a2, EXCSAVE + XCHAL_DEBUGLEVEL
+
+ /* Set up EXCSAVE[1] to point to the exc_table. */
+
+ movi a6, exc_table
+ xsr a6, EXCSAVE_1
+
+ /* init_arch kick-starts the linux kernel */
+
+ movi a4, init_arch
+ callx4 a4
+
+ movi a4, start_kernel
+ callx4 a4
+
+should_never_return:
+ j should_never_return
+
+ /* Define some common data structures here. We define them
+ * here in this assembly file due to their unusual alignment
+ * requirements.
+ */
+
+ .comm swapper_pg_dir,PAGE_SIZE,PAGE_SIZE
+ .comm empty_bad_page_table,PAGE_SIZE,PAGE_SIZE
+ .comm empty_bad_page,PAGE_SIZE,PAGE_SIZE
+ .comm empty_zero_page,PAGE_SIZE,PAGE_SIZE
+
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
new file mode 100644
index 00000000000..4cbf6d91571
--- /dev/null
+++ b/arch/xtensa/kernel/irq.c
@@ -0,0 +1,192 @@
+/*
+ * linux/arch/xtensa/kernel/irq.c
+ *
+ * Xtensa built-in interrupt controller and some generic functions copied
+ * from i386.
+ *
+ * Copyright (C) 2002 - 2005 Tensilica, Inc.
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Kevin Chea
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/uaccess.h>
+#include <asm/platform.h>
+
+static void enable_xtensa_irq(unsigned int irq);
+static void disable_xtensa_irq(unsigned int irq);
+static void mask_and_ack_xtensa(unsigned int irq);
+static void end_xtensa_irq(unsigned int irq);
+
+static unsigned int cached_irq_mask;
+
+atomic_t irq_err_count;
+
+/*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+ printk("unexpected IRQ trap at vector %02x\n", irq);
+}
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+
+unsigned int do_IRQ(int irq, struct pt_regs *regs)
+{
+ irq_enter();
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ /* Debugging check for stack overflow: is there less than 1KB free? */
+ {
+ unsigned long sp;
+
+ __asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
+ sp &= THREAD_SIZE - 1;
+
+		if (unlikely(sp < (sizeof(struct thread_info) + 1024)))
+ printk("Stack overflow in do_IRQ: %ld\n",
+ sp - sizeof(struct thread_info));
+ }
+#endif
+
+ __do_IRQ(irq, regs);
+
+ irq_exit();
+
+ return 1;
+}
+
+/*
+ * Generic, controller-independent functions:
+ */
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int i = *(loff_t *) v, j;
+ struct irqaction * action;
+ unsigned long flags;
+
+ if (i == 0) {
+ seq_printf(p, " ");
+ for (j=0; j<NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "CPU%d ",j);
+ seq_putc(p, '\n');
+ }
+
+ if (i < NR_IRQS) {
+ spin_lock_irqsave(&irq_desc[i].lock, flags);
+ action = irq_desc[i].action;
+ if (!action)
+ goto skip;
+ seq_printf(p, "%3d: ",i);
+#ifndef CONFIG_SMP
+ seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+ for (j = 0; j < NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+#endif
+ seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %s", action->name);
+
+ for (action=action->next; action; action = action->next)
+ seq_printf(p, ", %s", action->name);
+
+ seq_putc(p, '\n');
+skip:
+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ } else if (i == NR_IRQS) {
+ seq_printf(p, "NMI: ");
+ for (j = 0; j < NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "%10u ", nmi_count(j));
+ seq_putc(p, '\n');
+ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+ }
+ return 0;
+}
+
+/* shutdown is the same as "disable" */
+#define shutdown_xtensa_irq disable_xtensa_irq
+
+static unsigned int startup_xtensa_irq(unsigned int irq)
+{
+ enable_xtensa_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static struct hw_interrupt_type xtensa_irq_type = {
+ "Xtensa-IRQ",
+ startup_xtensa_irq,
+ shutdown_xtensa_irq,
+ enable_xtensa_irq,
+ disable_xtensa_irq,
+ mask_and_ack_xtensa,
+ end_xtensa_irq
+};
+
+static inline void mask_irq(unsigned int irq)
+{
+ cached_irq_mask &= ~(1 << irq);
+ set_sr (cached_irq_mask, INTENABLE);
+}
+
+static inline void unmask_irq(unsigned int irq)
+{
+ cached_irq_mask |= 1 << irq;
+ set_sr (cached_irq_mask, INTENABLE);
+}
+
+static void disable_xtensa_irq(unsigned int irq)
+{
+ unsigned long flags;
+ local_save_flags(flags);
+ mask_irq(irq);
+ local_irq_restore(flags);
+}
+
+static void enable_xtensa_irq(unsigned int irq)
+{
+ unsigned long flags;
+ local_save_flags(flags);
+ unmask_irq(irq);
+ local_irq_restore(flags);
+}
+
+static void mask_and_ack_xtensa(unsigned int irq)
+{
+ disable_xtensa_irq(irq);
+}
+
+static void end_xtensa_irq(unsigned int irq)
+{
+ enable_xtensa_irq(irq);
+}
+
+
+void __init init_IRQ(void)
+{
+ int i;
+
+ for (i=0; i < XTENSA_NR_IRQS; i++)
+ irq_desc[i].handler = &xtensa_irq_type;
+
+ cached_irq_mask = 0;
+
+ platform_init_irq();
+}
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
new file mode 100644
index 00000000000..d1683cfa19a
--- /dev/null
+++ b/arch/xtensa/kernel/module.c
@@ -0,0 +1,78 @@
+/*
+ * arch/xtensa/kernel/module.c
+ *
+ * Module support.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/cache.h>
+
+LIST_HEAD(module_buf_list);
+
+void *module_alloc(unsigned long size)
+{
+ panic("module_alloc not implemented");
+}
+
+void module_free(struct module *mod, void *module_region)
+{
+ panic("module_free not implemented");
+}
+
+int module_frob_arch_sections(Elf32_Ehdr *hdr,
+ Elf32_Shdr *sechdrs,
+ char *secstrings,
+ struct module *me)
+{
+ panic("module_frob_arch_sections not implemented");
+}
+
+int apply_relocate(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *module)
+{
+ panic ("apply_relocate not implemented");
+}
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *module)
+{
+ panic("apply_relocate_add not implemented");
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ panic ("module_finalize not implemented");
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+ panic("module_arch_cleanup not implemented");
+}
+
+struct bug_entry *module_find_bug(unsigned long bugaddr)
+{
+ panic("module_find_bug not implemented");
+}
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
new file mode 100644
index 00000000000..84fde258cf8
--- /dev/null
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -0,0 +1,73 @@
+/*
+ * arch/xtensa/kernel/pci-dma.c
+ *
+ * DMA coherent memory allocation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Copyright (C) 2002 - 2005 Tensilica Inc.
+ *
+ * Based on version for i386.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Note: We assume that the full memory space is always mapped to 'kseg'
+ * Otherwise we have to use page attributes (not implemented).
+ */
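+
+/* Typical (illustrative) use by a driver; RING_BYTES is a hypothetical
+ * size constant:
+ *
+ *	dma_addr_t phys;
+ *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &phys, GFP_KERNEL);
+ *	// 'ring' is an uncached bypass-region virtual address;
+ *	// 'phys' is the bus address to program into the device.
+ */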
+
+void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
+{
+ void *ret;
+
+	/* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+ if (dev == NULL || (*dev->dma_mask < 0xffffffff))
+ gfp |= GFP_DMA;
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+ *handle = virt_to_bus(ret);
+ }
+ return (void*) BYPASS_ADDR((unsigned long)ret);
+}
+
+void dma_free_coherent(struct device *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ free_pages(CACHED_ADDR((unsigned long)vaddr), get_order(size));
+}
+
+
+void consistent_sync(void *vaddr, size_t size, int direction)
+{
+ switch (direction) {
+ case PCI_DMA_NONE:
+ BUG();
+ case PCI_DMA_FROMDEVICE: /* invalidate only */
+ __invalidate_dcache_range((unsigned long)vaddr,
+ (unsigned long)size);
+ break;
+
+ case PCI_DMA_TODEVICE: /* writeback only */
+ case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
+ __flush_invalidate_dcache_range((unsigned long)vaddr,
+ (unsigned long)size);
+ break;
+ }
+}
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
new file mode 100644
index 00000000000..d29a8164863
--- /dev/null
+++ b/arch/xtensa/kernel/pci.c
@@ -0,0 +1,563 @@
+/*
+ * arch/xtensa/kernel/pci.c
+ *
+ * PCI bios-type initialisation for PCI machines
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Copyright (C) 2001-2005 Tensilica Inc.
+ *
+ * Based largely on work from Cort (ppc/kernel/pci.c)
+ * IO functions copied from sparc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bootmem.h>
+
+#include <asm/pci-bridge.h>
+#include <asm/platform.h>
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+/* PCI Controller */
+
+
+/*
+ * pcibios_alloc_controller
+ * pcibios_enable_device
+ * pcibios_fixups
+ * pcibios_align_resource
+ * pcibios_fixup_bus
+ * pcibios_setup
+ * pci_bus_add_device
+ * pci_mmap_page_range
+ */
+
+struct pci_controller* pci_ctrl_head;
+struct pci_controller** pci_ctrl_tail = &pci_ctrl_head;
+
+static int pci_bus_count;
+
+static void pcibios_fixup_resources(struct pci_dev* dev);
+
+#if 0 // FIXME
+struct pci_fixup pcibios_fixups[] = {
+ { DECLARE_PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources },
+ { 0 }
+};
+#endif
+
+void
+pcibios_update_resource(struct pci_dev *dev, struct resource *root,
+ struct resource *res, int resource)
+{
+ u32 new, check, mask;
+ int reg;
+ struct pci_controller* pci_ctrl = dev->sysdata;
+
+ new = res->start;
+ if (pci_ctrl && res->flags & IORESOURCE_IO) {
+ new -= pci_ctrl->io_space.base;
+ }
+ new |= (res->flags & PCI_REGION_FLAG_MASK);
+ if (resource < 6) {
+ reg = PCI_BASE_ADDRESS_0 + 4*resource;
+ } else if (resource == PCI_ROM_RESOURCE) {
+ res->flags |= PCI_ROM_ADDRESS_ENABLE;
+ reg = dev->rom_base_reg;
+ } else {
+		/* Somebody requested allocation of a non-standard resource */
+ return;
+ }
+
+ pci_write_config_dword(dev, reg, new);
+ pci_read_config_dword(dev, reg, &check);
+ mask = (new & PCI_BASE_ADDRESS_SPACE_IO) ?
+ PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK;
+
+ if ((new ^ check) & mask) {
+ printk(KERN_ERR "PCI: Error while updating region "
+ "%s/%d (%08x != %08x)\n", dev->slot_name, resource,
+ new, check);
+ }
+}
+
+/*
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might be mirrored at 0x0100-0x03ff.
+ */
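+/* Worked example: a request at 0x2900 has (0x2900 & 0x300) != 0, so the
+ * code below rounds it up to the next 0x400 boundary:
+ * (0x2900 + 0x3ff) & ~0x3ff = 0x2c00, which decodes unambiguously.
+ */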
+void
+pcibios_align_resource(void *data, struct resource *res, unsigned long size,
+ unsigned long align)
+{
+ struct pci_dev *dev = data;
+
+ if (res->flags & IORESOURCE_IO) {
+ unsigned long start = res->start;
+
+ if (size > 0x100) {
+ printk(KERN_ERR "PCI: I/O Region %s/%d too large"
+ " (%ld bytes)\n", dev->slot_name,
+ dev->resource - res, size);
+ }
+
+ if (start & 0x300) {
+ start = (start + 0x3ff) & ~0x3ff;
+ res->start = start;
+ }
+ }
+}
+
+int
+pcibios_enable_resources(struct pci_dev *dev, int mask)
+{
+ u16 cmd, old_cmd;
+ int idx;
+ struct resource *r;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+ for(idx=0; idx<6; idx++) {
+ r = &dev->resource[idx];
+ if (!r->start && r->end) {
+ printk (KERN_ERR "PCI: Device %s not available because "
+ "of resource collisions\n", dev->slot_name);
+ return -EINVAL;
+ }
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+ if (dev->resource[PCI_ROM_RESOURCE].start)
+ cmd |= PCI_COMMAND_MEMORY;
+ if (cmd != old_cmd) {
+ printk("PCI: Enabling device %s (%04x -> %04x)\n",
+ dev->slot_name, old_cmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
+}
+
+struct pci_controller * __init pcibios_alloc_controller(void)
+{
+ struct pci_controller *pci_ctrl;
+
+ pci_ctrl = (struct pci_controller *)alloc_bootmem(sizeof(*pci_ctrl));
+ memset(pci_ctrl, 0, sizeof(struct pci_controller));
+
+ *pci_ctrl_tail = pci_ctrl;
+ pci_ctrl_tail = &pci_ctrl->next;
+
+ return pci_ctrl;
+}
+
+static int __init pcibios_init(void)
+{
+ struct pci_controller *pci_ctrl;
+ struct pci_bus *bus;
+ int next_busno = 0, i;
+
+ printk("PCI: Probing PCI hardware\n");
+
+ /* Scan all of the recorded PCI controllers. */
+ for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
+ pci_ctrl->last_busno = 0xff;
+ bus = pci_scan_bus(pci_ctrl->first_busno, pci_ctrl->ops,
+ pci_ctrl);
+ if (pci_ctrl->io_resource.flags) {
+ unsigned long offs;
+
+ offs = (unsigned long)pci_ctrl->io_space.base;
+ pci_ctrl->io_resource.start += offs;
+ pci_ctrl->io_resource.end += offs;
+ bus->resource[0] = &pci_ctrl->io_resource;
+ }
+ for (i = 0; i < 3; ++i)
+ if (pci_ctrl->mem_resources[i].flags)
+ bus->resource[i+1] =&pci_ctrl->mem_resources[i];
+ pci_ctrl->bus = bus;
+ pci_ctrl->last_busno = bus->subordinate;
+ if (next_busno <= pci_ctrl->last_busno)
+ next_busno = pci_ctrl->last_busno+1;
+ }
+ pci_bus_count = next_busno;
+
+ return platform_pcibios_fixup();
+}
+
+subsys_initcall(pcibios_init);
+
+void __init pcibios_fixup_bus(struct pci_bus *bus)
+{
+ struct pci_controller *pci_ctrl = bus->sysdata;
+ struct resource *res;
+ unsigned long io_offset;
+ int i;
+
+ io_offset = (unsigned long)pci_ctrl->io_space.base;
+ if (bus->parent == NULL) {
+ /* this is a host bridge - fill in its resources */
+ pci_ctrl->bus = bus;
+
+ bus->resource[0] = res = &pci_ctrl->io_resource;
+ if (!res->flags) {
+ if (io_offset)
+ printk (KERN_ERR "I/O resource not set for host"
+ " bridge %d\n", pci_ctrl->index);
+ res->start = 0;
+ res->end = IO_SPACE_LIMIT;
+ res->flags = IORESOURCE_IO;
+ }
+ res->start += io_offset;
+ res->end += io_offset;
+
+ for (i = 0; i < 3; i++) {
+ res = &pci_ctrl->mem_resources[i];
+ if (!res->flags) {
+ if (i > 0)
+ continue;
+ printk(KERN_ERR "Memory resource not set for "
+ "host bridge %d\n", pci_ctrl->index);
+ res->start = 0;
+ res->end = ~0U;
+ res->flags = IORESOURCE_MEM;
+ }
+ bus->resource[i+1] = res;
+ }
+ } else {
+ /* This is a subordinate bridge */
+ pci_read_bridge_bases(bus);
+
+ for (i = 0; i < 4; i++) {
+ if ((res = bus->resource[i]) == NULL || !res->flags)
+ continue;
+ if (io_offset && (res->flags & IORESOURCE_IO)) {
+ res->start += io_offset;
+ res->end += io_offset;
+ }
+ }
+ }
+}
+
+char __init *pcibios_setup(char *str)
+{
+ return str;
+}
+
+/* the next one is stolen from the alpha port... */
+
+void __init
+pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ u16 cmd, old_cmd;
+ int idx;
+ struct resource *r;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+ for (idx=0; idx<6; idx++) {
+ r = &dev->resource[idx];
+ if (!r->start && r->end) {
+ printk(KERN_ERR "PCI: Device %s not available because "
+ "of resource collisions\n", dev->slot_name);
+ return -EINVAL;
+ }
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+ if (cmd != old_cmd) {
+ printk("PCI: Enabling device %s (%04x -> %04x)\n",
+ dev->slot_name, old_cmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Return the index of the PCI controller for device pdev.
+ */
+
+int
+pci_controller_num(struct pci_dev *dev)
+{
+ struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
+ return pci_ctrl->index;
+}
+
+#endif /* CONFIG_PROC_FS */
+
+
+static void
+pcibios_fixup_resources(struct pci_dev *dev)
+{
+ struct pci_controller* pci_ctrl = (struct pci_controller *)dev->sysdata;
+ int i;
+ unsigned long offset;
+
+ if (!pci_ctrl) {
+ printk(KERN_ERR "No pci_ctrl for PCI dev %s!\n",dev->slot_name);
+ return;
+ }
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ struct resource *res = dev->resource + i;
+ if (!res->start || !res->flags)
+ continue;
+ if (res->end == 0xffffffff) {
+ DBG("PCI:%s Resource %d [%08lx-%08lx] is unassigned\n",
+ dev->slot_name, i, res->start, res->end);
+ res->end -= res->start;
+ res->start = 0;
+ continue;
+ }
+ offset = 0;
+ if (res->flags & IORESOURCE_IO)
+ offset = (unsigned long) pci_ctrl->io_space.base;
+ else if (res->flags & IORESOURCE_MEM)
+ offset = (unsigned long) pci_ctrl->mem_space.base;
+
+ if (offset != 0) {
+ res->start += offset;
+ res->end += offset;
+#ifdef DEBUG
+ printk("Fixup res %d (%lx) of dev %s: %lx -> %lx\n",
+ i, res->flags, dev->slot_name,
+ res->start - offset, res->start);
+#endif
+ }
+ }
+}
+
+/*
+ * Platform support for /proc/bus/pci/X/Y mmap()s,
+ * modelled on the sparc64 implementation by Dave Miller.
+ * -- paulus.
+ */
+
+/*
+ * Adjust vm_pgoff of VMA such that it is the physical page offset
+ * corresponding to the 32-bit pci bus offset for DEV requested by the user.
+ *
+ * Basically, the user finds the base address of the device they wish
+ * to mmap. They read the 32-bit value from the config space base register,
+ * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
+ * offset parameter of mmap on /proc/bus/pci/XXX for that device.
+ *
+ * Returns negative error code on failure, zero on success.
+ */
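+/* Illustrative user-space sequence (hedged; the /proc path and BAR value
+ * are examples only):
+ *
+ *	int fd = open("/proc/bus/pci/00/01.0", O_RDWR);
+ *	// offset = 32-bit base register value (say 0xfd000000) + 0:
+ *	void *bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+ *			 MAP_SHARED, fd, 0xfd000000);
+ */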
+static __inline__ int
+__pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state)
+{
+ struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long io_offset = 0;
+ int i, res_bit;
+
+ if (pci_ctrl == 0)
+ return -EINVAL; /* should never happen */
+
+ /* If memory, add on the PCI bridge address offset */
+ if (mmap_state == pci_mmap_mem) {
+ res_bit = IORESOURCE_MEM;
+ } else {
+ io_offset = (unsigned long)pci_ctrl->io_space.base;
+ offset += io_offset;
+ res_bit = IORESOURCE_IO;
+ }
+
+ /*
+ * Check that the offset requested corresponds to one of the
+ * resources of the device.
+ */
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+ struct resource *rp = &dev->resource[i];
+ int flags = rp->flags;
+
+ /* treat ROM as memory (should be already) */
+ if (i == PCI_ROM_RESOURCE)
+ flags |= IORESOURCE_MEM;
+
+ /* Active and same type? */
+ if ((flags & res_bit) == 0)
+ continue;
+
+ /* In the range of this resource? */
+ if (offset < (rp->start & PAGE_MASK) || offset > rp->end)
+ continue;
+
+ /* found it! construct the final physical address */
+ if (mmap_state == pci_mmap_io)
+ offset += pci_ctrl->io_space.start - io_offset;
+ vma->vm_pgoff = offset >> PAGE_SHIFT;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * Set vm_flags of VMA, as appropriate for this architecture, for a pci device
+ * mapping.
+ */
+static __inline__ void
+__pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state)
+{
+ vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
+}
+
+/*
+ * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
+ * device mapping.
+ */
+static __inline__ void
+__pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine)
+{
+ int prot = pgprot_val(vma->vm_page_prot);
+
+ /* Set to write-through */
+ prot &= ~_PAGE_NO_CACHE;
+#if 0
+ if (!write_combine)
+ prot |= _PAGE_WRITETHRU;
+#endif
+ vma->vm_page_prot = __pgprot(prot);
+}
+
+/*
+ * Perform the actual remap of the pages for a PCI device mapping, as
+ * appropriate for this architecture. The region in the process to map
+ * is described by vm_start and vm_end members of VMA, the base physical
+ * address is found in vm_pgoff.
+ * The pci device structure is provided so that architectures may make mapping
+ * decisions on a per-device or per-bus basis.
+ *
+ * Returns a negative error code on failure, zero on success.
+ */
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state,
+ int write_combine)
+{
+ int ret;
+
+ ret = __pci_mmap_make_offset(dev, vma, mmap_state);
+ if (ret < 0)
+ return ret;
+
+ __pci_mmap_set_flags(dev, vma, mmap_state);
+ __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
+
+ ret = io_remap_page_range(vma, vma->vm_start, vma->vm_pgoff<<PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+
+ return ret;
+}
+
+/*
+ * This probably belongs here rather than ioport.c because
+ * we do not want this crud linked into SBus kernels.
+ * Also, think for a moment about the likes of floppy.c that
+ * include architecture-specific parts. They may want to redefine ins/outs.
+ *
+ * We do not use horrible macros here because we want to
+ * advance the pointer by sizeof(size).
+ */
+void outsb(unsigned long addr, const void *src, unsigned long count) {
+ while (count) {
+ count -= 1;
+ writeb(*(const char *)src, addr);
+ src += 1;
+ addr += 1;
+ }
+}
+
+void outsw(unsigned long addr, const void *src, unsigned long count) {
+ while (count) {
+ count -= 2;
+ writew(*(const short *)src, addr);
+ src += 2;
+ addr += 2;
+ }
+}
+
+void outsl(unsigned long addr, const void *src, unsigned long count) {
+ while (count) {
+ count -= 4;
+ writel(*(const long *)src, addr);
+ src += 4;
+ addr += 4;
+ }
+}
+
+void insb(unsigned long addr, void *dst, unsigned long count) {
+ while (count) {
+ count -= 1;
+ *(unsigned char *)dst = readb(addr);
+ dst += 1;
+ addr += 1;
+ }
+}
+
+void insw(unsigned long addr, void *dst, unsigned long count) {
+ while (count) {
+ count -= 2;
+ *(unsigned short *)dst = readw(addr);
+ dst += 2;
+ addr += 2;
+ }
+}
+
+void insl(unsigned long addr, void *dst, unsigned long count) {
+ while (count) {
+ count -= 4;
+ /*
+ * XXX I am sure we are in for an unaligned trap here.
+ */
+ *(unsigned long *)dst = readl(addr);
+ dst += 4;
+ addr += 4;
+ }
+}
+
+
+
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
new file mode 100644
index 00000000000..cf136278444
--- /dev/null
+++ b/arch/xtensa/kernel/platform.c
@@ -0,0 +1,49 @@
+/*
+ * arch/xtensa/kernel/platform.c
+ *
+ * Default platform functions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <asm/platform.h>
+#include <asm/timex.h>
+
+#define _F(r,f,a,b) \
+ r __platform_##f a b; \
+ r platform_##f a __attribute__((weak, alias("__platform_"#f)))
+
+/*
+ * Default functions that are used if no platform specific function is defined.
+ * (Please, refer to include/asm-xtensa/platform.h for more information)
+ */
+
+_F(void, setup, (char** cmd), { });
+_F(void, init_irq, (void), { });
+_F(void, restart, (void), { while(1); });
+_F(void, halt, (void), { while(1); });
+_F(void, power_off, (void), { while(1); });
+_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
+_F(void, heartbeat, (void), { });
+_F(int, pcibios_fixup, (void), { return 0; });
+_F(int, get_rtc_time, (time_t* t), { return 0; });
+_F(int, set_rtc_time, (time_t t), { return 0; });
+
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+_F(void, calibrate_ccount, (void),
+{
+ printk ("ERROR: Cannot calibrate cpu frequency! Assuming 100MHz.\n");
+ ccount_per_jiffy = 100 * (1000000UL/HZ);
+});
+#endif
+
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
new file mode 100644
index 00000000000..4099703b14b
--- /dev/null
+++ b/arch/xtensa/kernel/process.c
@@ -0,0 +1,482 @@
+// TODO verify coprocessor handling
+/*
+ * arch/xtensa/kernel/process.c
+ *
+ * Xtensa Processor version.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Kevin Chea
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/elf.h>
+#include <linux/init.h>
+#include <linux/prctl.h>
+#include <linux/init_task.h>
+#include <linux/module.h>
+#include <linux/mqueue.h>
+
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/platform.h>
+#include <asm/mmu.h>
+#include <asm/irq.h>
+#include <asm/atomic.h>
+#include <asm/offsets.h>
+#include <asm/coprocessor.h>
+
+extern void ret_from_fork(void);
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+EXPORT_SYMBOL(init_mm);
+
+union thread_union init_thread_union
+ __attribute__((__section__(".data.init_task"))) =
+{ INIT_THREAD_INFO(init_task) };
+
+struct task_struct init_task = INIT_TASK(init_task);
+EXPORT_SYMBOL(init_task);
+
+struct task_struct *current_set[NR_CPUS] = {&init_task, };
+
+
+#if XCHAL_CP_NUM > 0
+
+/*
+ * Coprocessor ownership.
+ */
+
+coprocessor_info_t coprocessor_info[] = {
+ { 0, XTENSA_CPE_CP0_OFFSET },
+ { 0, XTENSA_CPE_CP1_OFFSET },
+ { 0, XTENSA_CPE_CP2_OFFSET },
+ { 0, XTENSA_CPE_CP3_OFFSET },
+ { 0, XTENSA_CPE_CP4_OFFSET },
+ { 0, XTENSA_CPE_CP5_OFFSET },
+ { 0, XTENSA_CPE_CP6_OFFSET },
+ { 0, XTENSA_CPE_CP7_OFFSET },
+};
+
+#endif
+
+/*
+ * Power-management idle function, if one is provided by the platform.
+ */
+
+void cpu_idle(void)
+{
+ local_irq_enable();
+
+ /* endless idle loop with no priority at all */
+ while (1) {
+ while (!need_resched())
+ platform_idle();
+ preempt_enable();
+ schedule();
+ }
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+
+void exit_thread(void)
+{
+ release_coprocessors(current); /* Empty macro if no CPs are defined */
+}
+
+void flush_thread(void)
+{
+ release_coprocessors(current); /* Empty macro if no CPs are defined */
+}
+
+/*
+ * Copy thread.
+ *
+ * The stack layout for the new thread looks like this:
+ *
+ * +------------------------+ <- sp in childregs (= tos)
+ * | childregs |
+ * +------------------------+ <- thread.sp = sp in dummy-frame
+ * | dummy-frame | (saved in dummy-frame spill-area)
+ * +------------------------+
+ *
+ * We create a dummy frame to return to ret_from_fork:
+ * a0 points to ret_from_fork (simulating a call4)
+ * sp points to itself (thread.sp)
+ * a2, a3 are unused.
+ *
+ * Note: This is a pristine frame, so we don't need any spill region on top of
+ * childregs.
+ */
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+ unsigned long unused,
+ struct task_struct * p, struct pt_regs * regs)
+{
+ struct pt_regs *childregs;
+ unsigned long tos;
+ int user_mode = user_mode(regs);
+
+ /* Set up new TSS. */
+ tos = (unsigned long)p->thread_info + THREAD_SIZE;
+ if (user_mode)
+ childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
+ else
+ childregs = (struct pt_regs*)tos - 1;
+
+ *childregs = *regs;
+
+ /* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
+ *((int*)childregs - 3) = (unsigned long)childregs;
+ *((int*)childregs - 4) = 0;
+
+ childregs->areg[1] = tos;
+ childregs->areg[2] = 0;
+ p->set_child_tid = p->clear_child_tid = NULL;
+ p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
+ p->thread.sp = (unsigned long)childregs;
+	if (user_mode) {		/* reuse the value computed above */
+
+ int len = childregs->wmask & ~0xf;
+ childregs->areg[1] = usp;
+ memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
+ &regs->areg[XCHAL_NUM_AREGS - len/4], len);
+
+ if (clone_flags & CLONE_SETTLS)
+ childregs->areg[2] = childregs->areg[6];
+
+ } else {
+ /* In kernel space, we start a new thread with a new stack. */
+ childregs->wmask = 1;
+ }
+ return 0;
+}
+
+
+/*
+ * Create a kernel thread
+ */
+
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+ long retval;
+ __asm__ __volatile__
+ ("mov a5, %4\n\t" /* preserve fn in a5 */
+ "mov a6, %3\n\t" /* preserve and setup arg in a6 */
+ "movi a2, %1\n\t" /* load __NR_clone for syscall*/
+ "mov a3, sp\n\t" /* sp check and sys_clone */
+ "mov a4, %5\n\t" /* load flags for syscall */
+ "syscall\n\t"
+ "beq a3, sp, 1f\n\t" /* branch if parent */
+ "callx4 a5\n\t" /* call fn */
+ "movi a2, %2\n\t" /* load __NR_exit for syscall */
+ "mov a3, a6\n\t" /* load fn return value */
+ "syscall\n"
+ "1:\n\t"
+ "mov %0, a2\n\t" /* parent returns zero */
+ :"=r" (retval)
+ :"i" (__NR_clone), "i" (__NR_exit),
+ "r" (arg), "r" (fn),
+ "r" (flags | CLONE_VM)
+ : "a2", "a3", "a4", "a5", "a6" );
+ return retval;
+}
+
+
+/*
+ * These bracket the sleeping functions..
+ */
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long sp, pc;
+ unsigned long stack_page = (unsigned long) p->thread_info;
+ int count = 0;
+
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ sp = p->thread.sp;
+ pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
+
+ do {
+ if (sp < stack_page + sizeof(struct task_struct) ||
+ sp >= (stack_page + THREAD_SIZE) ||
+ pc == 0)
+ return 0;
+ if (!in_sched_functions(pc))
+ return pc;
+
+		/* Stack layout: sp-4: ra, sp-3: sp' (in word offsets) */
+
+		pc = MAKE_PC_FROM_RA(*((unsigned long *)sp - 4), sp);
+		sp = *((unsigned long *)sp - 3);
+ } while (count++ < 16);
+ return 0;
+}
+
+/*
+ * do_copy_regs() gathers information from 'struct pt_regs' and
+ * 'current->thread.areg[]' to fill in the xtensa_gregset_t
+ * structure.
+ *
+ * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
+ * of processor registers. Besides different ordering,
+ * xtensa_gregset_t contains non-live register information that
+ * 'struct pt_regs' does not. Exception handling (primarily) uses
+ * 'struct pt_regs'. Core files and ptrace use xtensa_gregset_t.
+ *
+ */
+
+void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
+ struct task_struct *tsk)
+{
+ int i, n, wb_offset;
+
+ elfregs->xchal_config_id0 = XCHAL_HW_CONFIGID0;
+ elfregs->xchal_config_id1 = XCHAL_HW_CONFIGID1;
+
+ __asm__ __volatile__ ("rsr %0, 176\n" : "=a" (i));
+ elfregs->cpux = i;
+ __asm__ __volatile__ ("rsr %0, 208\n" : "=a" (i));
+ elfregs->cpuy = i;
+
+	/* Note: PS.EXCM is not set while the user task is running; it
+	 * is set in regs->ps only for exception handling convenience.
+ */
+
+ elfregs->pc = regs->pc;
+ elfregs->ps = (regs->ps & ~XCHAL_PS_EXCM_MASK);
+ elfregs->exccause = regs->exccause;
+ elfregs->excvaddr = regs->excvaddr;
+ elfregs->windowbase = regs->windowbase;
+ elfregs->windowstart = regs->windowstart;
+ elfregs->lbeg = regs->lbeg;
+ elfregs->lend = regs->lend;
+ elfregs->lcount = regs->lcount;
+ elfregs->sar = regs->sar;
+ elfregs->syscall = regs->syscall;
+
+ /* Copy register file.
+ * The layout looks like this:
+ *
+ * | a0 ... a15 | Z ... Z | arX ... arY |
+ * current window unused saved frames
+ */
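+	/* Worked example: with windowbase == 2 (wb_offset == 8) and
+	 * XCHAL_NUM_AREGS == 64, the live a0 lands in elfregs->ar[8];
+	 * with a full 16-register window, a15 lands in elfregs->ar[23].
+	 */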
+
+ memset (elfregs->ar, 0, sizeof(elfregs->ar));
+
+ wb_offset = regs->windowbase * 4;
+ n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;
+
+ for (i = 0; i < n; i++)
+ elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
+
+ n = (regs->wmask >> 4) * 4;
+
+ for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
+ elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
+}
+
+void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
+{
+ do_copy_regs (elfregs, regs, current);
+}
+
+
+/* The inverse of do_copy_regs(). No error or sanity checking. */
+
+void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
+ struct task_struct *tsk)
+{
+ int i, n, wb_offset;
+
+ /* Note: PS.EXCM is not set while the user task is running; it
+ * needs to be set in regs->ps for exception handling convenience.
+ */
+
+ regs->pc = elfregs->pc;
+ regs->ps = (elfregs->ps | XCHAL_PS_EXCM_MASK);
+ regs->exccause = elfregs->exccause;
+ regs->excvaddr = elfregs->excvaddr;
+ regs->windowbase = elfregs->windowbase;
+ regs->windowstart = elfregs->windowstart;
+ regs->lbeg = elfregs->lbeg;
+ regs->lend = elfregs->lend;
+ regs->lcount = elfregs->lcount;
+ regs->sar = elfregs->sar;
+ regs->syscall = elfregs->syscall;
+
+ /* Clear everything. */
+
+ memset (regs->areg, 0, sizeof(regs->areg));
+
+ /* Copy regs from live window frame. */
+
+ wb_offset = regs->windowbase * 4;
+ n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;
+
+ for (i = 0; i < n; i++)
+ regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
+
+ n = (regs->wmask >> 4) * 4;
+
+ for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
+ regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
+}
+
+/*
+ * do_save_fpregs() gathers information from 'struct pt_regs' and
+ * 'current->thread' to fill in the elf_fpregset_t structure.
+ *
+ * Core files and ptrace use elf_fpregset_t.
+ */
+
+void do_save_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
+ struct task_struct *tsk)
+{
+#if XCHAL_HAVE_CP
+
+ extern unsigned char _xtensa_reginfo_tables[];
+ extern unsigned _xtensa_reginfo_table_size;
+ int i;
+ unsigned long flags;
+
+ /* Before dumping coprocessor state from memory,
+ * ensure any live coprocessor contents for this
+ * task are first saved to memory:
+ */
+ local_irq_save(flags);
+
+ for (i = 0; i < XCHAL_CP_MAX; i++) {
+ if (tsk == coprocessor_info[i].owner) {
+ enable_coprocessor(i);
+ save_coprocessor_registers(
+ tsk->thread.cp_save+coprocessor_info[i].offset,i);
+ disable_coprocessor(i);
+ }
+ }
+
+ local_irq_restore(flags);
+
+ /* Now dump coprocessor & extra state: */
+ memcpy((unsigned char*)fpregs,
+ _xtensa_reginfo_tables, _xtensa_reginfo_table_size);
+ memcpy((unsigned char*)fpregs + _xtensa_reginfo_table_size,
+ tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
+#endif
+}
+
+/*
+ * The inverse of do_save_fpregs().
+ * Copies coprocessor and extra state from fpregs into regs and tsk->thread.
+ * Returns 0 on success, non-zero if layout doesn't match.
+ */
+
+int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
+ struct task_struct *tsk)
+{
+#if XCHAL_HAVE_CP
+
+ extern unsigned char _xtensa_reginfo_tables[];
+ extern unsigned _xtensa_reginfo_table_size;
+ int i;
+ unsigned long flags;
+
+ /* Make sure save area layouts match.
+ * FIXME: in the future we could allow restoring from
+ * a different layout of the same registers, by comparing
+ * fpregs' table with _xtensa_reginfo_tables and matching
+ * entries and copying registers one at a time.
+ * Not too sure yet whether that's very useful.
+ */
+
+ if( memcmp((unsigned char*)fpregs,
+ _xtensa_reginfo_tables, _xtensa_reginfo_table_size) ) {
+ return -1;
+ }
+
+ /* Before restoring coprocessor state from memory,
+ * ensure any live coprocessor contents for this
+ * task are first invalidated.
+ */
+
+ local_irq_save(flags);
+
+ for (i = 0; i < XCHAL_CP_MAX; i++) {
+ if (tsk == coprocessor_info[i].owner) {
+ enable_coprocessor(i);
+ save_coprocessor_registers(
+ tsk->thread.cp_save+coprocessor_info[i].offset,i);
+ coprocessor_info[i].owner = 0;
+ disable_coprocessor(i);
+ }
+ }
+
+ local_irq_restore(flags);
+
+ /* Now restore coprocessor & extra state: */
+
+ memcpy(tsk->thread.cp_save,
+ (unsigned char*)fpregs + _xtensa_reginfo_table_size,
+ XTENSA_CP_EXTRA_SIZE);
+#endif
+ return 0;
+}
+/*
+ * Fill in the CP structure for a core dump for a particular task.
+ */
+
+int
+dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
+{
+/* see asm/coprocessor.h for this magic number 16 */
+#if TOTAL_CPEXTRA_SIZE > 16
+ do_save_fpregs (r, regs, task);
+
+ /* For now, bit 16 means some extra state may be present: */
+// FIXME!! need to track to return more accurate mask
+ return 0x10000 | XCHAL_CP_MASK;
+#else
+ return 0; /* no coprocessors active on this processor */
+#endif
+}
+
+/*
+ * Fill in the CP structure for a core dump.
+ * This includes any FPU coprocessor.
+ * Here, we dump all coprocessors, and other ("extra") custom state.
+ *
+ * This function is called by elf_core_dump() in fs/binfmt_elf.c
+ * (in which case 'regs' comes from calls to do_coredump, see signals.c).
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
+{
+ return dump_task_fpu(regs, current, r);
+}
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
new file mode 100644
index 00000000000..9ef07a4dd2a
--- /dev/null
+++ b/arch/xtensa/kernel/ptrace.c
@@ -0,0 +1,407 @@
+// TODO some minor issues
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel <chris@zankel.net>
+ * Scott Foehner<sfoehner@yahoo.com>,
+ * Kevin Chea
+ * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/security.h>
+
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/elf.h>
+
+#define TEST_KERNEL // verify kernel operations FIXME: remove
+
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+
+void ptrace_disable(struct task_struct *child)
+{
+ /* Nothing to do.. */
+}
+
+int sys_ptrace(long request, long pid, long addr, long data)
+{
+ struct task_struct *child;
+ int ret = -EPERM;
+
+ lock_kernel();
+
+#if 0
+ if ((int)request != 1)
+ printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n",
+ (int) request, (int) pid, (unsigned long) addr,
+ (unsigned long) data);
+#endif
+
+ if (request == PTRACE_TRACEME) {
+
+ /* Are we already being traced? */
+
+ if (current->ptrace & PT_PTRACED)
+ goto out;
+
+ if ((ret = security_ptrace(current->parent, current)))
+ goto out;
+
+ /* Set the ptrace bit in the process flags. */
+
+ current->ptrace |= PT_PTRACED;
+ ret = 0;
+ goto out;
+ }
+
+ ret = -ESRCH;
+ read_lock(&tasklist_lock);
+ child = find_task_by_pid(pid);
+ if (child)
+ get_task_struct(child);
+ read_unlock(&tasklist_lock);
+ if (!child)
+ goto out;
+
+ ret = -EPERM;
+ if (pid == 1) /* you may not mess with init */
+ goto out_tsk;
+
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+ }
+
+ if ((ret = ptrace_check_attach(child, request == PTRACE_KILL)) < 0)
+ goto out_tsk;
+
+ switch (request) {
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA:
+ {
+ unsigned long tmp;
+ int copied;
+
+ copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+ ret = -EIO;
+ if (copied != sizeof(tmp))
+ break;
+ ret = put_user(tmp,(unsigned long *) data);
+
+ goto out_tsk;
+ }
+
+ /* Read the word at location addr in the USER area. */
+
+ case PTRACE_PEEKUSR:
+ {
+ struct pt_regs *regs;
+ unsigned long tmp;
+
+ regs = xtensa_pt_regs(child);
+ tmp = 0; /* Default return value. */
+
+ switch(addr) {
+
+ case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
+ {
+ int ar = addr - REG_AR_BASE - regs->windowbase * 4;
+ ar &= (XCHAL_NUM_AREGS - 1);
+ if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
+ tmp = regs->areg[ar];
+ else
+ ret = -EIO;
+ break;
+ }
+ case REG_A_BASE ... REG_A_BASE + 15:
+ tmp = regs->areg[addr - REG_A_BASE];
+ break;
+ case REG_PC:
+ tmp = regs->pc;
+ break;
+ case REG_PS:
+ /* Note: PS.EXCM is not set while the user task is running;
+ * it is set in regs->ps only for exception handling
+ * convenience. */
+ tmp = (regs->ps & ~XCHAL_PS_EXCM_MASK);
+ break;
+ case REG_WB:
+ tmp = regs->windowbase;
+ break;
+ case REG_WS:
+ tmp = regs->windowstart;
+ break;
+ case REG_LBEG:
+ tmp = regs->lbeg;
+ break;
+ case REG_LEND:
+ tmp = regs->lend;
+ break;
+ case REG_LCOUNT:
+ tmp = regs->lcount;
+ break;
+ case REG_SAR:
+ tmp = regs->sar;
+ break;
+ case REG_DEPC:
+ tmp = regs->depc;
+ break;
+ case REG_EXCCAUSE:
+ tmp = regs->exccause;
+ break;
+ case REG_EXCVADDR:
+ tmp = regs->excvaddr;
+ break;
+ case SYSCALL_NR:
+ tmp = regs->syscall;
+ break;
+ default:
+ tmp = 0;
+ ret = -EIO;
+ goto out_tsk;
+ }
+ ret = put_user(tmp, (unsigned long *) data);
+ goto out_tsk;
+ }
+
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ if (access_process_vm(child, addr, &data, sizeof(data), 1)
+ == sizeof(data))
+ break;
+ ret = -EIO;
+ goto out_tsk;
+
+ case PTRACE_POKEUSR:
+ {
+ struct pt_regs *regs;
+ regs = xtensa_pt_regs(child);
+
+ switch (addr) {
+ case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
+ {
+ int ar = addr - REG_AR_BASE - regs->windowbase * 4;
+ if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
+ regs->areg[ar & (XCHAL_NUM_AREGS - 1)] = data;
+ else
+ ret = -EIO;
+ break;
+ }
+ case REG_A_BASE ... REG_A_BASE + 15:
+ regs->areg[addr - REG_A_BASE] = data;
+ break;
+ case REG_PC:
+ regs->pc = data;
+ break;
+ case SYSCALL_NR:
+ regs->syscall = data;
+ break;
+#ifdef TEST_KERNEL
+ case REG_WB:
+ regs->windowbase = data;
+ break;
+ case REG_WS:
+ regs->windowstart = data;
+ break;
+#endif
+
+ default:
+ /* The rest are not allowed. */
+ ret = -EIO;
+ break;
+ }
+ break;
+ }
+
+ /* continue and stop at next (return from) syscall */
+ case PTRACE_SYSCALL:
+ case PTRACE_CONT: /* restart after signal. */
+ {
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ if (request == PTRACE_SYSCALL)
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ else
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ child->exit_code = data;
+ /* Make sure the single step bit is not set. */
+ child->ptrace &= ~PT_SINGLESTEP;
+ wake_up_process(child);
+ ret = 0;
+ break;
+ }
+
+ /*
+ * make the child exit. Best I can do is send it a sigkill.
+ * perhaps it should be put in the status that it wants to
+ * exit.
+ */
+ case PTRACE_KILL:
+ ret = 0;
+ if (child->exit_state == EXIT_ZOMBIE) /* already dead */
+ break;
+ child->exit_code = SIGKILL;
+ child->ptrace &= ~PT_SINGLESTEP;
+ wake_up_process(child);
+ break;
+
+ case PTRACE_SINGLESTEP:
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ child->ptrace |= PT_SINGLESTEP;
+ child->exit_code = data;
+ wake_up_process(child);
+ ret = 0;
+ break;
+
+ case PTRACE_GETREGS:
+ {
+ /* 'data' points to user memory in which to write.
+ * Mainly due to the non-live register values, we
+ * reformat the register values into something more
+ * standard. For convenience, we use the handy
+ * elf_gregset_t format. */
+
+ xtensa_gregset_t format;
+ struct pt_regs *regs = xtensa_pt_regs(child);
+
+ do_copy_regs (&format, regs, child);
+
+ /* Now, copy to user space nice and easy... */
+ ret = 0;
+ if (copy_to_user((void *)data, &format, sizeof(elf_gregset_t)))
+ ret = -EFAULT;
+ break;
+ }
+
+ case PTRACE_SETREGS:
+ {
+ /* 'data' points to user memory that contains the new
+ * values in the elf_gregset_t format. */
+
+ xtensa_gregset_t format;
+ struct pt_regs *regs = xtensa_pt_regs(child);
+
+ if (copy_from_user(&format,(void *)data,sizeof(elf_gregset_t))){
+ ret = -EFAULT;
+ break;
+ }
+
+ /* FIXME: Perhaps we want some sanity checks on
+ * these user-space values? See ARM version. Are
+ * debuggers a security concern? */
+
+ do_restore_regs (&format, regs, child);
+
+ ret = 0;
+ break;
+ }
+
+ case PTRACE_GETFPREGS:
+ {
+ /* 'data' points to user memory in which to write.
+ * For convenience, we use the handy
+ * elf_fpregset_t format. */
+
+ elf_fpregset_t fpregs;
+ struct pt_regs *regs = xtensa_pt_regs(child);
+
+ do_save_fpregs (&fpregs, regs, child);
+
+ /* Now, copy to user space nice and easy... */
+ ret = 0;
+ if (copy_to_user((void *)data, &fpregs, sizeof(elf_fpregset_t)))
+ ret = -EFAULT;
+
+ break;
+ }
+
+ case PTRACE_SETFPREGS:
+ {
+ /* 'data' points to user memory that contains the new
+ * values in the elf_fpregset_t format.
+ */
+ elf_fpregset_t fpregs;
+ struct pt_regs *regs = xtensa_pt_regs(child);
+
+ ret = 0;
+ if (copy_from_user(&fpregs, (void *)data, sizeof(elf_fpregset_t))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (do_restore_fpregs (&fpregs, regs, child))
+ ret = -EIO;
+ break;
+ }
+
+ case PTRACE_GETFPREGSIZE:
+ /* 'data' points to 'unsigned long' set to the size
+ * of elf_fpregset_t
+ */
+ ret = put_user(sizeof(elf_fpregset_t), (unsigned long *) data);
+ break;
+
+ case PTRACE_DETACH: /* detach a process that was attached. */
+ ret = ptrace_detach(child, data);
+ break;
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+out_tsk:
+ put_task_struct(child);
+out:
+ unlock_kernel();
+ return ret;
+}
+
+void do_syscall_trace(void)
+{
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return;
+
+ if (!(current->ptrace & PT_PTRACED))
+ return;
+
+ /*
+ * The 0x80 provides a way for the tracing parent to distinguish
+ * between a syscall stop and SIGTRAP delivery
+ */
+ ptrace_notify(SIGTRAP|((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
+
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+}
diff --git a/arch/xtensa/kernel/semaphore.c b/arch/xtensa/kernel/semaphore.c
new file mode 100644
index 00000000000..d40f4b1b75a
--- /dev/null
+++ b/arch/xtensa/kernel/semaphore.c
@@ -0,0 +1,226 @@
+/*
+ * arch/xtensa/kernel/semaphore.c
+ *
+ * Generic semaphore code. Buyer beware. Do your own specific changes
+ * in <asm/semaphore-helper.h>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Kevin Chea
+ */
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/init.h>
+#include <asm/semaphore.h>
+#include <asm/errno.h>
+
+/*
+ * These two _must_ execute atomically wrt each other.
+ */
+
+static __inline__ void wake_one_more(struct semaphore * sem)
+{
+ atomic_inc((atomic_t *)&sem->sleepers);
+}
+
+static __inline__ int waking_non_zero(struct semaphore *sem)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ if (sem->sleepers > 0) {
+ sem->sleepers--;
+ ret = 1;
+ }
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ return ret;
+}
+
+/*
+ * waking_non_zero_interruptible:
+ * 1 got the lock
+ * 0 go to sleep
+ * -EINTR interrupted
+ *
+ * We must undo the sem->count decrement from down_interruptible()
+ * while we are protected by the spinlock, so that this atomic_inc()
+ * is atomic with the atomic_read() in wake_one_more(); otherwise we
+ * can race. -arca
+ */
+
+static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
+ struct task_struct *tsk)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ if (sem->sleepers > 0) {
+ sem->sleepers--;
+ ret = 1;
+ } else if (signal_pending(tsk)) {
+ atomic_inc(&sem->count);
+ ret = -EINTR;
+ }
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ return ret;
+}
+
+/*
+ * waking_non_zero_trylock:
+ * 1 failed to lock
+ * 0 got the lock
+ *
+ * We must undo the sem->count decrement from down_trylock() while we
+ * are protected by the spinlock, so that this atomic_inc() is atomic
+ * with the atomic_read() in wake_one_more(); otherwise we can race. -arca
+ */
+
+static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
+{
+ unsigned long flags;
+ int ret = 1;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ if (sem->sleepers <= 0)
+ atomic_inc(&sem->count);
+ else {
+ sem->sleepers--;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ return ret;
+}
+
+spinlock_t semaphore_wake_lock = SPIN_LOCK_UNLOCKED;
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * The "count" variable is decremented for each process
+ * that tries to sleep, while the "waking" variable is
+ * incremented when the "up()" code goes to wake up waiting
+ * processes.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation).
+ *
+ * waking_non_zero() (from asm/semaphore.h) must execute
+ * atomically.
+ *
+ * When __up() is called, the count was negative before
+ * incrementing it, and we need to wake up somebody.
+ *
+ * This routine adds one to the count of processes that need to
+ * wake up and exit. ALL waiting processes actually wake up but
+ * only the one that gets to the "waking" field first will gate
+ * through and acquire the semaphore. The others will go back
+ * to sleep.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business. The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
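+
+/* Typical caller-side usage (sketch; the uncontended fast paths live
+ * in <asm/semaphore.h> and only drop into these slow paths on
+ * contention):
+ *
+ *   down(&sem);    may end up in __down() and sleep
+ *   ...critical section...
+ *   up(&sem);      may end up in __up() and wake a waiter
+ */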
+
+void __up(struct semaphore *sem)
+{
+ wake_one_more(sem);
+ wake_up(&sem->wait);
+}
+
+/*
+ * Perform the "down" function. Return zero for semaphore acquired,
+ * return negative for signalled out of the function.
+ *
+ * If called from __down, the return is ignored and the wait loop is
+ * not interruptible. This means that a task waiting on a semaphore
+ * using "down()" cannot be killed until someone does an "up()" on
+ * the semaphore.
+ *
+ * If called from __down_interruptible, the return value gets checked
+ * upon return. If the return value is negative then the task continues
+ * with the negative value in the return register (it can be tested by
+ * the caller).
+ *
+ * Either form may be used in conjunction with "up()".
+ *
+ */
+
+#define DOWN_VAR \
+ struct task_struct *tsk = current; \
+ wait_queue_t wait; \
+ init_waitqueue_entry(&wait, tsk);
+
+#define DOWN_HEAD(task_state) \
+ tsk->state = (task_state); \
+ add_wait_queue(&sem->wait, &wait); \
+ \
+ /* \
+ * Ok, we're set up. sem->count is known to be less than zero \
+ * so we must wait. \
+ * \
+ * We can let go the lock for purposes of waiting. \
+ * We re-acquire it after awaking so as to protect \
+ * all semaphore operations. \
+ * \
+ * If "up()" is called before we call waking_non_zero() then \
+ * we will catch it right away. If it is called later then \
+ * we will have to go through a wakeup cycle to catch it. \
+ * \
+ * Multiple waiters contend for the semaphore lock to see \
+ * who gets to gate through and who has to wait some more. \
+ */ \
+ for (;;) {
+
+#define DOWN_TAIL(task_state) \
+ tsk->state = (task_state); \
+ } \
+ tsk->state = TASK_RUNNING; \
+ remove_wait_queue(&sem->wait, &wait);
+
+void __sched __down(struct semaphore * sem)
+{
+ DOWN_VAR
+ DOWN_HEAD(TASK_UNINTERRUPTIBLE)
+ if (waking_non_zero(sem))
+ break;
+ schedule();
+ DOWN_TAIL(TASK_UNINTERRUPTIBLE)
+}
+
+int __sched __down_interruptible(struct semaphore * sem)
+{
+ int ret = 0;
+ DOWN_VAR
+ DOWN_HEAD(TASK_INTERRUPTIBLE)
+
+ ret = waking_non_zero_interruptible(sem, tsk);
+ if (ret)
+ {
+ if (ret == 1)
+ /* ret != 0 only if we get interrupted -arca */
+ ret = 0;
+ break;
+ }
+ schedule();
+ DOWN_TAIL(TASK_INTERRUPTIBLE)
+ return ret;
+}
+
+int __down_trylock(struct semaphore * sem)
+{
+ return waking_non_zero_trylock(sem);
+}
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
new file mode 100644
index 00000000000..1f5bf5d624e
--- /dev/null
+++ b/arch/xtensa/kernel/setup.c
@@ -0,0 +1,520 @@
+/*
+ * arch/xtensa/kernel/setup.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Kevin Chea
+ * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/tty.h>
+#include <linux/bootmem.h>
+#include <linux/kernel.h>
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+# include <linux/console.h>
+#endif
+
+#ifdef CONFIG_RTC
+# include <linux/timex.h>
+#endif
+
+#ifdef CONFIG_PROC_FS
+# include <linux/seq_file.h>
+#endif
+
+#include <asm/system.h>
+#include <asm/bootparam.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/timex.h>
+#include <asm/platform.h>
+#include <asm/page.h>
+#include <asm/setup.h>
+
+#include <xtensa/config/system.h>
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
+#endif
+
+#ifdef CONFIG_BLK_DEV_FD
+extern struct fd_ops no_fd_ops;
+struct fd_ops *fd_ops;
+#endif
+
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
+extern struct ide_ops no_ide_ops;
+struct ide_ops *ide_ops;
+#endif
+
+extern struct rtc_ops no_rtc_ops;
+struct rtc_ops *rtc_ops;
+
+#ifdef CONFIG_PC_KEYB
+extern struct kbd_ops no_kbd_ops;
+struct kbd_ops *kbd_ops;
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+extern void *initrd_start;
+extern void *initrd_end;
+extern void *__initrd_start;
+extern void *__initrd_end;
+int initrd_is_mapped = 0;
+extern int initrd_below_start_ok;
+#endif
+
+unsigned char aux_device_present;
+extern unsigned long loops_per_jiffy;
+
+/* Command line specified as configuration option. */
+
+static char command_line[COMMAND_LINE_SIZE];
+
+#ifdef CONFIG_CMDLINE_BOOL
+static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
+#endif
+
+sysmem_info_t __initdata sysmem;
+
+extern void init_mmu(void);
+
+/*
+ * Boot parameter parsing.
+ *
+ * The Xtensa port uses a list of variable-sized tags to pass data to
+ * the kernel. The first tag must be a BP_TAG_FIRST tag for the list
+ * to be recognised. The list is terminated with a zero-sized
+ * BP_TAG_LAST tag.
+ */
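+
+/* An illustrative list as a boot loader might pass it (tag IDs and the
+ * bp_tag_t layout come from <asm/bootparam.h>; the sizes and data here
+ * are made up for the example):
+ *
+ *   { id = BP_TAG_FIRST,        size = 0 }
+ *   { id = BP_TAG_MEMORY,       size = sizeof(meminfo_t), data = meminfo }
+ *   { id = BP_TAG_COMMAND_LINE, size = N, data = "console=..." }
+ *   { id = BP_TAG_LAST,         size = 0 }
+ */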
+
+typedef struct tagtable {
+ u32 tag;
+ int (*parse)(const bp_tag_t*);
+} tagtable_t;
+
+#define __tagtable(tag, fn) static tagtable_t __tagtable_##fn \
+ __attribute__((unused, __section__(".taglist"))) = { tag, fn }
+
+/* parse current tag */
+
+static int __init parse_tag_mem(const bp_tag_t *tag)
+{
+ meminfo_t *mi = (meminfo_t*)(tag->data);
+
+ if (mi->type != MEMORY_TYPE_CONVENTIONAL)
+ return -1;
+
+ if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) {
+ printk(KERN_WARNING
+ "Ignoring memory bank 0x%08lx size %ldKB\n",
+ (unsigned long)mi->start,
+ ((unsigned long)mi->end - (unsigned long)mi->start) >> 10);
+ return -EINVAL;
+ }
+ sysmem.bank[sysmem.nr_banks].type = mi->type;
+ sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(mi->start);
+ sysmem.bank[sysmem.nr_banks].end = mi->end & PAGE_MASK;
+ sysmem.nr_banks++;
+
+ return 0;
+}
+
+__tagtable(BP_TAG_MEMORY, parse_tag_mem);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+static int __init parse_tag_initrd(const bp_tag_t* tag)
+{
+ meminfo_t* mi;
+ mi = (meminfo_t*)(tag->data);
+ initrd_start = (void*)(mi->start);
+ initrd_end = (void*)(mi->end);
+
+ return 0;
+}
+
+__tagtable(BP_TAG_INITRD, parse_tag_initrd);
+
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+static int __init parse_tag_cmdline(const bp_tag_t* tag)
+{
+ strncpy(command_line, (char*)(tag->data), COMMAND_LINE_SIZE);
+ command_line[COMMAND_LINE_SIZE - 1] = '\0';
+ return 0;
+}
+
+__tagtable(BP_TAG_COMMAND_LINE, parse_tag_cmdline);
+
+static int __init parse_bootparam(const bp_tag_t* tag)
+{
+ extern tagtable_t __tagtable_begin, __tagtable_end;
+ tagtable_t *t;
+
+ /* Boot parameters must start with a BP_TAG_FIRST tag. */
+
+ if (tag->id != BP_TAG_FIRST) {
+ printk(KERN_WARNING "Invalid boot parameters!\n");
+ return 0;
+ }
+
+ tag = (bp_tag_t*)((unsigned long)tag + sizeof(bp_tag_t) + tag->size);
+
+ /* Parse all tags. */
+
+ while (tag != NULL && tag->id != BP_TAG_LAST) {
+ for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
+ if (tag->id == t->tag) {
+ t->parse(tag);
+ break;
+ }
+ }
+ if (t == &__tagtable_end)
+ printk(KERN_WARNING "Ignoring tag "
+ "0x%08x\n", tag->id);
+ tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size);
+ }
+
+ return 0;
+}
+
+/*
+ * Initialize architecture. (Early stage)
+ */
+
+void __init init_arch(bp_tag_t *bp_start)
+{
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ initrd_start = &__initrd_start;
+ initrd_end = &__initrd_end;
+#endif
+
+ sysmem.nr_banks = 0;
+
+#ifdef CONFIG_CMDLINE_BOOL
+ strcpy(command_line, default_command_line);
+#endif
+
+ /* Parse boot parameters */
+
+ if (bp_start)
+ parse_bootparam(bp_start);
+
+ if (sysmem.nr_banks == 0) {
+ sysmem.nr_banks = 1;
+ sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
+ sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
+ + PLATFORM_DEFAULT_MEM_SIZE;
+ }
+
+ /* Early hook for platforms */
+
+ platform_init(bp_start);
+
+ /* Initialize MMU. */
+
+ init_mmu();
+}
+
+/*
+ * Initialize system. Setup memory and reserve regions.
+ */
+
+extern char _end;
+extern char _stext;
+extern char _WindowVectors_text_start;
+extern char _WindowVectors_text_end;
+extern char _DebugInterruptVector_literal_start;
+extern char _DebugInterruptVector_text_end;
+extern char _KernelExceptionVector_literal_start;
+extern char _KernelExceptionVector_text_end;
+extern char _UserExceptionVector_literal_start;
+extern char _UserExceptionVector_text_end;
+extern char _DoubleExceptionVector_literal_start;
+extern char _DoubleExceptionVector_text_end;
+
+void __init setup_arch(char **cmdline_p)
+{
+ extern int mem_reserve(unsigned long, unsigned long, int);
+ extern void bootmem_init(void);
+
+ memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
+ saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+ *cmdline_p = command_line;
+
+ /* Reserve some memory regions */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start < initrd_end) {
+ initrd_is_mapped = mem_reserve(__pa(initrd_start),
+ __pa(initrd_end), 0);
+ initrd_below_start_ok = 1;
+ } else {
+ initrd_start = 0;
+ }
+#endif
+
+ mem_reserve(__pa(&_stext),__pa(&_end), 1);
+
+ mem_reserve(__pa(&_WindowVectors_text_start),
+ __pa(&_WindowVectors_text_end), 0);
+
+ mem_reserve(__pa(&_DebugInterruptVector_literal_start),
+ __pa(&_DebugInterruptVector_text_end), 0);
+
+ mem_reserve(__pa(&_KernelExceptionVector_literal_start),
+ __pa(&_KernelExceptionVector_text_end), 0);
+
+ mem_reserve(__pa(&_UserExceptionVector_literal_start),
+ __pa(&_UserExceptionVector_text_end), 0);
+
+ mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
+ __pa(&_DoubleExceptionVector_text_end), 0);
+
+ bootmem_init();
+
+ platform_setup(cmdline_p);
+
+ paging_init();
+
+#ifdef CONFIG_VT
+# if defined(CONFIG_VGA_CONSOLE)
+ conswitchp = &vga_con;
+# elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+# endif
+#endif
+
+#ifdef CONFIG_PCI
+ platform_pcibios_init();
+#endif
+}
+
+void machine_restart(char * cmd)
+{
+ platform_restart();
+}
+
+void machine_halt(void)
+{
+ platform_halt();
+ while (1);
+}
+
+void machine_power_off(void)
+{
+ platform_power_off();
+ while (1);
+}
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Display some core information through /proc/cpuinfo.
+ */
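+
+/* The first lines of the output look roughly like this (all values
+ * depend on the core configuration; shown for illustration only):
+ *
+ *   processor       : 0
+ *   vendor_id       : Tensilica
+ *   model           : Xtensa <hw release>
+ *   core ID         : <core id>
+ */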
+
+static int
+c_show(struct seq_file *f, void *slot)
+{
+ /* high-level stuff */
+ seq_printf(f,"processor\t: 0\n"
+ "vendor_id\t: Tensilica\n"
+ "model\t\t: Xtensa " XCHAL_HW_RELEASE_NAME "\n"
+ "core ID\t\t: " XCHAL_CORE_ID "\n"
+ "build ID\t: 0x%x\n"
+ "byte order\t: %s\n"
+ "cpu MHz\t\t: %lu.%02lu\n"
+ "bogomips\t: %lu.%02lu\n",
+ XCHAL_BUILD_UNIQUE_ID,
+ XCHAL_HAVE_BE ? "big" : "little",
+ CCOUNT_PER_JIFFY/(1000000/HZ),
+ (CCOUNT_PER_JIFFY/(10000/HZ)) % 100,
+ loops_per_jiffy/(500000/HZ),
+ (loops_per_jiffy/(5000/HZ)) % 100);
+
+ seq_printf(f,"flags\t\t: "
+#if XCHAL_HAVE_NMI
+ "nmi "
+#endif
+#if XCHAL_HAVE_DEBUG
+ "debug "
+# if XCHAL_HAVE_OCD
+ "ocd "
+# endif
+#endif
+#if XCHAL_HAVE_DENSITY
+ "density "
+#endif
+#if XCHAL_HAVE_BOOLEANS
+ "boolean "
+#endif
+#if XCHAL_HAVE_LOOPS
+ "loop "
+#endif
+#if XCHAL_HAVE_NSA
+ "nsa "
+#endif
+#if XCHAL_HAVE_MINMAX
+ "minmax "
+#endif
+#if XCHAL_HAVE_SEXT
+ "sext "
+#endif
+#if XCHAL_HAVE_CLAMPS
+ "clamps "
+#endif
+#if XCHAL_HAVE_MAC16
+ "mac16 "
+#endif
+#if XCHAL_HAVE_MUL16
+ "mul16 "
+#endif
+#if XCHAL_HAVE_MUL32
+ "mul32 "
+#endif
+#if XCHAL_HAVE_MUL32_HIGH
+ "mul32h "
+#endif
+#if XCHAL_HAVE_FP
+ "fpu "
+#endif
+ "\n");
+
+ /* Registers. */
+ seq_printf(f,"physical aregs\t: %d\n"
+ "misc regs\t: %d\n"
+ "ibreak\t\t: %d\n"
+ "dbreak\t\t: %d\n",
+ XCHAL_NUM_AREGS,
+ XCHAL_NUM_MISC_REGS,
+ XCHAL_NUM_IBREAK,
+ XCHAL_NUM_DBREAK);
+
+
+ /* Interrupt. */
+ seq_printf(f,"num ints\t: %d\n"
+ "ext ints\t: %d\n"
+ "int levels\t: %d\n"
+ "timers\t\t: %d\n"
+ "debug level\t: %d\n",
+ XCHAL_NUM_INTERRUPTS,
+ XCHAL_NUM_EXTINTERRUPTS,
+ XCHAL_NUM_INTLEVELS,
+ XCHAL_NUM_TIMERS,
+ XCHAL_DEBUGLEVEL);
+
+ /* Coprocessors */
+#if XCHAL_HAVE_CP
+ seq_printf(f, "coprocessors\t: %d\n", XCHAL_CP_NUM);
+#else
+ seq_printf(f, "coprocessors\t: none\n");
+#endif
+
+ /* {I,D}{RAM,ROM} and XLMI */
+ seq_printf(f,"inst ROMs\t: %d\n"
+ "inst RAMs\t: %d\n"
+ "data ROMs\t: %d\n"
+ "data RAMs\t: %d\n"
+ "XLMI ports\t: %d\n",
+ XCHAL_NUM_IROM,
+ XCHAL_NUM_IRAM,
+ XCHAL_NUM_DROM,
+ XCHAL_NUM_DRAM,
+ XCHAL_NUM_XLMI);
+
+ /* Cache */
+ seq_printf(f,"icache line size: %d\n"
+ "icache ways\t: %d\n"
+ "icache size\t: %d\n"
+ "icache flags\t: "
+#if XCHAL_ICACHE_LINE_LOCKABLE
+ "lock"
+#endif
+ "\n"
+ "dcache line size: %d\n"
+ "dcache ways\t: %d\n"
+ "dcache size\t: %d\n"
+ "dcache flags\t: "
+#if XCHAL_DCACHE_IS_WRITEBACK
+ "writeback"
+#endif
+#if XCHAL_DCACHE_LINE_LOCKABLE
+ "lock"
+#endif
+ "\n",
+ XCHAL_ICACHE_LINESIZE,
+ XCHAL_ICACHE_WAYS,
+ XCHAL_ICACHE_SIZE,
+ XCHAL_DCACHE_LINESIZE,
+ XCHAL_DCACHE_WAYS,
+ XCHAL_DCACHE_SIZE);
+
+ /* MMU */
+ seq_printf(f,"ASID bits\t: %d\n"
+ "ASID invalid\t: %d\n"
+ "ASID kernel\t: %d\n"
+ "rings\t\t: %d\n"
+ "itlb ways\t: %d\n"
+ "itlb AR ways\t: %d\n"
+ "dtlb ways\t: %d\n"
+ "dtlb AR ways\t: %d\n",
+ XCHAL_MMU_ASID_BITS,
+ XCHAL_MMU_ASID_INVALID,
+ XCHAL_MMU_ASID_KERNEL,
+ XCHAL_MMU_RINGS,
+ XCHAL_ITLB_WAYS,
+ XCHAL_ITLB_ARF_WAYS,
+ XCHAL_DTLB_WAYS,
+ XCHAL_DTLB_ARF_WAYS);
+
+ return 0;
+}
+
+/*
+ * We show only CPU #0 info.
+ */
+static void *
+c_start(struct seq_file *f, loff_t *pos)
+{
+ return (void *) ((*pos == 0) ? (void *)1 : NULL);
+}
+
+static void *
+c_next(struct seq_file *f, void *v, loff_t *pos)
+{
+ return NULL;
+}
+
+static void
+c_stop(struct seq_file *f, void *v)
+{
+}
+
+struct seq_operations cpuinfo_op =
+{
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = c_show
+};
+
+#endif /* CONFIG_PROC_FS */
+
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
new file mode 100644
index 00000000000..df6e1e17b09
--- /dev/null
+++ b/arch/xtensa/kernel/signal.c
@@ -0,0 +1,713 @@
+// TODO coprocessor stuff
+/*
+ * linux/arch/xtensa/kernel/signal.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
+ *
+ * Joe Taylor <joe@tensilica.com>
+ * Chris Zankel <chris@zankel.net>
+ *
+ *
+ *
+ */
+
+#include <xtensa/config/core.h>
+#include <xtensa/hal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/personality.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+#define DEBUG_SIG 0
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options,
+ struct rusage * ru);
+asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
+
+extern struct task_struct *coproc_owners[];
+
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+
+int sys_sigsuspend(struct pt_regs *regs)
+{
+ old_sigset_t mask = (old_sigset_t) regs->areg[3];
+ sigset_t saveset;
+
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ regs->areg[2] = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(regs, &saveset))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_rt_sigsuspend(struct pt_regs *regs)
+{
+ sigset_t *unewset = (sigset_t *) regs->areg[4];
+ size_t sigsetsize = (size_t) regs->areg[3];
+ sigset_t saveset, newset;
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ regs->areg[2] = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(regs, &saveset))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction *act,
+ struct old_sigaction *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(struct pt_regs *regs)
+{
+ const stack_t *uss = (stack_t *) regs->areg[4];
+ stack_t *uoss = (stack_t *) regs->areg[3];
+
+ if (regs->depc > 64)
+ panic ("Double exception sys_sigaltstack\n");
+
+ return do_sigaltstack(uss, uoss, regs->areg[1]);
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+struct sigframe
+{
+ struct sigcontext sc;
+ struct _cpstate cpstate;
+ unsigned long extramask[_NSIG_WORDS-1];
+ unsigned char retcode[6];
+ unsigned int reserved[4]; /* Reserved area for chaining */
+ unsigned int window[4]; /* Window of 4 registers for initial context */
+};
+
+struct rt_sigframe
+{
+ struct siginfo info;
+ struct ucontext uc;
+ struct _cpstate cpstate;
+ unsigned char retcode[6];
+ unsigned int reserved[4]; /* Reserved area for chaining */
+ unsigned int window[4]; /* Window of 4 registers for initial context */
+};
+
+extern void release_all_cp (struct task_struct *);
+
+
+// FIXME restore_cpextra
+static inline int
+restore_cpextra (struct _cpstate *buf)
+{
+#if 0
+ /* The signal handler may have used coprocessors in which
+ * case they are still enabled. We disable them to force a
+ * reloading of the original task's CP state by the lazy
+ * context-switching mechanisms of CP exception handling.
+ * Also, we essentially discard any coprocessor state that the
+ * signal handler created. */
+
+ struct task_struct *tsk = current;
+ release_all_cp(tsk);
+ return __copy_from_user(tsk->thread.cpextra, buf, TOTAL_CPEXTRA_SIZE);
+#endif
+ return 0;
+}
+
+/* Note: We don't copy the double-exception 'tregs'; the double
+ * exception must be finished before we return to the signal handler.
+ * That handler might itself cause another double exception, but we
+ * should be fine: the situation is the same as if we had returned to
+ * the signal handler and taken an interrupt immediately.
+ */
+
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+{
+ struct thread_struct *thread;
+ unsigned int err = 0;
+ unsigned long ps;
+ struct _cpstate *buf;
+
+#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
+ COPY(pc);
+ COPY(depc);
+ COPY(wmask);
+ COPY(lbeg);
+ COPY(lend);
+ COPY(lcount);
+ COPY(sar);
+ COPY(windowbase);
+ COPY(windowstart);
+#undef COPY
+
+ /* For PS, restore only PS.CALLINC.
+ * Assume that all other bits are either the same as for the signal
+ * handler, or the user mode value doesn't matter (e.g. PS.OWB).
+ */
+ err |= __get_user(ps, &sc->sc_ps);
+ regs->ps = (regs->ps & ~XCHAL_PS_CALLINC_MASK)
+ | (ps & XCHAL_PS_CALLINC_MASK);
+
+ /* Additional corruption checks */
+
+ if ((regs->windowbase >= (XCHAL_NUM_AREGS/4))
+ || ((regs->windowstart & ~((1<<(XCHAL_NUM_AREGS/4)) - 1)) != 0) )
+ err = 1;
+ if ((regs->lcount > 0)
+ && ((regs->lbeg > TASK_SIZE) || (regs->lend > TASK_SIZE)) )
+ err = 1;
+
+ /* Restore extended register state.
+ * See struct thread_struct in processor.h.
+ */
+ thread = &current->thread;
+
+ err |= __copy_from_user (regs->areg, sc->sc_areg, XCHAL_NUM_AREGS*4);
+ err |= __get_user(buf, &sc->sc_cpstate);
+ if (buf) {
+ if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
+ goto badframe;
+ err |= restore_cpextra(buf);
+ }
+
+ regs->syscall = -1; /* disable syscall checks */
+ return err;
+
+badframe:
+ return 1;
+}
+
+static inline void
+flush_my_cpstate(struct task_struct *tsk)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+
+#if 0 // FIXME
+ for (i = 0; i < XCHAL_CP_NUM; i++) {
+ if (tsk == coproc_owners[i]) {
+ xthal_validate_cp(i);
+ xthal_save_cpregs(tsk->thread.cpregs_ptr[i], i);
+
+ /* Invalidate and "disown" the cp to allow
+ * callers the chance to reset cp state in the
+ * task_struct. */
+
+ xthal_invalidate_cp(i);
+ coproc_owners[i] = 0;
+ }
+ }
+#endif
+ local_irq_restore(flags);
+}
+
+/* Return codes:
+ 0: nothing saved
+ 1: stuff to save, successful
+ -1: stuff to save, error happened
+*/
+static int
+save_cpextra (struct _cpstate *buf)
+{
+#if (XCHAL_EXTRA_SA_SIZE == 0) && (XCHAL_CP_NUM == 0)
+ return 0;
+#else
+
+ /* FIXME: If a task has never used a coprocessor, there is
+ * no need to save and restore anything. Tracking this
+ * information would allow us to optimize this section.
+ * Perhaps we can use current->used_math or (current->flags &
+ * PF_USEDFPU) or define a new field in the thread
+ * structure. */
+
+ /* We flush any live, task-owned cp state to the task_struct,
+ * then copy it all to the sigframe. Then we clear all
+ * cp/extra state in the task_struct, effectively
+ * clearing/resetting all cp/extra state for the signal
+ * handler (cp-exception handling will load these new values
+ * into the cp/extra registers.) This step is important for
+ * things like a floating-point cp, where the OS must reset
+ * the FCR to the default rounding mode. */
+
+ int err = 0;
+ struct task_struct *tsk = current;
+
+ flush_my_cpstate(tsk);
+ /* Note that we just copy everything: 'extra' and 'cp' state together.*/
+ err |= __copy_to_user(buf, tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
+ memset(tsk->thread.cp_save, 0, XTENSA_CP_EXTRA_SIZE);
+
+#if (XTENSA_CP_EXTRA_SIZE == 0)
+#error Sanity check on memset above, cpextra_size should not be zero.
+#endif
+
+ return err ? -1 : 1;
+#endif
+}
+
+static int
+setup_sigcontext(struct sigcontext *sc, struct _cpstate *cpstate,
+ struct pt_regs *regs, unsigned long mask)
+{
+ struct thread_struct *thread;
+ int err = 0;
+
+#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
+ COPY(pc);
+ COPY(ps);
+ COPY(depc);
+ COPY(wmask);
+ COPY(lbeg);
+ COPY(lend);
+ COPY(lcount);
+ COPY(sar);
+ COPY(windowbase);
+ COPY(windowstart);
+#undef COPY
+
+ /* Save extended register state.
+ * See struct thread_struct in processor.h.
+ */
+ thread = &current->thread;
+ err |= __copy_to_user (sc->sc_areg, regs->areg, XCHAL_NUM_AREGS * 4);
+ err |= save_cpextra(cpstate);
+ err |= __put_user(err ? NULL : cpstate, &sc->sc_cpstate);
+ /* non-iBCS2 extensions.. */
+ err |= __put_user(mask, &sc->oldmask);
+
+ return err;
+}
+
+asmlinkage int sys_sigreturn(struct pt_regs *regs)
+{
+ struct sigframe *frame = (struct sigframe *)regs->areg[1];
+ sigset_t set;
+ if (regs->depc > 64)
+ panic ("Double exception sys_sigreturn\n");
+
+ if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__get_user(set.sig[0], &frame->sc.oldmask)
+ || (_NSIG_WORDS > 1
+ && __copy_from_user(&set.sig[1], &frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->sc))
+ goto badframe;
+ return regs->areg[2];
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_sigframe *frame = (struct rt_sigframe *)regs->areg[1];
+ sigset_t set;
+ stack_t st;
+ int ret;
+ if (regs->depc > 64) {
+ printk(KERN_ERR "rt_sigreturn: unexpected double exception (DEPC)\n");
+ return 0;
+ }
+
+ if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+ goto badframe;
+ ret = regs->areg[2];
+
+ if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+ goto badframe;
+ /* It is more difficult to avoid calling this function than to
+ call it and ignore errors. */
+ do_sigaltstack(&st, NULL, regs->areg[1]);
+
+ return ret;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+/*
+ * Determine which stack to use..
+ */
+static inline void *
+get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
+{
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
+ /* Round down and keep the frame 16-byte aligned. */
+ return (void *)((sp - frame_size) & -16ul);
+}
+
+#define USE_SIGRETURN 0
+#define USE_RT_SIGRETURN 1
+
+static int
+gen_return_code(unsigned char *codemem, unsigned int use_rt_sigreturn)
+{
+ unsigned int retcall;
+ int err = 0;
+
+#if 0
+ /* Ignoring SA_RESTORER for now; it's supposed to be obsolete,
+ * and the xtensa glibc doesn't use it.
+ */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ regs->pr = (unsigned long) ka->sa.sa_restorer;
+ } else
+#endif /* 0 */
+ {
+
+#if (__NR_sigreturn > 255) || (__NR_rt_sigreturn > 255)
+
+/* The 12-bit immediate is really split up within the 24-bit MOVI
+ * instruction. As long as the above system call numbers fit within
+ * 8-bits, the following code works fine. See the Xtensa ISA for
+ * details.
+ */
+
+#error Generating the MOVI instruction below breaks!
+#endif
+
+ retcall = use_rt_sigreturn ? __NR_rt_sigreturn : __NR_sigreturn;
+
+#ifdef __XTENSA_EB__ /* Big Endian version */
+ /* Generate instruction: MOVI a2, retcall */
+ err |= __put_user(0x22, &codemem[0]);
+ err |= __put_user(0x0a, &codemem[1]);
+ err |= __put_user(retcall, &codemem[2]);
+ /* Generate instruction: SYSCALL */
+ err |= __put_user(0x00, &codemem[3]);
+ err |= __put_user(0x05, &codemem[4]);
+ err |= __put_user(0x00, &codemem[5]);
+
+#elif defined __XTENSA_EL__ /* Little Endian version */
+ /* Generate instruction: MOVI a2, retcall */
+ err |= __put_user(0x22, &codemem[0]);
+ err |= __put_user(0xa0, &codemem[1]);
+ err |= __put_user(retcall, &codemem[2]);
+ /* Generate instruction: SYSCALL */
+ err |= __put_user(0x00, &codemem[3]);
+ err |= __put_user(0x50, &codemem[4]);
+ err |= __put_user(0x00, &codemem[5]);
+#else
+#error Endianness not defined; must compile with an Xtensa compiler.
+#endif
+ }
+
+ /* Flush generated code out of the data cache */
+
+ if (err == 0)
+ __flush_invalidate_cache_range((unsigned long)codemem, 6UL);
+
+ return err;
+}
+
+static void
+set_thread_state(struct pt_regs *regs, void *stack, unsigned char *retaddr,
+ void *handler, unsigned long arg1, void *arg2, void *arg3)
+{
+ /* Set up registers for signal handler */
+ start_thread(regs, (unsigned long) handler, (unsigned long) stack);
+
+ /* Set up a stack frame for a call4
+ * Note: PS.CALLINC is set to one by start_thread
+ */
+ regs->areg[4] = (((unsigned long) retaddr) & 0x3fffffff) | 0x40000000;
+ regs->areg[6] = arg1;
+ regs->areg[7] = (unsigned long) arg2;
+ regs->areg[8] = (unsigned long) arg3;
+}
+
+static void setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct sigframe *frame;
+ int err = 0;
+ int signal;
+
+ frame = get_sigframe(ka, regs->areg[1], sizeof(*frame));
+ if (regs->depc > 64) {
+ printk(KERN_ERR "setup_frame: unexpected double exception (DEPC)\n");
+ return;
+ }
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ signal = current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= setup_sigcontext(&frame->sc, &frame->cpstate, regs, set->sig[0]);
+
+ if (_NSIG_WORDS > 1) {
+ err |= __copy_to_user(frame->extramask, &set->sig[1],
+ sizeof(frame->extramask));
+ }
+
+ /* Create sys_sigreturn syscall in stack frame */
+ err |= gen_return_code(frame->retcode, USE_SIGRETURN);
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Create signal handler execution context.
+ * Return context not modified until this point.
+ */
+ set_thread_state(regs, frame, frame->retcode,
+ ka->sa.sa_handler, signal, &frame->sc, NULL);
+
+ /* Set access mode to USER_DS. Nomenclature is outdated, but
+ * functionality is used in uaccess.h
+ */
+ set_fs(USER_DS);
+
+
+#if DEBUG_SIG
+ printk("SIG deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
+ current->comm, current->pid, signal, frame, regs->pc);
+#endif
+
+ return;
+
+give_sigsegv:
+ if (sig == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
+}
+
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ int err = 0;
+ int signal;
+
+ frame = get_sigframe(ka, regs->areg[1], sizeof(*frame));
+ if (regs->depc > 64)
+ panic ("Double exception sys_sigreturn\n");
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ signal = current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= copy_siginfo_to_user(&frame->info, info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __put_user((void *)current->sas_ss_sp,
+ &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(regs->areg[1]),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->cpstate,
+ regs, set->sig[0]);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Create sys_rt_sigreturn syscall in stack frame */
+ err |= gen_return_code(frame->retcode, USE_RT_SIGRETURN);
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Create signal handler execution context.
+ * Return context not modified until this point.
+ */
+ set_thread_state(regs, frame, frame->retcode,
+ ka->sa.sa_handler, signal, &frame->info, &frame->uc);
+
+ /* Set access mode to USER_DS. Nomenclature is outdated, but
+ * functionality is used in uaccess.h
+ */
+ set_fs(USER_DS);
+
+#if DEBUG_SIG
+ printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
+ current->comm, current->pid, signal, frame, regs->pc);
+#endif
+
+ return;
+
+give_sigsegv:
+ if (sig == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
+}
+
+
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+int do_signal(struct pt_regs *regs, sigset_t *oldset)
+{
+ siginfo_t info;
+ int signr;
+ struct k_sigaction ka;
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+
+ /* Are we from a system call? */
+ if (regs->syscall >= 0) {
+ /* If so, check system call restarting.. */
+ /* Syscall error codes are returned negated in a2. */
+ switch (regs->areg[2]) {
+ case -ERESTARTNOHAND:
+ case -ERESTART_RESTARTBLOCK:
+ regs->areg[2] = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ka.sa.sa_flags & SA_RESTART)) {
+ regs->areg[2] = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ regs->areg[2] = regs->syscall;
+ regs->pc -= 3;
+ }
+ }
+
+ if (signr == 0)
+ return 0; /* no signals delivered */
+
+ /* Whee! Actually deliver the signal. */
+
+ /* Set up the stack frame */
+ if (ka.sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(signr, &ka, &info, oldset, regs);
+ else
+ setup_frame(signr, &ka, oldset, regs);
+
+ if (ka.sa.sa_flags & SA_ONESHOT)
+ ka.sa.sa_handler = SIG_DFL;
+
+ if (!(ka.sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked, &current->blocked, &ka.sa.sa_mask);
+ sigaddset(&current->blocked, signr);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+ return 1;
+}
diff --git a/arch/xtensa/kernel/syscalls.c b/arch/xtensa/kernel/syscalls.c
new file mode 100644
index 00000000000..abc8ed6c702
--- /dev/null
+++ b/arch/xtensa/kernel/syscalls.c
@@ -0,0 +1,418 @@
+/*
+ * arch/xtensa/kernel/syscalls.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 1995 - 2000 by Ralf Baechle
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Chris Zankel <chris@zankel.net>
+ * Kevin Chea
+ *
+ */
+
+#define DEBUG 0
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+#include <linux/unistd.h>
+#include <linux/stringify.h>
+#include <linux/syscalls.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/errno.h>
+#include <asm/ptrace.h>
+#include <asm/signal.h>
+#include <asm/uaccess.h>
+#include <asm/hardirq.h>
+#include <asm/mman.h>
+#include <asm/shmparam.h>
+#include <asm/page.h>
+#include <asm/ipc.h>
+
+extern void do_syscall_trace(void);
+typedef int (*syscall_t)(void *a0,...);
+extern int (*do_syscalls)(struct pt_regs *regs, syscall_t fun,
+ int narg);
+extern syscall_t sys_call_table[];
+extern unsigned char sys_narg_table[];
+
+/*
+ * sys_pipe() is the normal C calling standard for creating a pipe. It's not
+ * the way unix traditional does this, though.
+ */
+
+int sys_pipe(int __user *userfds)
+{
+ int fd[2];
+ int error;
+
+ error = do_pipe(fd);
+ if (!error) {
+ if (copy_to_user(userfds, fd, 2 * sizeof(int)))
+ error = -EFAULT;
+ }
+ return error;
+}
+
+/*
+ * Common code for old and new mmaps.
+ */
+
+static inline long do_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ int error = -EBADF;
+ struct file * file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
+unsigned long old_mmap(unsigned long addr, size_t len, int prot,
+ int flags, int fd, off_t offset)
+{
+ return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+}
+
+long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
+ unsigned long flags, unsigned long fd, unsigned long pgoff)
+{
+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
+}
+
+int sys_fork(struct pt_regs *regs)
+{
+ return do_fork(SIGCHLD, regs->areg[1], regs, 0, NULL, NULL);
+}
+
+int sys_vfork(struct pt_regs *regs)
+{
+ return do_fork(CLONE_VFORK|CLONE_VM|SIGCHLD, regs->areg[1],
+ regs, 0, NULL, NULL);
+}
+
+int sys_clone(struct pt_regs *regs)
+{
+ unsigned long clone_flags;
+ unsigned long newsp;
+ int __user *parent_tidptr, *child_tidptr;
+ clone_flags = regs->areg[4];
+ newsp = regs->areg[3];
+ parent_tidptr = (int __user *)regs->areg[5];
+ child_tidptr = (int __user *)regs->areg[6];
+ if (!newsp)
+ newsp = regs->areg[1];
+ return do_fork(clone_flags,newsp,regs,0,parent_tidptr,child_tidptr);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+
+int sys_execve(struct pt_regs *regs)
+{
+ int error;
+ char * filename;
+
+ filename = getname((char *) (long)regs->areg[5]);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename, (char **) (long)regs->areg[3],
+ (char **) (long)regs->areg[4], regs);
+ putname(filename);
+
+out:
+ return error;
+}
+
+int sys_uname(struct old_utsname * name)
+{
+ if (name && !copy_to_user(name, &system_utsname, sizeof (*name)))
+ return 0;
+ return -EFAULT;
+}
+
+int sys_olduname(struct oldold_utsname * name)
+{
+ int error;
+
+ if (!name)
+ return -EFAULT;
+ if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
+ return -EFAULT;
+
+ error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
+ error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
+ error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
+ error -= __put_user(0,name->nodename+__OLD_UTS_LEN);
+ error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
+ error -= __put_user(0,name->release+__OLD_UTS_LEN);
+ error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
+ error -= __put_user(0,name->version+__OLD_UTS_LEN);
+ error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
+ error -= __put_user(0,name->machine+__OLD_UTS_LEN);
+
+ return error ? -EFAULT : 0;
+}
+
+
+/*
+ * Build the string table for the builtin "poor man's strace".
+ */
+
+#if DEBUG
+#define SYSCALL(fun, narg) #fun,
+static char *sfnames[] = {
+#include "syscalls.h"
+};
+#undef SYSCALL
+#endif
+
+void system_call (struct pt_regs *regs)
+{
+ syscall_t syscall;
+ unsigned long parm0, parm1, parm2, parm3, parm4, parm5;
+ int nargs, res;
+ unsigned int syscallnr;
+ int ps;
+
+#if DEBUG
+ int i;
+ unsigned long parms[6];
+ char *sysname;
+#endif
+
+ regs->syscall = regs->areg[2];
+
+ do_syscall_trace();
+
+ /* Have to load after syscall_trace because strace
+ * sometimes changes regs->syscall.
+ */
+ syscallnr = regs->syscall;
+
+ parm0 = parm1 = parm2 = parm3 = parm4 = parm5 = 0;
+
+ /* Restore interrupt level to syscall invoker's.
+ * If this were in assembly, we wouldn't disable
+ * interrupts in the first place:
+ */
+ local_save_flags (ps);
+ local_irq_restore((ps & ~XCHAL_PS_INTLEVEL_MASK) |
+ (regs->ps & XCHAL_PS_INTLEVEL_MASK) );
+
+ if (syscallnr > __NR_Linux_syscalls) {
+ regs->areg[2] = -ENOSYS;
+ return;
+ }
+
+ syscall = sys_call_table[syscallnr];
+ nargs = sys_narg_table[syscallnr];
+
+ if (syscall == NULL) {
+ regs->areg[2] = -ENOSYS;
+ return;
+ }
+
+ /* There shouldn't be more than six arguments in the table! */
+
+ if (nargs > 6)
+ panic("Internal error - too many syscall arguments (%d)!\n",
+ nargs);
+
+ /* Linux takes system-call arguments in registers. The ABI
+ * and Xtensa software conventions require the system-call
+ * number in a2. If an argument exists in a2, we move it to
+ * the next available register. Note that for improved
+ * efficiency, we do NOT shift all parameters down one
+ * register to maintain the original order.
+ *
+ * At best case (zero arguments), we just write the syscall
+ * number to a2. At worst case (1 to 6 arguments), we move
+ * the argument in a2 to the next available register, then
+ * write the syscall number to a2.
+ *
+ * For clarity, the following truth table enumerates all
+ * possibilities.
+ *
+ * arguments syscall number arg0, arg1, arg2, arg3, arg4, arg5
+ * --------- -------------- ----------------------------------
+ * 0 a2
+ * 1 a2 a3
+ * 2 a2 a4, a3
+ * 3 a2 a5, a3, a4
+ * 4 a2 a6, a3, a4, a5
+ * 5 a2 a7, a3, a4, a5, a6
+ * 6 a2 a8, a3, a4, a5, a6, a7
+ */
+ if (nargs) {
+ parm0 = regs->areg[nargs+2];
+ parm1 = regs->areg[3];
+ parm2 = regs->areg[4];
+ parm3 = regs->areg[5];
+ parm4 = regs->areg[6];
+ parm5 = regs->areg[7];
+ } else /* nargs == 0 */
+ parm0 = (unsigned long) regs;
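+
+ /* Worked example of the shuffle above (illustrative): for a
+ * three-argument call such as sys_read(fd, buf, count), arg0 was
+ * moved from a2 to a5 (= a[nargs+2]), so parm0 = areg[5],
+ * parm1 = areg[3], parm2 = areg[4], i.e. the "3" row of the
+ * truth table.
+ */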
+
+#if DEBUG
+ parms[0] = parm0;
+ parms[1] = parm1;
+ parms[2] = parm2;
+ parms[3] = parm3;
+ parms[4] = parm4;
+ parms[5] = parm5;
+
+ sysname = sfnames[syscallnr];
+ if (strncmp(sysname, "sys_", 4) == 0)
+ sysname = sysname + 4;
+
+ printk("\017SYSCALL:I:%x:%d:%s %s(", regs->pc, current->pid,
+ current->comm, sysname);
+ for (i = 0; i < nargs; i++)
+ printk((i>0) ? ", %#lx" : "%#lx", parms[i]);
+ printk(")\n");
+#endif
+
+ res = syscall((void *)parm0, parm1, parm2, parm3, parm4, parm5);
+
+#if DEBUG
+ printk("\017SYSCALL:O:%d:%s %s(",current->pid, current->comm, sysname);
+ for (i = 0; i < nargs; i++)
+ printk((i>0) ? ", %#lx" : "%#lx", parms[i]);
+ if (res < 4096)
+ printk(") = %d\n", res);
+ else
+ printk(") = %#x\n", res);
+#endif /* DEBUG */
+
+ regs->areg[2] = res;
+ do_syscall_trace();
+}
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls.
+ *
+ * This is really horribly ugly.
+ */
+
+int sys_ipc (uint call, int first, int second,
+ int third, void __user *ptr, long fifth)
+{
+ int version, ret;
+
+ version = call >> 16; /* hack for backward compatibility */
+ call &= 0xffff;
+ ret = -ENOSYS;
+
+ switch (call) {
+ case SEMOP:
+ ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
+ second, NULL);
+ break;
+
+ case SEMTIMEDOP:
+ ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
+ second, (const struct timespec __user *) fifth);
+ break;
+
+ case SEMGET:
+ ret = sys_semget (first, second, third);
+ break;
+
+ case SEMCTL: {
+ union semun fourth;
+
+ if (ptr && !get_user(fourth.__pad, (void *__user *) ptr))
+ ret = sys_semctl (first, second, third, fourth);
+ break;
+ }
+
+ case MSGSND:
+ ret = sys_msgsnd (first, (struct msgbuf __user*) ptr,
+ second, third);
+ break;
+
+ case MSGRCV:
+ switch (version) {
+ case 0: {
+ struct ipc_kludge tmp;
+
+ if (ptr && !copy_from_user(&tmp,
+ (struct ipc_kludge *) ptr,
+ sizeof (tmp)))
+ ret = sys_msgrcv (first, tmp.msgp, second,
+ tmp.msgtyp, third);
+ break;
+ }
+
+ default:
+ ret = sys_msgrcv (first, (struct msgbuf __user *) ptr,
+ second, 0, third);
+ break;
+ }
+ break;
+
+ case MSGGET:
+ ret = sys_msgget ((key_t) first, second);
+ break;
+
+ case MSGCTL:
+ ret = sys_msgctl (first, second, (struct msqid_ds __user*) ptr);
+ break;
+
+ case SHMAT: {
+ ulong raddr;
+ ret = do_shmat (first, (char __user *) ptr, second, &raddr);
+
+ if (!ret)
+ ret = put_user (raddr, (ulong __user *) third);
+
+ break;
+ }
+
+ case SHMDT:
+ ret = sys_shmdt ((char __user *)ptr);
+ break;
+
+ case SHMGET:
+ ret = sys_shmget (first, second, third);
+ break;
+
+ case SHMCTL:
+ ret = sys_shmctl (first, second, (struct shmid_ds __user*) ptr);
+ break;
+ }
+ return ret;
+}
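+
+/* Illustration (user-space view, not part of this file): glibc's semop()
+ * typically enters this demultiplexer as
+ *
+ * ipc(SEMOP, semid, nsops, 0, sops);
+ *
+ * so call = SEMOP, first = semid, second = nsops, and ptr = sops.
+ */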
+
diff --git a/arch/xtensa/kernel/syscalls.h b/arch/xtensa/kernel/syscalls.h
new file mode 100644
index 00000000000..5b3f75f50fe
--- /dev/null
+++ b/arch/xtensa/kernel/syscalls.h
@@ -0,0 +1,248 @@
+/*
+ * arch/xtensa/kernel/syscalls.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Changes by Joe Taylor <joe@tensilica.com>
+ */
+
+/*
+ * This file is being included twice - once to build a list of all
+ * syscalls and once to build a table of how many arguments each syscall
+ * accepts. Syscalls that receive a pointer to the saved registers are
+ * marked as having zero arguments.
+ *
+ * The binary compatibility calls are in a separate list.
+ *
+ * Entry '0' used to be system_call. It's removed to disable indirect
+ * system calls for now so user tasks can't recurse. See mips'
+ * sys_syscall for a comparable example.
+ */
+
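+/* For illustration, the two expansions look roughly like this (assumed,
+ * mirroring the tables built in syscalls.c):
+ *
+ * #define SYSCALL(fun, narg) (syscall_t)fun, // builds sys_call_table[]
+ * #define SYSCALL(fun, narg) narg, // builds sys_narg_table[]
+ */
+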
+SYSCALL(0, 0) /* 00 */
+
+SYSCALL(sys_exit, 1)
+SYSCALL(sys_fork, 0)
+SYSCALL(sys_read, 3)
+SYSCALL(sys_write, 3)
+SYSCALL(sys_open, 3) /* 05 */
+SYSCALL(sys_close, 1)
+SYSCALL(sys_waitpid, 3)
+SYSCALL(sys_creat, 2)
+SYSCALL(sys_link, 2)
+SYSCALL(sys_unlink, 1) /* 10 */
+SYSCALL(sys_execve, 0)
+SYSCALL(sys_chdir, 1)
+SYSCALL(sys_time, 1)
+SYSCALL(sys_mknod, 3)
+SYSCALL(sys_chmod, 2) /* 15 */
+SYSCALL(sys_lchown, 3)
+SYSCALL(sys_ni_syscall, 0)
+SYSCALL(sys_stat, 2)
+SYSCALL(sys_lseek, 3)
+SYSCALL(sys_getpid, 0) /* 20 */
+SYSCALL(sys_mount, 5)
+SYSCALL(sys_oldumount, 1)
+SYSCALL(sys_setuid, 1)
+SYSCALL(sys_getuid, 0)
+SYSCALL(sys_stime, 1) /* 25 */
+SYSCALL(sys_ptrace, 4)
+SYSCALL(sys_alarm, 1)
+SYSCALL(sys_fstat, 2)
+SYSCALL(sys_pause, 0)
+SYSCALL(sys_utime, 2) /* 30 */
+SYSCALL(sys_ni_syscall, 0)
+SYSCALL(sys_ni_syscall, 0)
+SYSCALL(sys_access, 2)
+SYSCALL(sys_nice, 1)
+SYSCALL(sys_ni_syscall, 0) /* 35 */
+SYSCALL(sys_sync, 0)
+SYSCALL(sys_kill, 2)
+SYSCALL(sys_rename, 2)
+SYSCALL(sys_mkdir, 2)
+SYSCALL(sys_rmdir, 1) /* 40 */
+SYSCALL(sys_dup, 1)
+SYSCALL(sys_pipe, 1)
+SYSCALL(sys_times, 1)
+SYSCALL(sys_ni_syscall, 0)
+SYSCALL(sys_brk, 1) /* 45 */
+SYSCALL(sys_setgid, 1)
+SYSCALL(sys_getgid, 0)
+SYSCALL(sys_ni_syscall, 0) /* was signal(2) */
+SYSCALL(sys_geteuid, 0)
+SYSCALL(sys_getegid, 0) /* 50 */
+SYSCALL(sys_acct, 1)
+SYSCALL(sys_umount, 2)
+SYSCALL(sys_ni_syscall, 0)
+SYSCALL(sys_ioctl, 3)
+SYSCALL(sys_fcntl, 3) /* 55 */
+SYSCALL(sys_ni_syscall, 2)
+SYSCALL(sys_setpgid, 2)
+SYSCALL(sys_ni_syscall, 0)
+SYSCALL(sys_olduname, 1)
+SYSCALL(sys_umask, 1) /* 60 */
+SYSCALL(sys_chroot, 1)
+SYSCALL(sys_ustat, 2)
+SYSCALL(sys_dup2, 2)
+SYSCALL(sys_getppid, 0)
+SYSCALL(sys_getpgrp, 0) /* 65 */
+SYSCALL(sys_setsid, 0)
+SYSCALL(sys_sigaction, 3)
+SYSCALL(sys_sgetmask, 0)
+SYSCALL(sys_ssetmask, 1)
+SYSCALL(sys_setreuid, 2) /* 70 */
+SYSCALL(sys_setregid, 2)
+SYSCALL(sys_sigsuspend, 0)
+SYSCALL(sys_sigpending, 1)
+SYSCALL(sys_sethostname, 2)
+SYSCALL(sys_setrlimit, 2) /* 75 */
+SYSCALL(sys_getrlimit, 2)
+SYSCALL(sys_getrusage, 2)
+SYSCALL(sys_gettimeofday, 2)
+SYSCALL(sys_settimeofday, 2)
+SYSCALL(sys_getgroups, 2) /* 80 */
+SYSCALL(sys_setgroups, 2)
+SYSCALL(sys_ni_syscall, 0) /* old_select */
+SYSCALL(sys_symlink, 2)
+SYSCALL(sys_lstat, 2)
+SYSCALL(sys_readlink, 3) /* 85 */
+SYSCALL(sys_uselib, 1)
+SYSCALL(sys_swapon, 2)
+SYSCALL(sys_reboot, 3)
+SYSCALL(old_readdir, 3)
+SYSCALL(old_mmap, 6) /* 90 */
+SYSCALL(sys_munmap, 2)
+SYSCALL(sys_truncate, 2)
+SYSCALL(sys_ftruncate, 2)
+SYSCALL(sys_fchmod, 2)
+SYSCALL(sys_fchown, 3) /* 95 */
+SYSCALL(sys_getpriority, 2)
+SYSCALL(sys_setpriority, 3)
+SYSCALL(sys_ni_syscall, 0)
+SYSCALL(sys_statfs, 2)
+SYSCALL(sys_fstatfs, 2) /* 100 */
+SYSCALL(sys_ni_syscall, 3)
+SYSCALL(sys_socketcall, 2)
+SYSCALL(sys_syslog, 3)
+SYSCALL(sys_setitimer, 3)
+SYSCALL(sys_getitimer, 2) /* 105 */
+SYSCALL(sys_newstat, 2)
+SYSCALL(sys_newlstat, 2)
+SYSCALL(sys_newfstat, 2)
+SYSCALL(sys_uname, 1)
+SYSCALL(sys_ni_syscall, 0) /* 110 */
+SYSCALL(sys_vhangup, 0)
+SYSCALL(sys_ni_syscall, 0) /* was sys_idle() */
+SYSCALL(sys_ni_syscall, 0)
+SYSCALL(sys_wait4, 4)
+SYSCALL(sys_swapoff, 1) /* 115 */
+SYSCALL(sys_sysinfo, 1)
+SYSCALL(sys_ipc, 5) /* 6 really, but glibc uses only 5 */
+SYSCALL(sys_fsync, 1)
+SYSCALL(sys_sigreturn, 0)
+SYSCALL(sys_clone, 0) /* 120 */
+SYSCALL(sys_setdomainname, 2)
+SYSCALL(sys_newuname, 1)
+SYSCALL(sys_ni_syscall, 0) /* sys_modify_ldt */
+SYSCALL(sys_adjtimex, 1)
+SYSCALL(sys_mprotect, 3) /* 125 */
+SYSCALL(sys_sigprocmask, 3)
+SYSCALL(sys_ni_syscall, 2) /* old sys_create_module */
+SYSCALL(sys_init_module, 2)
+SYSCALL(sys_delete_module, 1)
+SYSCALL(sys_ni_syscall, 1) /* old sys_get_kernel_syms */ /* 130 */
+SYSCALL(sys_quotactl, 0)
+SYSCALL(sys_getpgid, 1)
+SYSCALL(sys_fchdir, 1)
+SYSCALL(sys_bdflush, 2)
+SYSCALL(sys_sysfs, 3) /* 135 */
+SYSCALL(sys_personality, 1)
+SYSCALL(sys_ni_syscall, 0) /* for afs_syscall */
+SYSCALL(sys_setfsuid, 1)
+SYSCALL(sys_setfsgid, 1)
+SYSCALL(sys_llseek, 5) /* 140 */
+SYSCALL(sys_getdents, 3)
+SYSCALL(sys_select, 5)
+SYSCALL(sys_flock, 2)
+SYSCALL(sys_msync, 3)
+SYSCALL(sys_readv, 3) /* 145 */
+SYSCALL(sys_writev, 3)
+SYSCALL(sys_ni_syscall, 3)
+SYSCALL(sys_ni_syscall, 3)
+SYSCALL(sys_ni_syscall, 4) /* handled in fast syscall handler. */
+SYSCALL(sys_ni_syscall, 0) /* 150 */
+SYSCALL(sys_getsid, 1)
+SYSCALL(sys_fdatasync, 1)
+SYSCALL(sys_sysctl, 1)
+SYSCALL(sys_mlock, 2)
+SYSCALL(sys_munlock, 2) /* 155 */
+SYSCALL(sys_mlockall, 1)
+SYSCALL(sys_munlockall, 0)
+SYSCALL(sys_sched_setparam,2)
+SYSCALL(sys_sched_getparam,2)
+SYSCALL(sys_sched_setscheduler,3) /* 160 */
+SYSCALL(sys_sched_getscheduler,1)
+SYSCALL(sys_sched_yield,0)
+SYSCALL(sys_sched_get_priority_max,1)
+SYSCALL(sys_sched_get_priority_min,1)
+SYSCALL(sys_sched_rr_get_interval,2) /* 165 */
+SYSCALL(sys_nanosleep,2)
+SYSCALL(sys_mremap,4)
+SYSCALL(sys_accept, 3)
+SYSCALL(sys_bind, 3)
+SYSCALL(sys_connect, 3) /* 170 */
+SYSCALL(sys_getpeername, 3)
+SYSCALL(sys_getsockname, 3)
+SYSCALL(sys_getsockopt, 5)
+SYSCALL(sys_listen, 2)
+SYSCALL(sys_recv, 4) /* 175 */
+SYSCALL(sys_recvfrom, 6)
+SYSCALL(sys_recvmsg, 3)
+SYSCALL(sys_send, 4)
+SYSCALL(sys_sendmsg, 3)
+SYSCALL(sys_sendto, 6) /* 180 */
+SYSCALL(sys_setsockopt, 5)
+SYSCALL(sys_shutdown, 2)
+SYSCALL(sys_socket, 3)
+SYSCALL(sys_socketpair, 4)
+SYSCALL(sys_setresuid, 3) /* 185 */
+SYSCALL(sys_getresuid, 3)
+SYSCALL(sys_ni_syscall, 5) /* old sys_query_module */
+SYSCALL(sys_poll, 3)
+SYSCALL(sys_nfsservctl, 3)
+SYSCALL(sys_setresgid, 3) /* 190 */
+SYSCALL(sys_getresgid, 3)
+SYSCALL(sys_prctl, 5)
+SYSCALL(sys_rt_sigreturn, 0)
+SYSCALL(sys_rt_sigaction, 4)
+SYSCALL(sys_rt_sigprocmask, 4) /* 195 */
+SYSCALL(sys_rt_sigpending, 2)
+SYSCALL(sys_rt_sigtimedwait, 4)
+SYSCALL(sys_rt_sigqueueinfo, 3)
+SYSCALL(sys_rt_sigsuspend, 0)
+SYSCALL(sys_pread64, 5) /* 200 */
+SYSCALL(sys_pwrite64, 5)
+SYSCALL(sys_chown, 3)
+SYSCALL(sys_getcwd, 2)
+SYSCALL(sys_capget, 2)
+SYSCALL(sys_capset, 2) /* 205 */
+SYSCALL(sys_sigaltstack, 0)
+SYSCALL(sys_sendfile, 4)
+SYSCALL(sys_ni_syscall, 0)
+SYSCALL(sys_ni_syscall, 0)
+SYSCALL(sys_mmap2, 6) /* 210 */
+SYSCALL(sys_truncate64, 2)
+SYSCALL(sys_ftruncate64, 2)
+SYSCALL(sys_stat64, 2)
+SYSCALL(sys_lstat64, 2)
+SYSCALL(sys_fstat64, 2) /* 215 */
+SYSCALL(sys_pivot_root, 2)
+SYSCALL(sys_mincore, 3)
+SYSCALL(sys_madvise, 3)
+SYSCALL(sys_getdents64, 3)
+SYSCALL(sys_vfork, 0) /* 220 */
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
new file mode 100644
index 00000000000..e07287db5a4
--- /dev/null
+++ b/arch/xtensa/kernel/time.c
@@ -0,0 +1,227 @@
+/*
+ * arch/xtensa/kernel/time.c
+ *
+ * Timer and clock support.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/profile.h>
+#include <linux/delay.h>
+
+#include <asm/timex.h>
+#include <asm/platform.h>
+
+
+extern volatile unsigned long wall_jiffies;
+
+u64 jiffies_64 = INITIAL_JIFFIES;
+EXPORT_SYMBOL(jiffies_64);
+
+spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
+EXPORT_SYMBOL(rtc_lock);
+
+
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+unsigned long ccount_per_jiffy; /* per 1/HZ */
+unsigned long ccount_nsec; /* nsec per ccount increment */
+#endif
+
+unsigned int last_ccount_stamp;
+static long last_rtc_update = 0;
+
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ */
+
+unsigned long long sched_clock(void)
+{
+ return (unsigned long long)jiffies * (1000000000 / HZ);
+}
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static struct irqaction timer_irqaction = {
+ .handler = timer_interrupt,
+ .flags = SA_INTERRUPT,
+ .name = "timer",
+};
+
+void __init time_init(void)
+{
+ time_t sec_o, sec_n = 0;
+
+ /* The platform must provide a function to calibrate the processor
+ * speed for the CALIBRATE.
+ */
+
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+ printk("Calibrating CPU frequency ");
+ platform_calibrate_ccount();
+ printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
+ (int)(ccount_per_jiffy/(10000/HZ))%100);
+#endif
+
+ /* Set time from RTC (if provided) */
+
+ if (platform_get_rtc_time(&sec_o) == 0)
+ while (platform_get_rtc_time(&sec_n))
+ if (sec_o != sec_n)
+ break;
+
+ xtime.tv_nsec = 0;
+ last_rtc_update = xtime.tv_sec = sec_n;
+ last_ccount_stamp = get_ccount();
+
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
+
+ /* Initialize the linux timer interrupt. */
+
+ setup_irq(LINUX_TIMER_INT, &timer_irqaction);
+ set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
+}
+
+
+int do_settimeofday(struct timespec *tv)
+{
+ time_t wtm_sec, sec = tv->tv_sec;
+ long wtm_nsec, nsec = tv->tv_nsec;
+ unsigned long ccount;
+
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ write_seqlock_irq(&xtime_lock);
+
+ /* This is revolting. We need to set "xtime" correctly. However, the
+ * value in this location is the value at the most recent update of
+ * wall time. Discover what correction gettimeofday() would have
+ * made, and then undo it!
+ */
+ ccount = get_ccount();
+ nsec -= (ccount - last_ccount_stamp) * CCOUNT_NSEC;
+ nsec -= (jiffies - wall_jiffies) * CCOUNT_PER_JIFFY * CCOUNT_NSEC;
+
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+ set_normalized_timespec(&xtime, sec, nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
+ write_sequnlock_irq(&xtime_lock);
+ return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
+
+
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long flags;
+ unsigned long sec, usec, delta, lost, seq;
+
+ do {
+ seq = read_seqbegin_irqsave(&xtime_lock, flags);
+
+ delta = get_ccount() - last_ccount_stamp;
+ sec = xtime.tv_sec;
+ usec = (xtime.tv_nsec / NSEC_PER_USEC);
+
+ lost = jiffies - wall_jiffies;
+
+ } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+
+ usec += lost * (1000000UL/HZ) + (delta * CCOUNT_NSEC) / NSEC_PER_USEC;
+ for (; usec >= 1000000; sec++, usec -= 1000000)
+ ;
+
+ tv->tv_sec = sec;
+ tv->tv_usec = usec;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+
+/*
+ * The timer interrupt is called HZ times per second.
+ */
+
+irqreturn_t timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+{
+
+ unsigned long next;
+
+ next = get_linux_timer();
+
+again:
+ while ((signed long)(get_ccount() - next) > 0) {
+
+ profile_tick(CPU_PROFILING, regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
+
+ write_seqlock(&xtime_lock);
+
+ last_ccount_stamp = next;
+ next += CCOUNT_PER_JIFFY;
+ do_timer (regs); /* Linux handler in kernel/timer.c */
+
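+ /* If the kernel clock is externally synchronized, write the time
+ * of day back to the RTC about every 11 minutes (659 s). The
+ * write is timed just before a second boundary, so the RTC is
+ * set to xtime.tv_sec + 1.
+ */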
+ if ((time_status & STA_UNSYNC) == 0 &&
+ xtime.tv_sec - last_rtc_update >= 659 &&
+ abs((xtime.tv_nsec/1000)-(1000000-1000000/HZ))<5000000/HZ &&
+ jiffies - wall_jiffies == 1) {
+
+ if (platform_set_rtc_time(xtime.tv_sec+1) == 0)
+ last_rtc_update = xtime.tv_sec+1;
+ else
+ /* Do it again in 60 s */
+ last_rtc_update += 60;
+ }
+ write_sequnlock(&xtime_lock);
+ }
+
+ /* NOTE: writing CCOMPAREn clears the interrupt. */
+
+ set_linux_timer (next);
+
+ /* Make sure we didn't miss any tick... */
+
+ if ((signed long)(get_ccount() - next) > 0)
+ goto again;
+
+ /* Allow the platform to do something useful (e.g. kick a watchdog). */
+
+ platform_heartbeat();
+
+ return IRQ_HANDLED;
+}
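+
+/* A quick sanity check of the ccount arithmetic (illustrative numbers):
+ * with a 50 MHz core clock and HZ == 100, ccount_per_jiffy is 500000
+ * cycles and ccount_nsec is 20 ns, and the calibration printk in
+ * time_init() reports "50.00 MHz".
+ */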
+
+#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
+void __devinit calibrate_delay(void)
+{
+ loops_per_jiffy = CCOUNT_PER_JIFFY;
+ printk("Calibrating delay loop (skipped)... "
+ "%lu.%02lu BogoMIPS preset\n",
+ loops_per_jiffy/(1000000/HZ),
+ (loops_per_jiffy/(10000/HZ)) % 100);
+}
+#endif
+
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
new file mode 100644
index 00000000000..804246e743b
--- /dev/null
+++ b/arch/xtensa/kernel/traps.c
@@ -0,0 +1,498 @@
+/*
+ * arch/xtensa/kernel/traps.c
+ *
+ * Exception handling.
+ *
+ * Derived from code with the following copyrights:
+ * Copyright (C) 1994 - 1999 by Ralf Baechle
+ * Modified for R3000 by Paul M. Antoine, 1995, 1996
+ * Complete output from die() by Ulf Carlsson, 1998
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ *
+ * Essentially rewritten for the Xtensa architecture port.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Kevin Chea
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include <linux/kallsyms.h>
+
+#include <asm/ptrace.h>
+#include <asm/timex.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+
+#ifdef CONFIG_KGDB
+extern int gdb_enter;
+extern int return_from_debug_flag;
+#endif
+
+/*
+ * Machine specific interrupt handlers
+ */
+
+extern void kernel_exception(void);
+extern void user_exception(void);
+
+extern void fast_syscall_kernel(void);
+extern void fast_syscall_user(void);
+extern void fast_alloca(void);
+extern void fast_unaligned(void);
+extern void fast_second_level_miss(void);
+extern void fast_store_prohibited(void);
+extern void fast_coprocessor(void);
+
+extern void do_illegal_instruction (struct pt_regs*);
+extern void do_interrupt (struct pt_regs*);
+extern void do_unaligned_user (struct pt_regs*);
+extern void do_multihit (struct pt_regs*, unsigned long);
+extern void do_page_fault (struct pt_regs*, unsigned long);
+extern void do_debug (struct pt_regs*);
+extern void system_call (struct pt_regs*);
+
+/*
+ * The vector table must be preceded by a save area (which
+ * implies it must be in RAM, unless one places RAM immediately
+ * before a ROM and puts the vector at the start of the ROM (!))
+ */
+
+#define KRNL 0x01
+#define USER 0x02
+
+#define COPROCESSOR(x) \
+{ XCHAL_EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
+
+typedef struct {
+ int cause;
+ int fast;
+ void* handler;
+} dispatch_init_table_t;
+
+dispatch_init_table_t __init dispatch_init_table[] = {
+
+{ XCHAL_EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction},
+{ XCHAL_EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel },
+{ XCHAL_EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user },
+{ XCHAL_EXCCAUSE_SYSTEM_CALL, 0, system_call },
+/* XCHAL_EXCCAUSE_INSTRUCTION_FETCH unhandled */
+/* XCHAL_EXCCAUSE_LOAD_STORE_ERROR unhandled*/
+{ XCHAL_EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt },
+{ XCHAL_EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
+/* XCHAL_EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
+/* XCHAL_EXCCAUSE_PRIVILEGED unhandled */
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+#ifdef CONFIG_UNALIGNED_USER
+{ XCHAL_EXCCAUSE_UNALIGNED, USER, fast_unaligned },
+#else
+{ XCHAL_EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
+#endif
+{ XCHAL_EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
+#endif
+{ XCHAL_EXCCAUSE_ITLB_MISS, 0, do_page_fault },
+{ XCHAL_EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
+{ XCHAL_EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
+{ XCHAL_EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
+/* XCHAL_EXCCAUSE_SIZE_RESTRICTION unhandled */
+{ XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault },
+{ XCHAL_EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
+{ XCHAL_EXCCAUSE_DTLB_MISS, 0, do_page_fault },
+{ XCHAL_EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit },
+{ XCHAL_EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
+/* XCHAL_EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
+{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
+{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
+{ XCHAL_EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
+/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
+#if (XCHAL_CP_MASK & 1)
+COPROCESSOR(0),
+#endif
+#if (XCHAL_CP_MASK & 2)
+COPROCESSOR(1),
+#endif
+#if (XCHAL_CP_MASK & 4)
+COPROCESSOR(2),
+#endif
+#if (XCHAL_CP_MASK & 8)
+COPROCESSOR(3),
+#endif
+#if (XCHAL_CP_MASK & 16)
+COPROCESSOR(4),
+#endif
+#if (XCHAL_CP_MASK & 32)
+COPROCESSOR(5),
+#endif
+#if (XCHAL_CP_MASK & 64)
+COPROCESSOR(6),
+#endif
+#if (XCHAL_CP_MASK & 128)
+COPROCESSOR(7),
+#endif
+{ EXCCAUSE_MAPPED_DEBUG, 0, do_debug },
+{ -1, -1, 0 }
+
+};
+
+/* The exception table <exc_table> serves two functions:
+ * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
+ * 2. it is a temporary memory buffer for the exception handlers.
+ */
+
+unsigned long exc_table[EXC_TABLE_SIZE/4];
+
+void die(const char*, struct pt_regs*, long);
+
+static inline void
+__die_if_kernel(const char *str, struct pt_regs *regs, long err)
+{
+ if (!user_mode(regs))
+ die(str, regs, err);
+}
+
+/*
+ * Unhandled Exceptions. Kill user task or panic if in kernel space.
+ */
+
+void do_unhandled(struct pt_regs *regs, unsigned long exccause)
+{
+ __die_if_kernel("Caught unhandled exception - should not happen",
+ regs, SIGKILL);
+
+ /* If in user mode, send SIGILL signal to current process */
+ printk("Caught unhandled exception in '%s' "
+ "(pid = %d, pc = %#010lx) - should not happen\n"
+ "\tEXCCAUSE is %ld\n",
+ current->comm, current->pid, regs->pc, exccause);
+ force_sig(SIGILL, current);
+}
+
+/*
+ * Multi-hit exception. This is fatal!
+ */
+
+void do_multihit(struct pt_regs *regs, unsigned long exccause)
+{
+ die("Caught multihit exception", regs, SIGKILL);
+}
+
+/*
+ * Level-1 interrupt.
+ * We currently have no priority encoding.
+ */
+
+unsigned long ignored_level1_interrupts;
+extern void do_IRQ(int, struct pt_regs *);
+
+void do_interrupt (struct pt_regs *regs)
+{
+ unsigned long intread = get_sr (INTREAD);
+ unsigned long intenable = get_sr (INTENABLE);
+ int i, mask;
+
+ /* Handle all interrupts (no priorities).
+ * (Clear the interrupt before processing, in case it's
+ * edge-triggered or software-generated)
+ */
+
+ for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
+ if (mask & (intread & intenable)) {
+ set_sr (mask, INTCLEAR);
+ do_IRQ (i,regs);
+ }
+ }
+}
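+
+/* Worked example: with INTREAD == 0b0101 and INTENABLE == 0b0011, only
+ * interrupt 0 is both pending and enabled, so the loop clears it via
+ * INTCLEAR and calls do_IRQ(0, regs); the masked interrupt 2 stays
+ * pending.
+ */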
+
+/*
+ * Illegal instruction. Fatal if in kernel space.
+ */
+
+void
+do_illegal_instruction(struct pt_regs *regs)
+{
+ __die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);
+
+ /* If in user mode, send SIGILL signal to current process. */
+
+ printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
+ current->comm, current->pid, regs->pc);
+ force_sig(SIGILL, current);
+}
+
+
+/*
+ * Handle unaligned memory accesses from user space. Kill task.
+ *
+ * If CONFIG_UNALIGNED_USER is not set, we don't allow unaligned memory
+ * accesses from user space.
+ */
+
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+#ifndef CONFIG_UNALIGNED_USER
+void
+do_unaligned_user (struct pt_regs *regs)
+{
+ siginfo_t info;
+
+ __die_if_kernel("Unhandled unaligned exception in kernel",
+ regs, SIGKILL);
+
+ current->thread.bad_vaddr = regs->excvaddr;
+ current->thread.error_code = -3;
+ printk("Unaligned memory access to %08lx in '%s' "
+ "(pid = %d, pc = %#010lx)\n",
+ regs->excvaddr, current->comm, current->pid, regs->pc);
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = (void *) regs->excvaddr;
+ force_sig_info(SIGBUS, &info, current);
+
+}
+#endif
+#endif
+
+void
+do_debug(struct pt_regs *regs)
+{
+#ifdef CONFIG_KGDB
+ /* If remote debugging is configured AND enabled, we give control to
+ * kgdb. Otherwise, we fall through, perhaps giving control to the
+ * native debugger.
+ */
+
+ if (gdb_enter) {
+ extern void gdb_handle_exception(struct pt_regs *);
+ gdb_handle_exception(regs);
+ return_from_debug_flag = 1;
+ return;
+ }
+#endif
+
+ __die_if_kernel("Breakpoint in kernel", regs, SIGKILL);
+
+ /* If in user mode, send SIGTRAP signal to current process */
+
+ force_sig(SIGTRAP, current);
+}
+
+
+/*
+ * Initialize dispatch tables.
+ *
+ * The exception vectors are stored compressed in the __init section in the
+ * dispatch_init_table. This function initializes the following three tables
+ * from that compressed table:
+ * - fast user first dispatch table for user exceptions
+ * - fast kernel first dispatch table for kernel exceptions
+ * - default C-handler C-handler called by the default fast handler.
+ *
+ * See vectors.S for more details.
+ */
+
+#define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler))
+
+void trap_init(void)
+{
+ int i;
+
+ /* Setup default vectors. */
+
+ for(i = 0; i < 64; i++) {
+ set_handler(EXC_TABLE_FAST_USER/4 + i, user_exception);
+ set_handler(EXC_TABLE_FAST_KERNEL/4 + i, kernel_exception);
+ set_handler(EXC_TABLE_DEFAULT/4 + i, do_unhandled);
+ }
+
+ /* Setup specific handlers. */
+
+ for(i = 0; dispatch_init_table[i].cause >= 0; i++) {
+
+ int fast = dispatch_init_table[i].fast;
+ int cause = dispatch_init_table[i].cause;
+ void *handler = dispatch_init_table[i].handler;
+
+ if (fast == 0)
+ set_handler (EXC_TABLE_DEFAULT/4 + cause, handler);
+ if (fast && fast & USER)
+ set_handler (EXC_TABLE_FAST_USER/4 + cause, handler);
+ if (fast && fast & KRNL)
+ set_handler (EXC_TABLE_FAST_KERNEL/4 + cause, handler);
+ }
+
+ /* Initialize EXCSAVE_1 to hold the address of the exception table. */
+
+ i = (unsigned long)exc_table;
+ __asm__ __volatile__("wsr %0, "__stringify(EXCSAVE_1)"\n" : : "a" (i));
+}
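+
+/* Example of the resulting dispatch (taken from the table above): for
+ * XCHAL_EXCCAUSE_DTLB_MISS both fast tables point at
+ * fast_second_level_miss, while the default C-handler slot points at
+ * do_page_fault; causes with no table entry keep do_unhandled.
+ */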
+
+/*
+ * This function dumps the current valid window frame and other base registers.
+ */
+
+void show_regs(struct pt_regs * regs)
+{
+ int i, wmask;
+
+ wmask = regs->wmask & ~1;
+
+ for (i = 0; i < 32; i++) {
+ if (wmask & (1 << (i / 4)))
+ break;
+ if ((i % 8) == 0)
+ printk ("\n" KERN_INFO "a%02d: ", i);
+ printk("%08lx ", regs->areg[i]);
+ }
+ printk("\n");
+
+ printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
+ regs->pc, regs->ps, regs->depc, regs->excvaddr);
+ printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
+ regs->lbeg, regs->lend, regs->lcount, regs->sar);
+ if (user_mode(regs))
+ printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
+ regs->windowbase, regs->windowstart, regs->wmask,
+ regs->syscall);
+}
+
+void show_trace(struct task_struct *task, unsigned long *sp)
+{
+ unsigned long a0, a1, pc;
+ unsigned long sp_start, sp_end;
+
+ a1 = (unsigned long)sp;
+
+ if (a1 == 0)
+ __asm__ __volatile__ ("mov %0, a1\n" : "=a"(a1));
+
+
+ sp_start = a1 & ~(THREAD_SIZE-1);
+ sp_end = sp_start + THREAD_SIZE;
+
+ printk("Call Trace:");
+#ifdef CONFIG_KALLSYMS
+ printk("\n");
+#endif
+ spill_registers();
+
+ while (a1 > sp_start && a1 < sp_end) {
+ sp = (unsigned long*)a1;
+
+ a0 = *(sp - 4);
+ a1 = *(sp - 3);
+
+ if (a1 <= (unsigned long) sp)
+ break;
+
+ pc = MAKE_PC_FROM_RA(a0, a1);
+
+ if (kernel_text_address(pc)) {
+ printk(" [<%08lx>] ", pc);
+ print_symbol("%s\n", pc);
+ }
+ }
+ printk("\n");
+}
+
+/*
+ * This routine abuses get_user()/put_user() to reference pointers
+ * with at least a bit of error checking ...
+ */
+
+static int kstack_depth_to_print = 24;
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+ int i = 0;
+ unsigned long *stack;
+
+ if (sp == 0)
+ __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
+
+ stack = sp;
+
+ printk("\nStack: ");
+
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (kstack_end(sp))
+ break;
+ if (i && ((i % 8) == 0))
+ printk("\n ");
+ printk("%08lx ", *sp++);
+ }
+ printk("\n");
+ show_trace(task, stack);
+}
+
+void dump_stack(void)
+{
+ show_stack(current, NULL);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+
+void show_code(unsigned int *pc)
+{
+ long i;
+
+ printk("\nCode:");
+
+ for(i = -3 ; i < 6 ; i++) {
+ unsigned long insn;
+ if (__get_user(insn, pc + i)) {
+ printk(" (Bad address in pc)\n");
+ break;
+ }
+ printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>'));
+ }
+}
+
+spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+ static int die_counter;
+ int nl = 0;
+
+ console_verbose();
+ spin_lock_irq(&die_lock);
+
+ printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter);
+#ifdef CONFIG_PREEMPT
+ printk("PREEMPT ");
+ nl = 1;
+#endif
+ if (nl)
+ printk("\n");
+ show_regs(regs);
+ if (!user_mode(regs))
+ show_stack(NULL, (unsigned long*)regs->areg[1]);
+
+ spin_unlock_irq(&die_lock);
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops) {
+ printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(5 * HZ);
+ panic("Fatal exception");
+ }
+ do_exit(err);
+}
+
+
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
new file mode 100644
index 00000000000..81808f0c674
--- /dev/null
+++ b/arch/xtensa/kernel/vectors.S
@@ -0,0 +1,464 @@
+/*
+ * arch/xtensa/kernel/vectors.S
+ *
+ * This file contains all exception vectors (user, kernel, and double),
+ * as well as the window vectors (overflow and underflow), and the debug
+ * vector. These are the primary vectors executed by the processor if an
+ * exception occurs.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2005 Tensilica, Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+/*
+ * We use a two-level table approach. The user and kernel exception vectors
+ * use a first-level dispatch table to dispatch the exception to a registered
+ * fast handler or the default handler, if no fast handler was registered.
+ * The default handler sets up a C-stack and dispatches the exception to a
+ * registered C handler in the second-level dispatch table.
+ *
+ * Fast handler entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original value in depc
+ * a3: dispatch table
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: a3
+ *
+ * The value for PT_DEPC saved to stack also functions as a boolean to
+ * indicate that the exception is either a double or a regular exception:
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Note: Neither the kernel nor the user exception handler generates literals.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/offsets.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+
+/*
+ * User exception vector. (Exceptions with PS.UM == 1, PS.EXCM == 0)
+ *
+ * We get here when an exception occurred while we were in userland.
+ * We switch to the kernel stack and jump to the first-level handler
+ * associated with the exception cause.
+ *
+ * Note: the saved kernel stack pointer (EXC_TABLE_KSTK) is already
+ * decremented by PT_USER_SIZE.
+ */
+
+ .section .UserExceptionVector.text, "ax"
+
+ENTRY(_UserExceptionVector)
+
+ xsr a3, EXCSAVE_1 # save a3 and get dispatch table
+ wsr a2, DEPC # save a2
+ l32i a2, a3, EXC_TABLE_KSTK # load kernel stack to a2
+ s32i a0, a2, PT_AREG0 # save a0 to ESF
+ rsr a0, EXCCAUSE # retrieve exception cause
+ s32i a0, a2, PT_DEPC # mark it as a regular exception
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_USER # load handler
+ jx a0
+
+/*
+ * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
+ *
+ * We get this exception when we were already in kernel space.
+ * We decrement the current stack pointer (kernel) by PT_SIZE and
+ * jump to the first-level handler associated with the exception cause.
+ *
+ * Note: we need to preserve space for the spill region.
+ */
+
+ .section .KernelExceptionVector.text, "ax"
+
+ENTRY(_KernelExceptionVector)
+
+ xsr a3, EXCSAVE_1 # save a3, and get dispatch table
+ wsr a2, DEPC # save a2
+ addi a2, a1, -16-PT_SIZE # adjust stack pointer
+ s32i a0, a2, PT_AREG0 # save a0 to ESF
+ rsr a0, EXCCAUSE # retrieve exception cause
+ s32i a0, a2, PT_DEPC # mark it as a regular exception
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address
+ jx a0
+
+
+/*
+ * Double exception vector (Exceptions with PS.EXCM == 1)
+ * We get this exception when another exception occurs while we are
+ * already handling an exception, such as a window overflow/underflow
+ * exception, or an 'expected' exception, for example a memory exception
+ * when we were trying to read data from an invalid address in user space.
+ *
+ * Note that this vector is never invoked for level-1 interrupts, because such
+ * interrupts are disabled (masked) when PS.EXCM is set.
+ *
+ * We decode the exception and take the appropriate action. However, the
+ * double exception vector is much more careful, because a lot more error
+ * cases go through the double exception vector than through the user and
+ * kernel exception vectors.
+ *
+ * Occasionally, the kernel expects a double exception to occur. This usually
+ * happens when accessing user-space memory with the user's permissions
+ * (l32e/s32e instructions). The kernel state, though, is not always suitable
+ * for immediate transfer of control to handle_double, where "normal" exception
+ * processing occurs. Also in kernel mode, TLB misses can occur if accessing
+ * vmalloc memory, possibly requiring repair in a double exception handler.
+ *
+ * The variable at TABLE_FIXUP offset from the pointer in EXCSAVE_1 doubles as
+ * a boolean variable and a pointer to a fixup routine. If the variable
+ * EXC_TABLE_FIXUP is non-zero, this handler jumps to that address. A value of
+ * zero indicates to use the default kernel/user exception handler.
+ * There is only one exception, when the value is identical to the exc_table
+ * label, the kernel is in trouble. This mechanism is used to protect critical
+ * sections, mainly when the handler writes to the stack to assert the stack
+ * pointer is valid. Once the fixup/default handler leaves that area, the
+ * EXC_TABLE_FIXUP variable is reset to the fixup handler or zero.
+ *
+ * Procedures wishing to use this mechanism should set EXC_TABLE_FIXUP to
+ * the nonzero address of a fixup routine before any code that could cause
+ * a double exception, and reset it before they return.
+ *
+ * Some other things to take care of when a fast exception handler doesn't
+ * specify a particular fixup handler but wants to use the default handlers:
+ *
+ * - The original stack pointer (in a1) must not be modified. The fast
+ * exception handler should only use a2 as the stack pointer.
+ *
+ * - If the fast handler manipulates the stack pointer (in a2), it has to
+ * register a valid fixup handler and cannot use the default handlers.
+ *
+ * - The handler can use any other generic register from a3 to a15, but it
+ * must save the content of these registers to stack (PT_AREG3...PT_AREGx)
+ *
+ * - These registers must be saved before a double exception can occur.
+ *
+ * - (*) If we ever implement handling signals while in double exceptions,
+ *   the number of registers a fast handler has saved (excluding a0 and a1)
+ *   must be written to PT_AREG1. (1 if only a3 is used, 2 for a3 and a4,
+ *   etc.)
+ *
+ * The fixup handlers are special handlers:
+ *
+ * - Fixup entry conditions differ from regular exceptions:
+ *
+ * a0: DEPC
+ * a1: a1
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_A2
+ * a3: exctable
+ * depc: a0
+ * excsave_1: a3
+ *
+ * - When the kernel enters the fixup handler, it still assumes it is in a
+ * critical section, so EXC_TABLE_FIXUP variable is set to exc_table.
+ * The fixup handler, therefore, has to re-register itself as the fixup
+ * handler before it returns from the double exception.
+ *
+ * - Fixup handler can share the same exception frame with the fast handler.
+ * The kernel stack pointer is not changed when entering the fixup handler.
+ *
+ * - Fixup handlers can jump to the default kernel and user exception
+ *   handlers. Before jumping, though, a fixup handler has to set up an
+ *   exception frame on the stack. Because the default handler resets the
+ *   fixup-handler register, the fixup handler must make sure that the
+ *   default handler returns to it instead of to the exception address,
+ *   so it can re-register itself as the fixup handler.
+ *
+ * In case of a critical condition where the kernel cannot recover, we jump
+ * to unrecoverable_exception with the following entry conditions.
+ * All registers a0...a15 are unchanged from the last exception, except:
+ *
+ * a0: last address before we jumped to the unrecoverable_exception.
+ * excsave_1: a0
+ *
+ *
+ * See the handle_alloca_user and spill_registers routines for example clients.
+ *
+ * FIXME: Note: we currently don't allow signal handling coming from a double
+ * exception, so the item marked with (*) is not required.
+ */
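+
+/* Protocol sketch (illustrative only; "aX" and "my_fixup" are placeholder
+ * names, not symbols defined in this patch): a fast handler that wants
+ * fixup protection does, in outline,
+ *
+ * movi aX, my_fixup # register fixup before the risky access
+ * s32i aX, a3, EXC_TABLE_FIXUP
+ * ... access user memory with l32e/s32e ...
+ * movi aX, 0 # and reset it afterwards
+ * s32i aX, a3, EXC_TABLE_FIXUP
+ *
+ * with a3 holding the dispatch-table pointer per the entry conditions.
+ */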
+
+ .section .DoubleExceptionVector.text, "ax"
+ .begin literal_prefix .DoubleExceptionVector
+
+ENTRY(_DoubleExceptionVector)
+
+ /* Deliberately destroy excsave (don't assume its value was valid). */
+
+ wsr a3, EXCSAVE_1 # save a3
+
+ /* Check for kernel double exception (usually fatal). */
+
+ rsr a3, PS
+ _bbci.l a3, PS_UM_SHIFT, .Lksp
+
+ /* Check if we are currently handling a window exception. */
+ /* Note: We don't need to indicate that we enter a critical section. */
+
+ xsr a0, DEPC # get DEPC, save a0
+
+ movi a3, XCHAL_WINDOW_VECTORS_VADDR
+ _bltu a0, a3, .Lfixup
+ addi a3, a3, XSHAL_WINDOW_VECTORS_SIZE
+ _bgeu a0, a3, .Lfixup
+
+ /* Window overflow/underflow exception. Get stack pointer. */
+
+ mov a3, a2
+ movi a2, exc_table
+ l32i a2, a2, EXC_TABLE_KSTK
+
+ /* Check for overflow/underflow exception, jump if overflow. */
+
+ _bbci.l a0, 6, .Lovfl
+
+ /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */
+
+ /* Restart window underflow exception.
+ * We return to the instruction in user space that caused the window
+ * underflow exception. Therefore, we change window base to the value
+ * before we entered the window underflow exception and prepare the
+ * registers to return as if we were coming from a regular exception
+ * by changing depc (in a0).
+ * Note: We can trash the current window frame (a0...a3) and depc!
+ */
+
+ wsr a2, DEPC # save stack pointer temporarily
+ rsr a0, PS
+ extui a0, a0, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS
+ wsr a0, WINDOWBASE
+ rsync
+
+ /* We are now in the previous window frame. Save registers again. */
+
+ xsr a2, DEPC # save a2 and get stack pointer
+ s32i a0, a2, PT_AREG0
+
+ wsr a3, EXCSAVE_1 # save a3
+ movi a3, exc_table
+
+ rsr a0, EXCCAUSE
+ s32i a0, a2, PT_DEPC # mark it as a regular exception
+ addx4 a0, a0, a3
+ l32i a0, a0, EXC_TABLE_FAST_USER
+ jx a0
+
+.Lfixup:/* Check for a fixup handler or if we were in a critical section. */
+
+ /* a0: depc, a1: a1, a2: a2, a3: trashed, depc: a0, excsave1: a3 */
+
+ movi a3, exc_table
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE # temporary variable
+
+ /* Enter critical section. */
+
+ l32i a2, a3, EXC_TABLE_FIXUP
+ s32i a3, a3, EXC_TABLE_FIXUP
+ beq a2, a3, .Lunrecoverable_fixup # critical!
+ beqz a2, .Ldflt # no handler was registered
+
+ /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave: a3 */
+
+ jx a2
+
+.Ldflt: /* Get stack pointer. */
+
+ l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
+ addi a2, a3, -PT_USER_SIZE
+
+.Lovfl: /* Jump to default handlers. */
+
+ /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */
+
+ xsr a3, DEPC
+ s32i a0, a2, PT_DEPC
+ s32i a3, a2, PT_AREG0
+
+ /* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */
+
+ movi a3, exc_table
+ rsr a0, EXCCAUSE
+ addx4 a0, a0, a3
+ l32i a0, a0, EXC_TABLE_FAST_USER
+ jx a0
+
+ /*
+ * We only allow ITLB and DTLB miss exceptions if we are in kernel space.
+ * All other exceptions are unexpected and thus unrecoverable!
+ */
+
+ .extern fast_second_level_miss_double_kernel
+
+.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
+
+ rsr a3, EXCCAUSE
+ beqi a3, XCHAL_EXCCAUSE_ITLB_MISS, 1f
+ addi a3, a3, -XCHAL_EXCCAUSE_DTLB_MISS
+ bnez a3, .Lunrecoverable
+1: movi a3, fast_second_level_miss_double_kernel
+ jx a3
+
+ /* Critical! We can't handle this situation. PANIC! */
+
+ .extern unrecoverable_exception
+
+.Lunrecoverable_fixup:
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a0, DEPC
+
+.Lunrecoverable:
+ rsr a3, EXCSAVE_1
+ wsr a0, EXCSAVE_1
+ movi a0, unrecoverable_exception
+ callx0 a0
+
+ .end literal_prefix
+
+
+/*
+ * Debug interrupt vector
+ *
+ * There is not much space here, so simply jump to another handler.
+ * EXCSAVE[DEBUGLEVEL] has been set to that handler.
+ */
+
+ .section .DebugInterruptVector.text, "ax"
+
+ENTRY(_DebugInterruptVector)
+ xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL
+ jx a0
+
+
+
+/* Window overflow and underflow handlers.
+ * The handlers must be 64 bytes apart, alternating overflow and underflow
+ * handlers for 4, 8, and 12 registers, in the order they appear below
+ * (overflow-4, underflow-4, ..., overflow-12, underflow-12).
+ *
+ * Note: We rerun the underflow handlers if we hit an exception, so
+ * we try to access any page that would cause a page fault early.
+ */
+
+ .section .WindowVectors.text, "ax"
+
+
+/* 4-Register Window Overflow Vector (Handler) */
+
+ .align 64
+.global _WindowOverflow4
+_WindowOverflow4:
+ s32e a0, a5, -16
+ s32e a1, a5, -12
+ s32e a2, a5, -8
+ s32e a3, a5, -4
+ rfwo
+
+
+/* 4-Register Window Underflow Vector (Handler) */
+
+ .align 64
+.global _WindowUnderflow4
+_WindowUnderflow4:
+ l32e a0, a5, -16
+ l32e a1, a5, -12
+ l32e a2, a5, -8
+ l32e a3, a5, -4
+ rfwu
+
+
+/* 8-Register Window Overflow Vector (Handler) */
+
+ .align 64
+.global _WindowOverflow8
+_WindowOverflow8:
+ s32e a0, a9, -16
+ l32e a0, a1, -12
+ s32e a2, a9, -8
+ s32e a1, a9, -12
+ s32e a3, a9, -4
+ s32e a4, a0, -32
+ s32e a5, a0, -28
+ s32e a6, a0, -24
+ s32e a7, a0, -20
+ rfwo
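+
+ /* Note on the layout above (assumed windowed-ABI convention): a0..a3
+ * are spilled to the base save area below a9, while a4..a7 go to the
+ * extended save area below the stack pointer that "l32e a0, a1, -12"
+ * fetched from the stack.
+ */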
+
+/* 8-Register Window Underflow Vector (Handler) */
+
+ .align 64
+.global _WindowUnderflow8
+_WindowUnderflow8:
+ l32e a1, a9, -12
+ l32e a0, a9, -16
+ l32e a7, a1, -12
+ l32e a2, a9, -8
+ l32e a4, a7, -32
+ l32e a3, a9, -4
+ l32e a5, a7, -28
+ l32e a6, a7, -24
+ l32e a7, a7, -20
+ rfwu
+
+
+/* 12-Register Window Overflow Vector (Handler) */
+
+ .align 64
+.global _WindowOverflow12
+_WindowOverflow12:
+ s32e a0, a13, -16
+ l32e a0, a1, -12
+ s32e a1, a13, -12
+ s32e a2, a13, -8
+ s32e a3, a13, -4
+ s32e a4, a0, -48
+ s32e a5, a0, -44
+ s32e a6, a0, -40
+ s32e a7, a0, -36
+ s32e a8, a0, -32
+ s32e a9, a0, -28
+ s32e a10, a0, -24
+ s32e a11, a0, -20
+ rfwo
+
+/* 12-Register Window Underflow Vector (Handler) */
+
+ .align 64
+.global _WindowUnderflow12
+_WindowUnderflow12:
+ l32e a1, a13, -12
+ l32e a0, a13, -16
+ l32e a11, a1, -12
+ l32e a2, a13, -8
+ l32e a4, a11, -48
+ l32e a8, a11, -32
+ l32e a3, a13, -4
+ l32e a5, a11, -44
+ l32e a6, a11, -40
+ l32e a7, a11, -36
+ l32e a9, a11, -28
+ l32e a10, a11, -24
+ l32e a11, a11, -20
+ rfwu
+
+ .text
+
+
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
new file mode 100644
index 00000000000..476b2b53cd0
--- /dev/null
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -0,0 +1,341 @@
+/*
+ * arch/xtensa/kernel/vmlinux.lds.S
+ *
+ * Xtensa linker script
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+
+#include <linux/config.h>
+#define _NOCLANGUAGE
+#include <xtensa/config/core.h>
+#include <xtensa/config/system.h>
+OUTPUT_ARCH(xtensa)
+ENTRY(_start)
+
+#if XCHAL_MEMORY_ORDER == XTHAL_BIGENDIAN
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
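+
+/* On a big-endian core the low-order word of the 64-bit counter sits at
+ * byte offset 4, so the 32-bit "jiffies" alias must be biased; on a
+ * little-endian core the low word is at offset 0.
+ */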
+
+#define KERNELOFFSET 0x1000
+
+/* Note: In the following macros, it would be nice to specify only the
+ vector name and section kind and construct "sym" and "section" using
+ CPP concatenation, but that does not work reliably. Concatenating a
+ string with "." produces an invalid token. CPP will not print a
+ warning because it thinks this is an assembly file, but it leaves
+ them as multiple tokens and there may or may not be whitespace
+ between them. */
+
+/* Macro for a relocation entry */
+
+#define RELOCATE_ENTRY(sym, section) \
+ LONG(sym ## _start); \
+ LONG(sym ## _end); \
+ LONG(LOADADDR(section))
+
+/* Macro to define a section for a vector.
+ *
+ * Use of the MIN function catches the types of errors illustrated in
+ * the following example:
+ *
+ * Assume the section .DoubleExceptionVector.literal is completely
+ * full. Then a programmer adds code to .DoubleExceptionVector.text
+ * that produces another literal. The final literal position will
+ * overlay onto the first word of the adjacent code section
+ * .DoubleExceptionVector.text. (In practice, the literals will
+ * overwrite the code, and the first few instructions will be
+ * garbage.)
+ */
+
+#define SECTION_VECTOR(sym, section, addr, max_prevsec_size, prevsec) \
+ section addr : AT((MIN(LOADADDR(prevsec) + max_prevsec_size, \
+ LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \
+ { \
+ . = ALIGN(4); \
+ sym ## _start = ABSOLUTE(.); \
+ *(section) \
+ sym ## _end = ABSOLUTE(.); \
+ }
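+
+/* Expansion sketch: SECTION_VECTOR(_WindowVectors_text, .WindowVectors.text,
+ * XCHAL_WINDOW_VECTORS_VADDR, 4, .dummy) becomes, roughly,
+ *
+ * .WindowVectors.text XCHAL_WINDOW_VECTORS_VADDR :
+ *     AT((MIN(LOADADDR(.dummy) + 4,
+ *             LOADADDR(.dummy) + SIZEOF(.dummy)) + 3) & ~ 3)
+ * {
+ *     . = ALIGN(4);
+ *     _WindowVectors_text_start = ABSOLUTE(.);
+ *     *(.WindowVectors.text)
+ *     _WindowVectors_text_end = ABSOLUTE(.);
+ * }
+ */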
+
+/*
+ * Mapping of input sections to output sections when linking.
+ */
+
+SECTIONS
+{
+ . = XCHAL_KSEG_CACHED_VADDR + KERNELOFFSET;
+ /* .text section */
+
+ _text = .;
+ _stext = .;
+ _ftext = .;
+
+ .text :
+ {
+ /* The .head.text section must be the first section! */
+ *(.head.text)
+ *(.literal .text)
+ *(.srom.text)
+ VMLINUX_SYMBOL(__sched_text_start) = .;
+ *(.sched.text.literal .sched.text)
+ VMLINUX_SYMBOL(__sched_text_end) = .;
+ VMLINUX_SYMBOL(__lock_text_start) = .;
+ *(.spinlock.text.literal .spinlock.text)
+ VMLINUX_SYMBOL(__lock_text_end) = .;
+
+ }
+ _etext = .;
+
+ . = ALIGN(16);
+
+ RODATA
+
+ /* Relocation table */
+
+ . = ALIGN(16);
+ __boot_reloc_table_start = ABSOLUTE(.);
+
+ __relocate : {
+
+ RELOCATE_ENTRY(_WindowVectors_text,
+ .WindowVectors.text);
+#if 0
+ RELOCATE_ENTRY(_KernelExceptionVector_literal,
+ .KernelExceptionVector.literal);
+#endif
+ RELOCATE_ENTRY(_KernelExceptionVector_text,
+ .KernelExceptionVector.text);
+#if 0
+ RELOCATE_ENTRY(_UserExceptionVector_literal,
+ .UserExceptionVector.literal);
+#endif
+ RELOCATE_ENTRY(_UserExceptionVector_text,
+ .UserExceptionVector.text);
+ RELOCATE_ENTRY(_DoubleExceptionVector_literal,
+ .DoubleExceptionVector.literal);
+ RELOCATE_ENTRY(_DoubleExceptionVector_text,
+ .DoubleExceptionVector.text);
+ }
+ __boot_reloc_table_end = ABSOLUTE(.) ;
+
+ .fixup : { *(.fixup) }
+
+ . = ALIGN(16);
+
+ __ex_table : {
+ __start___ex_table = .;
+ *(__ex_table)
+ __stop___ex_table = .;
+ }
+
+ /* Data section */
+
+ . = ALIGN(XCHAL_ICACHE_LINESIZE);
+ _fdata = .;
+ .data :
+ {
+ *(.data) CONSTRUCTORS
+ . = ALIGN(XCHAL_ICACHE_LINESIZE);
+ *(.data.cacheline_aligned)
+ }
+
+ _edata = .;
+
+ /* The initial task */
+ . = ALIGN(8192);
+ .data.init_task : { *(.data.init_task) }
+
+ /* Initialization code and data: */
+
+ . = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE);
+ __init_begin = .;
+ .init.text : {
+ _sinittext = .;
+ *(.init.text.literal) *(.init.text)
+ _einittext = .;
+ }
+
+ .init.data :
+ {
+ *(.init.data)
+ . = ALIGN(0x4);
+ __tagtable_begin = .;
+ *(.taglist)
+ __tagtable_end = .;
+ }
+
+ . = ALIGN(XCHAL_ICACHE_LINESIZE);
+
+ __setup_start = .;
+ .init.setup : { *(.init.setup) }
+ __setup_end = .;
+
+ __initcall_start = .;
+ .initcall.init : {
+ *(.initcall1.init)
+ *(.initcall2.init)
+ *(.initcall3.init)
+ *(.initcall4.init)
+ *(.initcall5.init)
+ *(.initcall6.init)
+ *(.initcall7.init)
+ }
+ __initcall_end = .;
+
+ __con_initcall_start = .;
+ .con_initcall.init : { *(.con_initcall.init) }
+ __con_initcall_end = .;
+
+ SECURITY_INIT
+
+ . = ALIGN(4);
+
+ __start___ftr_fixup = .;
+ __ftr_fixup : { *(__ftr_fixup) }
+ __stop___ftr_fixup = .;
+
+ . = ALIGN(32);
+ __per_cpu_start = .;
+ .data.percpu : { *(.data.percpu) }
+ __per_cpu_end = .;
+
+ . = ALIGN(4096);
+ __initramfs_start =.;
+ .init.ramfs : { *(.init.ramfs) }
+ __initramfs_end = .;
+
+ /* We need this dummy section: the first SECTION_VECTOR below chains its load address off it */
+
+ . = ALIGN(4);
+ .dummy : { LONG(0) }
+
+ /* The vectors are relocated to the real position at startup time */
+
+ SECTION_VECTOR (_WindowVectors_text,
+ .WindowVectors.text,
+ XCHAL_WINDOW_VECTORS_VADDR, 4,
+ .dummy)
+ SECTION_VECTOR (_DebugInterruptVector_literal,
+ .DebugInterruptVector.literal,
+ XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL) - 4,
+ SIZEOF(.WindowVectors.text),
+ .WindowVectors.text)
+ SECTION_VECTOR (_DebugInterruptVector_text,
+ .DebugInterruptVector.text,
+ XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL),
+ 4,
+ .DebugInterruptVector.literal)
+ SECTION_VECTOR (_KernelExceptionVector_literal,
+ .KernelExceptionVector.literal,
+ XCHAL_KERNELEXC_VECTOR_VADDR - 4,
+ SIZEOF(.DebugInterruptVector.text),
+ .DebugInterruptVector.text)
+ SECTION_VECTOR (_KernelExceptionVector_text,
+ .KernelExceptionVector.text,
+ XCHAL_KERNELEXC_VECTOR_VADDR,
+ 4,
+ .KernelExceptionVector.literal)
+ SECTION_VECTOR (_UserExceptionVector_literal,
+ .UserExceptionVector.literal,
+ XCHAL_USEREXC_VECTOR_VADDR - 4,
+ SIZEOF(.KernelExceptionVector.text),
+ .KernelExceptionVector.text)
+ SECTION_VECTOR (_UserExceptionVector_text,
+ .UserExceptionVector.text,
+ XCHAL_USEREXC_VECTOR_VADDR,
+ 4,
+ .UserExceptionVector.literal)
+ SECTION_VECTOR (_DoubleExceptionVector_literal,
+ .DoubleExceptionVector.literal,
+ XCHAL_DOUBLEEXC_VECTOR_VADDR - 16,
+ SIZEOF(.UserExceptionVector.text),
+ .UserExceptionVector.text)
+ SECTION_VECTOR (_DoubleExceptionVector_text,
+ .DoubleExceptionVector.text,
+ XCHAL_DOUBLEEXC_VECTOR_VADDR,
+ 32,
+ .DoubleExceptionVector.literal)
+
+ . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
+ . = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE);
+
+ __init_end = .;
+
+ . = ALIGN(8192);
+
+ /* BSS section */
+ _bss_start = .;
+ .sbss : { *(.sbss) *(.scommon) }
+ .bss : { *(COMMON) *(.bss) }
+ _bss_end = .;
+ _end = .;
+
+ /* only used by the boot loader */
+
+ . = ALIGN(0x10);
+ .bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) }
+
+ . = ALIGN(0x1000);
+ __initrd_start = .;
+ .initrd : { *(.initrd) }
+ __initrd_end = .;
+
+ .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
+ {
+ *(.ResetVector.text)
+ }
+
+
+ /* Sections to be discarded */
+ /DISCARD/ :
+ {
+ *(.text.exit)
+ *(.text.exit.literal)
+ *(.data.exit)
+ *(.exitcall.exit)
+ }
+
+
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+
+ .xt.insn 0 :
+ {
+ *(.xt.insn)
+ *(.gnu.linkonce.x*)
+ }
+
+ .xt.lit 0 :
+ {
+ *(.xt.lit)
+ *(.gnu.linkonce.p*)
+ }
+}
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
new file mode 100644
index 00000000000..efae56a5147
--- /dev/null
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -0,0 +1,123 @@
+/*
+ * arch/xtensa/kernel/xtensa_ksyms.c
+ *
+ * Export Xtensa-specific functions for loadable modules.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <asm/irq.h>
+#include <linux/in6.h>
+#include <linux/pci.h>
+#include <linux/ide.h>
+
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/semaphore.h>
+#ifdef CONFIG_BLK_DEV_FD
+#include <asm/floppy.h>
+#endif
+#ifdef CONFIG_NET
+#include <net/checksum.h>
+#endif /* CONFIG_NET */
+
+
+/*
+ * String functions
+ */
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memchr);
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strstr);
+
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+EXPORT_SYMBOL(kernel_thread);
+
+/*
+ * gcc internal math functions
+ */
+extern long long __ashrdi3(long long, int);
+extern long long __ashldi3(long long, int);
+extern long long __lshrdi3(long long, int);
+extern int __divsi3(int, int);
+extern int __modsi3(int, int);
+extern long long __muldi3(long long, long long);
+extern int __mulsi3(int, int);
+extern unsigned int __udivsi3(unsigned int, unsigned int);
+extern unsigned int __umodsi3(unsigned int, unsigned int);
+extern unsigned long long __umoddi3(unsigned long long, unsigned long long);
+extern unsigned long long __udivdi3(unsigned long long, unsigned long long);
+
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__mulsi3);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__udivdi3);
+EXPORT_SYMBOL(__umoddi3);
+
+/*
+ * Semaphore operations
+ */
+EXPORT_SYMBOL(__down);
+EXPORT_SYMBOL(__down_interruptible);
+EXPORT_SYMBOL(__down_trylock);
+EXPORT_SYMBOL(__up);
+
+#ifdef CONFIG_NET
+/*
+ * Networking support
+ */
+EXPORT_SYMBOL(csum_partial_copy_generic);
+#endif /* CONFIG_NET */
+
+/*
+ * Architecture-specific symbols
+ */
+EXPORT_SYMBOL(__xtensa_copy_user);
+
+/*
+ * Kernel hacking ...
+ */
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+// FIXME EXPORT_SYMBOL(screen_info);
+#endif
+
+EXPORT_SYMBOL(get_wchan);
+
+EXPORT_SYMBOL(outsb);
+EXPORT_SYMBOL(outsw);
+EXPORT_SYMBOL(outsl);
+EXPORT_SYMBOL(insb);
+EXPORT_SYMBOL(insw);
+EXPORT_SYMBOL(insl);
diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile
new file mode 100644
index 00000000000..ed935b58e8a
--- /dev/null
+++ b/arch/xtensa/lib/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Xtensa-specific library files.
+#
+
+lib-y += memcopy.o memset.o checksum.o strcasecmp.o \
+ usercopy.o strncpy_user.o strnlen_user.o
+lib-$(CONFIG_PCI) += pci-auto.o
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
new file mode 100644
index 00000000000..e2d64dfd530
--- /dev/null
+++ b/arch/xtensa/lib/checksum.S
@@ -0,0 +1,410 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * IP/TCP/UDP checksumming routines
+ *
+ * Xtensa version: Copyright (C) 2001 Tensilica, Inc. by Kevin Chea
+ * Optimized by Joe Taylor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/errno.h>
+#include <linux/linkage.h>
+#define _ASMLANGUAGE
+#include <xtensa/config/core.h>
+
+/*
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+
+/*
+ * unsigned int csum_partial(const unsigned char *buf, int len,
+ * unsigned int sum);
+ * a2 = buf
+ * a3 = len
+ * a4 = sum
+ *
+ * This function is optimized for 2- and 4-byte aligned buffers; odd
+ * addresses are handled, but by a much slower byte-wise path.
+ */
+
+/* ONES_ADD converts twos-complement math to ones-complement. */
+#define ONES_ADD(sum, val) \
+ add sum, sum, val ; \
+ bgeu sum, val, 99f ; \
+ addi sum, sum, 1 ; \
+99: ;
+
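For reference, each ONES_ADD step corresponds to this C model (a reading aid only, not code from this patch):

/* 32-bit two's-complement add, folding the carry-out back into the
 * low bits ("end-around carry"), as ones-complement sums require. */
static inline unsigned int ones_add(unsigned int sum, unsigned int val)
{
        sum += val;
        if (sum < val)          /* carry out of bit 31 */
                sum++;          /* end-around carry */
        return sum;
}
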
+.text
+ENTRY(csum_partial)
+ /*
+ * Experiments with Ethernet and SLIP connections show that buf
+ * is aligned on either a 2-byte or 4-byte boundary.
+ */
+ entry sp, 32
+ extui a5, a2, 0, 2
+ bnez a5, 8f /* branch if not 4-byte aligned */
+ /* Fall-through on common case, 4-byte alignment */
+1:
+ srli a5, a3, 5 /* 32-byte chunks */
+#if XCHAL_HAVE_LOOPS
+ loopgtz a5, 2f
+#else
+ beqz a5, 2f
+ slli a5, a5, 5
+ add a5, a5, a2 /* a5 = end of last 32-byte chunk */
+.Loop1:
+#endif
+ l32i a6, a2, 0
+ l32i a7, a2, 4
+ ONES_ADD(a4, a6)
+ ONES_ADD(a4, a7)
+ l32i a6, a2, 8
+ l32i a7, a2, 12
+ ONES_ADD(a4, a6)
+ ONES_ADD(a4, a7)
+ l32i a6, a2, 16
+ l32i a7, a2, 20
+ ONES_ADD(a4, a6)
+ ONES_ADD(a4, a7)
+ l32i a6, a2, 24
+ l32i a7, a2, 28
+ ONES_ADD(a4, a6)
+ ONES_ADD(a4, a7)
+ addi a2, a2, 4*8
+#if !XCHAL_HAVE_LOOPS
+ blt a2, a5, .Loop1
+#endif
+2:
+ extui a5, a3, 2, 3 /* remaining 4-byte chunks */
+#if XCHAL_HAVE_LOOPS
+ loopgtz a5, 3f
+#else
+ beqz a5, 3f
+ slli a5, a5, 2
+ add a5, a5, a2 /* a5 = end of last 4-byte chunk */
+.Loop2:
+#endif
+ l32i a6, a2, 0
+ ONES_ADD(a4, a6)
+ addi a2, a2, 4
+#if !XCHAL_HAVE_LOOPS
+ blt a2, a5, .Loop2
+#endif
+3:
+ _bbci.l a3, 1, 5f /* remaining 2-byte chunk */
+ l16ui a6, a2, 0
+ ONES_ADD(a4, a6)
+ addi a2, a2, 2
+5:
+ _bbci.l a3, 0, 7f /* remaining 1-byte chunk */
+6: l8ui a6, a2, 0
+#ifdef __XTENSA_EB__
+ slli a6, a6, 8 /* load byte into bits 8..15 */
+#endif
+ ONES_ADD(a4, a6)
+7:
+ mov a2, a4
+ retw
+
+ /* uncommon case, buf is 2-byte aligned */
+8:
+ beqz a3, 7b /* branch if len == 0 */
+ beqi a3, 1, 6b /* branch if len == 1 */
+
+ extui a5, a2, 0, 1
+ bnez a5, 8f /* branch if odd address */
+
+ l16ui a6, a2, 0 /* common case, len >= 2 */
+ ONES_ADD(a4, a6)
+ addi a2, a2, 2 /* adjust buf */
+ addi a3, a3, -2 /* adjust len */
+ j 1b /* now buf is 4-byte aligned */
+
+ /* case: odd-byte aligned, len > 1
+ * This case is dog slow, so don't give us an odd address.
+ * (I don't think this ever happens, but just in case.)
+ */
+8:
+ srli a5, a3, 2 /* 4-byte chunks */
+#if XCHAL_HAVE_LOOPS
+ loopgtz a5, 2f
+#else
+ beqz a5, 2f
+ slli a5, a5, 2
+ add a5, a5, a2 /* a5 = end of last 4-byte chunk */
+.Loop3:
+#endif
+ l8ui a6, a2, 0 /* bits 24..31 */
+ l16ui a7, a2, 1 /* bits 8..23 */
+ l8ui a8, a2, 3 /* bits 0..7 */
+#ifdef __XTENSA_EB__
+ slli a6, a6, 24
+#else
+ slli a8, a8, 24
+#endif
+ slli a7, a7, 8
+ or a7, a7, a6
+ or a7, a7, a8
+ ONES_ADD(a4, a7)
+ addi a2, a2, 4
+#if !XCHAL_HAVE_LOOPS
+ blt a2, a5, .Loop3
+#endif
+2:
+ _bbci.l a3, 1, 3f /* remaining 2-byte chunk, still odd addr */
+ l8ui a6, a2, 0
+ l8ui a7, a2, 1
+#ifdef __XTENSA_EB__
+ slli a6, a6, 8
+#else
+ slli a7, a7, 8
+#endif
+ or a7, a7, a6
+ ONES_ADD(a4, a7)
+ addi a2, a2, 2
+3:
+ j 5b /* branch to handle the remaining byte */
+
+
+
+/*
+ * Copy from src while checksumming, otherwise like csum_partial.
+ *
+ * The macros SRC and DST specify the type of access for the instruction,
+ * so we can call a custom exception handler for each access type.
+ */
+
+#define SRC(y...) \
+ 9999: y; \
+ .section __ex_table, "a"; \
+ .long 9999b, 6001f ; \
+ .previous
+
+#define DST(y...) \
+ 9999: y; \
+ .section __ex_table, "a"; \
+ .long 9999b, 6002f ; \
+ .previous
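Each SRC()/DST() use appends one record to the __ex_table section; the fault handler (bad_page_fault() in arch/xtensa/mm/fault.c, later in this patch) looks the faulting PC up there and resumes at the fixup label (6001f or 6002f below). A sketch of the record layout these macros emit:

/* One __ex_table entry: (address of access, recovery address). */
struct exception_table_entry {
        unsigned long insn;     /* address of the faulting load/store */
        unsigned long fixup;    /* where execution resumes after a fault */
};
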
+
+/*
+unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
+ int sum, int *src_err_ptr, int *dst_err_ptr)
+ a2 = src
+ a3 = dst
+ a4 = len
+ a5 = sum
+ a6 = src_err_ptr
+ a7 = dst_err_ptr
+ a8 = temp
+ a9 = temp
+ a10 = temp
+ a11 = original len for exception handling
+ a12 = original dst for exception handling
+
+ This function is optimized for 4-byte aligned addresses. Other
+ alignments work, but not nearly as efficiently.
+ */
+
+ENTRY(csum_partial_copy_generic)
+ entry sp, 32
+ mov a12, a3
+ mov a11, a4
+ or a10, a2, a3
+
+ /* We optimize the following alignment tests for the 4-byte
+ aligned case. Two bbsi.l instructions might seem more optimal
+ (commented out below). However, both labels 5: and 3: are out
+ of the imm8 range, so the assembler relaxes them into
+ equivalent bbci.l, j combinations, which is actually
+ slower. */
+
+ extui a9, a10, 0, 2
+ beqz a9, 1f /* branch if both are 4-byte aligned */
+ bbsi.l a10, 0, 5f /* branch if one address is odd */
+ j 3f /* one address is 2-byte aligned */
+
+/* _bbsi.l a10, 0, 5f */ /* branch if odd address */
+/* _bbsi.l a10, 1, 3f */ /* branch if 2-byte-aligned address */
+
+1:
+ /* src and dst are both 4-byte aligned */
+ srli a10, a4, 5 /* 32-byte chunks */
+#if XCHAL_HAVE_LOOPS
+ loopgtz a10, 2f
+#else
+ beqz a10, 2f
+ slli a10, a10, 5
+ add a10, a10, a2 /* a10 = end of last 32-byte src chunk */
+.Loop5:
+#endif
+SRC( l32i a9, a2, 0 )
+SRC( l32i a8, a2, 4 )
+DST( s32i a9, a3, 0 )
+DST( s32i a8, a3, 4 )
+ ONES_ADD(a5, a9)
+ ONES_ADD(a5, a8)
+SRC( l32i a9, a2, 8 )
+SRC( l32i a8, a2, 12 )
+DST( s32i a9, a3, 8 )
+DST( s32i a8, a3, 12 )
+ ONES_ADD(a5, a9)
+ ONES_ADD(a5, a8)
+SRC( l32i a9, a2, 16 )
+SRC( l32i a8, a2, 20 )
+DST( s32i a9, a3, 16 )
+DST( s32i a8, a3, 20 )
+ ONES_ADD(a5, a9)
+ ONES_ADD(a5, a8)
+SRC( l32i a9, a2, 24 )
+SRC( l32i a8, a2, 28 )
+DST( s32i a9, a3, 24 )
+DST( s32i a8, a3, 28 )
+ ONES_ADD(a5, a9)
+ ONES_ADD(a5, a8)
+ addi a2, a2, 32
+ addi a3, a3, 32
+#if !XCHAL_HAVE_LOOPS
+ blt a2, a10, .Loop5
+#endif
+2:
+ extui a10, a4, 2, 3 /* remaining 4-byte chunks */
+ extui a4, a4, 0, 2 /* reset len for general-case, 2-byte chunks */
+#if XCHAL_HAVE_LOOPS
+ loopgtz a10, 3f
+#else
+ beqz a10, 3f
+ slli a10, a10, 2
+ add a10, a10, a2 /* a10 = end of last 4-byte src chunk */
+.Loop6:
+#endif
+SRC( l32i a9, a2, 0 )
+DST( s32i a9, a3, 0 )
+ ONES_ADD(a5, a9)
+ addi a2, a2, 4
+ addi a3, a3, 4
+#if !XCHAL_HAVE_LOOPS
+ blt a2, a10, .Loop6
+#endif
+3:
+ /*
+ Control comes to here in two cases: (1) It may fall through
+ to here from the 4-byte alignment case to process, at most,
+ one 2-byte chunk. (2) It branches to here from above if
+ either src or dst is 2-byte aligned, and we process all bytes
+ here, except for perhaps a trailing odd byte. It's
+ inefficient, so align your addresses to 4-byte boundaries.
+
+ a2 = src
+ a3 = dst
+ a4 = len
+ a5 = sum
+ */
+ srli a10, a4, 1 /* 2-byte chunks */
+#if XCHAL_HAVE_LOOPS
+ loopgtz a10, 4f
+#else
+ beqz a10, 4f
+ slli a10, a10, 1
+ add a10, a10, a2 /* a10 = end of last 2-byte src chunk */
+.Loop7:
+#endif
+SRC( l16ui a9, a2, 0 )
+DST( s16i a9, a3, 0 )
+ ONES_ADD(a5, a9)
+ addi a2, a2, 2
+ addi a3, a3, 2
+#if !XCHAL_HAVE_LOOPS
+ blt a2, a10, .Loop7
+#endif
+4:
+ /* This section processes a possible trailing odd byte. */
+ _bbci.l a4, 0, 8f /* 1-byte chunk */
+SRC( l8ui a9, a2, 0 )
+DST( s8i a9, a3, 0 )
+#ifdef __XTENSA_EB__
+ slli a9, a9, 8 /* shift byte to bits 8..15 */
+#endif
+ ONES_ADD(a5, a9)
+8:
+ mov a2, a5
+ retw
+
+5:
+ /* Control branches to here when either src or dst is odd. We
+ process all bytes using 8-bit accesses. Grossly inefficient,
+ so don't feed us an odd address. */
+
+ srli a10, a4, 1 /* handle in pairs for 16-bit csum */
+#if XCHAL_HAVE_LOOPS
+ loopgtz a10, 6f
+#else
+ beqz a10, 6f
+ slli a10, a10, 1
+ add a10, a10, a2 /* a10 = end of last odd-aligned, 2-byte src chunk */
+.Loop8:
+#endif
+SRC( l8ui a9, a2, 0 )
+SRC( l8ui a8, a2, 1 )
+DST( s8i a9, a3, 0 )
+DST( s8i a8, a3, 1 )
+#ifdef __XTENSA_EB__
+ slli a9, a9, 8 /* combine into a single 16-bit value */
+#else /* for checksum computation */
+ slli a8, a8, 8
+#endif
+ or a9, a9, a8
+ ONES_ADD(a5, a9)
+ addi a2, a2, 2
+ addi a3, a3, 2
+#if !XCHAL_HAVE_LOOPS
+ blt a2, a10, .Loop8
+#endif
+6:
+ j 4b /* process the possible trailing odd byte */
+
+
+# Exception handler:
+.section .fixup, "ax"
+/*
+ a6 = src_err_ptr
+ a7 = dst_err_ptr
+ a11 = original len for exception handling
+ a12 = original dst for exception handling
+*/
+
+6001:
+ _movi a2, -EFAULT
+ s32i a2, a6, 0 /* src_err_ptr */
+
+ # clear the complete destination - computing the rest
+ # is too much work
+ movi a2, 0
+#if XCHAL_HAVE_LOOPS
+ loopgtz a11, 2f
+#else
+ beqz a11, 2f
+ add a11, a11, a12 /* a11 = ending address */
+.Leloop:
+#endif
+ s8i a2, a12, 0
+ addi a12, a12, 1
+#if !XCHAL_HAVE_LOOPS
+ blt a12, a11, .Leloop
+#endif
+2:
+ retw
+
+6002:
+ movi a2, -EFAULT
+ s32i a2, a7, 0 /* dst_err_ptr */
+ movi a2, 0
+ retw
+
+.previous
+
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
new file mode 100644
index 00000000000..e8f6d7eb722
--- /dev/null
+++ b/arch/xtensa/lib/memcopy.S
@@ -0,0 +1,315 @@
+/*
+ * arch/xtensa/lib/memcopy.S -- core memory-copy functions (memcpy,
+ * memmove, bcopy), derived from the Xtensa HAL's xthal_memcpy and
+ * xthal_bcopy
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002 - 2005 Tensilica Inc.
+ */
+
+#include <xtensa/coreasm.h>
+
+ .macro src_b r, w0, w1
+#ifdef __XTENSA_EB__
+ src \r, \w0, \w1
+#else
+ src \r, \w1, \w0
+#endif
+ .endm
+
+ .macro ssa8 r
+#ifdef __XTENSA_EB__
+ ssa8b \r
+#else
+ ssa8l \r
+#endif
+ .endm
+
+
+/*
+ * void *memcpy(void *dst, const void *src, size_t len);
+ * void *memmove(void *dst, const void *src, size_t len);
+ * void *bcopy(const void *src, void *dst, size_t len);
+ *
+ * This function is intended to do the same thing as the standard
+ * library function memcpy() (or bcopy()) for most cases.
+ * However, where the source and/or destination references
+ * an instruction RAM or ROM or a data RAM or ROM, that
+ * source and/or destination will always be accessed with
+ * 32-bit load and store instructions (as required for these
+ * types of devices).
+ *
+ * !!!!!!! XTFIXME:
+ * !!!!!!! Handling of IRAM/IROM has not yet
+ * !!!!!!! been implemented.
+ *
+ * The bcopy version is provided here to avoid the overhead
+ * of an extra call, for callers that require this convention.
+ *
+ * The (general case) algorithm is as follows:
+ * If destination is unaligned, align it by conditionally
+ * copying 1 and 2 bytes.
+ * If source is aligned,
+ * do 16 bytes with a loop, and then finish up with
+ * 8, 4, 2, and 1 byte copies conditional on the length;
+ * else (if source is unaligned),
+ * do the same, but use SRC to align the source data.
+ * This code tries to use fall-through branches for the common
+ * case of aligned source and destination and multiple
+ * of 4 (or 8) length.
+ *
+ * Register use:
+ * a0/ return address
+ * a1/ stack pointer
+ * a2/ return value
+ * a3/ src
+ * a4/ length
+ * a5/ dst
+ * a6/ tmp
+ * a7/ tmp
+ * a8/ tmp
+ * a9/ tmp
+ * a10/ tmp
+ * a11/ tmp
+ */
+
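As a reading aid, the aligned path described above is roughly the following C; an illustrative sketch only (the unaligned-source path uses the Xtensa SRC funnel-shift instruction and has no simple C equivalent):

/* Hypothetical outline: align dst byte-wise, bulk-copy 16 bytes per
 * iteration when src is also word-aligned, then do the 8/4/2/1-byte
 * tail. Not part of this patch. */
static void *memcpy_sketch(void *dst, const void *src, unsigned long len)
{
        unsigned char *d = dst;
        const unsigned char *s = src;

        while (((unsigned long)d & 3) && len) {   /* .Ldst1mod2/.Ldst2mod4 */
                *d++ = *s++;
                len--;
        }
        if (((unsigned long)s & 3) == 0) {        /* aligned src: .Loop1 */
                unsigned long *dw = (unsigned long *)d;
                const unsigned long *sw = (const unsigned long *)s;
                for (; len >= 16; len -= 16) {    /* 4 x l32i/s32i per pass */
                        *dw++ = *sw++; *dw++ = *sw++;
                        *dw++ = *sw++; *dw++ = *sw++;
                }
                d = (unsigned char *)dw;
                s = (const unsigned char *)sw;
        }
        while (len--)                             /* .L2 .. .L5 tail */
                *d++ = *s++;
        return dst;
}
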
+ .text
+ .align 4
+ .global bcopy
+ .type bcopy,@function
+bcopy:
+ entry sp, 16 # minimal stack frame
+ # a2=src, a3=dst, a4=len
+ mov a5, a3 # copy dst so that a2 is return value
+ mov a3, a2
+ mov a2, a5
+ j .Lcommon # go to common code for memcpy+bcopy
+
+
+/*
+ * Byte by byte copy
+ */
+ .align 4
+ .byte 0 # 1 mod 4 alignment for LOOPNEZ
+ # (0 mod 4 alignment for LBEG)
+.Lbytecopy:
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lbytecopydone
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a4, .Lbytecopydone
+ add a7, a3, a4 # a7 = end address for source
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lnextbyte:
+ l8ui a6, a3, 0
+ addi a3, a3, 1
+ s8i a6, a5, 0
+ addi a5, a5, 1
+#if !XCHAL_HAVE_LOOPS
+ blt a3, a7, .Lnextbyte
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lbytecopydone:
+ retw
+
+/*
+ * Destination is unaligned
+ */
+
+ .align 4
+.Ldst1mod2: # dst is only byte aligned
+ _bltui a4, 7, .Lbytecopy # do short copies byte by byte
+
+ # copy 1 byte
+ l8ui a6, a3, 0
+ addi a3, a3, 1
+ addi a4, a4, -1
+ s8i a6, a5, 0
+ addi a5, a5, 1
+ _bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then
+ # return to main algorithm
+.Ldst2mod4: # dst 16-bit aligned
+ # copy 2 bytes
+ _bltui a4, 6, .Lbytecopy # do short copies byte by byte
+ l8ui a6, a3, 0
+ l8ui a7, a3, 1
+ addi a3, a3, 2
+ addi a4, a4, -2
+ s8i a6, a5, 0
+ s8i a7, a5, 1
+ addi a5, a5, 2
+ j .Ldstaligned # dst is now aligned, return to main algorithm
+
+ .align 4
+ .global memcpy
+ .type memcpy,@function
+memcpy:
+ .global memmove
+ .type memmove,@function
+memmove:
+
+ entry sp, 16 # minimal stack frame
+ # a2/ dst, a3/ src, a4/ len
+ mov a5, a2 # copy dst so that a2 is return value
+.Lcommon:
+ _bbsi.l a2, 0, .Ldst1mod2 # if dst is 1 mod 2
+ _bbsi.l a2, 1, .Ldst2mod4 # if dst is 2 mod 4
+.Ldstaligned: # return here from .Ldst?mod? once dst is aligned
+ srli a7, a4, 4 # number of loop iterations with 16B
+ # per iteration
+ movi a8, 3 # if source is not aligned,
+ _bany a3, a8, .Lsrcunaligned # then use shifting copy
+ /*
+ * Destination and source are word-aligned, use word copy.
+ */
+ # copy 16 bytes per iteration for word-aligned dst and word-aligned src
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, .Loop1done
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a7, .Loop1done
+ slli a8, a7, 4
+ add a8, a8, a3 # a8 = end of last 16B source chunk
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop1:
+ l32i a6, a3, 0
+ l32i a7, a3, 4
+ s32i a6, a5, 0
+ l32i a6, a3, 8
+ s32i a7, a5, 4
+ l32i a7, a3, 12
+ s32i a6, a5, 8
+ addi a3, a3, 16
+ s32i a7, a5, 12
+ addi a5, a5, 16
+#if !XCHAL_HAVE_LOOPS
+ blt a3, a8, .Loop1
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop1done:
+ bbci.l a4, 3, .L2
+ # copy 8 bytes
+ l32i a6, a3, 0
+ l32i a7, a3, 4
+ addi a3, a3, 8
+ s32i a6, a5, 0
+ s32i a7, a5, 4
+ addi a5, a5, 8
+.L2:
+ bbsi.l a4, 2, .L3
+ bbsi.l a4, 1, .L4
+ bbsi.l a4, 0, .L5
+ retw
+.L3:
+ # copy 4 bytes
+ l32i a6, a3, 0
+ addi a3, a3, 4
+ s32i a6, a5, 0
+ addi a5, a5, 4
+ bbsi.l a4, 1, .L4
+ bbsi.l a4, 0, .L5
+ retw
+.L4:
+ # copy 2 bytes
+ l16ui a6, a3, 0
+ addi a3, a3, 2
+ s16i a6, a5, 0
+ addi a5, a5, 2
+ bbsi.l a4, 0, .L5
+ retw
+.L5:
+ # copy 1 byte
+ l8ui a6, a3, 0
+ s8i a6, a5, 0
+ retw
+
+/*
+ * Destination is aligned, Source is unaligned
+ */
+
+ .align 4
+.Lsrcunaligned:
+ _beqz a4, .Ldone # avoid loading anything for zero-length copies
+ # copy 16 bytes per iteration for word-aligned dst and unaligned src
+ ssa8 a3 # set shift amount from byte offset
+#define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS (simulator) with the
+ lint or ferret client, or 0 to save a few cycles */
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
+ and a11, a3, a8 # save unalignment offset for below
+ sub a3, a3, a11 # align a3
+#endif
+ l32i a6, a3, 0 # load first word
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, .Loop2done
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a7, .Loop2done
+ slli a10, a7, 4
+ add a10, a10, a3 # a10 = end of last 16B source chunk
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop2:
+ l32i a7, a3, 4
+ l32i a8, a3, 8
+ src_b a6, a6, a7
+ s32i a6, a5, 0
+ l32i a9, a3, 12
+ src_b a7, a7, a8
+ s32i a7, a5, 4
+ l32i a6, a3, 16
+ src_b a8, a8, a9
+ s32i a8, a5, 8
+ addi a3, a3, 16
+ src_b a9, a9, a6
+ s32i a9, a5, 12
+ addi a5, a5, 16
+#if !XCHAL_HAVE_LOOPS
+ blt a3, a10, .Loop2
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop2done:
+ bbci.l a4, 3, .L12
+ # copy 8 bytes
+ l32i a7, a3, 4
+ l32i a8, a3, 8
+ src_b a6, a6, a7
+ s32i a6, a5, 0
+ addi a3, a3, 8
+ src_b a7, a7, a8
+ s32i a7, a5, 4
+ addi a5, a5, 8
+ mov a6, a8
+.L12:
+ bbci.l a4, 2, .L13
+ # copy 4 bytes
+ l32i a7, a3, 4
+ addi a3, a3, 4
+ src_b a6, a6, a7
+ s32i a6, a5, 0
+ addi a5, a5, 4
+ mov a6, a7
+.L13:
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
+ add a3, a3, a11 # readjust a3 with correct misalignment
+#endif
+ bbsi.l a4, 1, .L14
+ bbsi.l a4, 0, .L15
+.Ldone: retw
+.L14:
+ # copy 2 bytes
+ l8ui a6, a3, 0
+ l8ui a7, a3, 1
+ addi a3, a3, 2
+ s8i a6, a5, 0
+ s8i a7, a5, 1
+ addi a5, a5, 2
+ bbsi.l a4, 0, .L15
+ retw
+.L15:
+ # copy 1 byte
+ l8ui a6, a3, 0
+ s8i a6, a5, 0
+ retw
+
+/*
+ * Local Variables:
+ * mode:fundamental
+ * comment-start: "# "
+ * comment-start-skip: "# *"
+ * End:
+ */
diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S
new file mode 100644
index 00000000000..4de25134bc6
--- /dev/null
+++ b/arch/xtensa/lib/memset.S
@@ -0,0 +1,160 @@
+/*
+ * arch/xtensa/lib/memset.S
+ *
+ * ANSI C standard library function memset
+ * (Well, almost. .fixup code might return zero.)
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+#include <xtensa/coreasm.h>
+
+/*
+ * void *memset(void *dst, int c, size_t length)
+ *
+ * The algorithm is as follows:
+ * Create a word with c in all byte positions
+ * If the destination is aligned,
+ * do 16B chunks with a loop, and then finish up with
+ * 8B, 4B, 2B, and 1B stores conditional on the length.
+ * If destination is unaligned, align it by conditionally
+ * setting 1B and 2B and then go to aligned case.
+ * This code tries to use fall-through branches for the common
+ * case of an aligned destination (except for the branches to
+ * the alignment labels).
+ */
+
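The "character in all byte positions" step below (the extui/slli/or prologue) is equivalent to this C helper, shown only as a reading aid:

static inline unsigned long replicate_byte(int c)
{
        unsigned long w = (unsigned long)c & 0xff;      /* extui */
        w |= w << 8;                                    /* slli + or */
        w |= w << 16;                                   /* slli + or */
        return w;       /* e.g. 'A' (0x41) -> 0x41414141 */
}
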
+/* Load or store instructions that may cause exceptions use the EX macro. */
+
+#define EX(insn,reg1,reg2,offset,handler) \
+9: insn reg1, reg2, offset; \
+ .section __ex_table, "a"; \
+ .word 9b, handler; \
+ .previous
+
+
+.text
+.align 4
+.global memset
+.type memset,@function
+memset:
+ entry sp, 16 # minimal stack frame
+ # a2/ dst, a3/ c, a4/ length
+ extui a3, a3, 0, 8 # mask to just 8 bits
+ slli a7, a3, 8 # duplicate character in all bytes of word
+ or a3, a3, a7 # ...
+ slli a7, a3, 16 # ...
+ or a3, a3, a7 # ...
+ mov a5, a2 # copy dst so that a2 is return value
+ movi a6, 3 # for alignment tests
+ bany a2, a6, .Ldstunaligned # if dst is unaligned
+.L0: # return here from .Ldstunaligned when dst is aligned
+ srli a7, a4, 4 # number of loop iterations with 16B
+ # per iteration
+ bnez a4, .Laligned
+ retw
+
+/*
+ * Destination is word-aligned.
+ */
+ # set 16 bytes per iteration for word-aligned dst
+ .align 4 # 1 mod 4 alignment for LOOPNEZ
+ .byte 0 # (0 mod 4 alignment for LBEG)
+.Laligned:
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, .Loop1done
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a7, .Loop1done
+ slli a6, a7, 4
+ add a6, a6, a5 # a6 = end of last 16B chunk
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop1:
+ EX(s32i, a3, a5, 0, memset_fixup)
+ EX(s32i, a3, a5, 4, memset_fixup)
+ EX(s32i, a3, a5, 8, memset_fixup)
+ EX(s32i, a3, a5, 12, memset_fixup)
+ addi a5, a5, 16
+#if !XCHAL_HAVE_LOOPS
+ blt a5, a6, .Loop1
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop1done:
+ bbci.l a4, 3, .L2
+ # set 8 bytes
+ EX(s32i, a3, a5, 0, memset_fixup)
+ EX(s32i, a3, a5, 4, memset_fixup)
+ addi a5, a5, 8
+.L2:
+ bbci.l a4, 2, .L3
+ # set 4 bytes
+ EX(s32i, a3, a5, 0, memset_fixup)
+ addi a5, a5, 4
+.L3:
+ bbci.l a4, 1, .L4
+ # set 2 bytes
+ EX(s16i, a3, a5, 0, memset_fixup)
+ addi a5, a5, 2
+.L4:
+ bbci.l a4, 0, .L5
+ # set 1 byte
+ EX(s8i, a3, a5, 0, memset_fixup)
+.L5:
+.Lret1:
+ retw
+
+/*
+ * Destination is unaligned
+ */
+
+.Ldstunaligned:
+ bltui a4, 8, .Lbyteset # do short copies byte by byte
+ bbci.l a5, 0, .L20 # branch if dst is half-word aligned
+ # dst is only byte aligned
+ # set 1 byte
+ EX(s8i, a3, a5, 0, memset_fixup)
+ addi a5, a5, 1
+ addi a4, a4, -1
+ # now retest if dst aligned
+ bbci.l a5, 1, .L0 # if now aligned, return to main algorithm
+.L20:
+ # dst half-aligned
+ # set 2 bytes
+ EX(s16i, a3, a5, 0, memset_fixup)
+ addi a5, a5, 2
+ addi a4, a4, -2
+ j .L0 # dst is now aligned, return to main algorithm
+
+/*
+ * Byte by byte set
+ */
+ .align 4
+ .byte 0 # 1 mod 4 alignment for LOOPNEZ
+ # (0 mod 4 alignment for LBEG)
+.Lbyteset:
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lbytesetdone
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a4, .Lbytesetdone
+ add a6, a5, a4 # a6 = ending address
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lbyteloop:
+ EX(s8i, a3, a5, 0, memset_fixup)
+ addi a5, a5, 1
+#if !XCHAL_HAVE_LOOPS
+ blt a5, a6, .Lbyteloop
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lbytesetdone:
+ retw
+
+
+ .section .fixup, "ax"
+ .align 4
+
+/* We return zero if a failure occurred. */
+
+memset_fixup:
+ movi a2, 0
+ retw
diff --git a/arch/xtensa/lib/pci-auto.c b/arch/xtensa/lib/pci-auto.c
new file mode 100644
index 00000000000..90c790f6123
--- /dev/null
+++ b/arch/xtensa/lib/pci-auto.c
@@ -0,0 +1,352 @@
+/*
+ * arch/xtensa/lib/pci-auto.c
+ *
+ * PCI autoconfiguration library
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Chris Zankel <zankel@tensilica.com, cez@zankel.net>
+ *
+ * Based on work from Matt Porter <mporter@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/pci-bridge.h>
+
+
+/*
+ *
+ * Setting up a PCI controller:
+ *
+ * pci_ctrl->first_busno = <first bus number (0)>
+ * pci_ctrl->last_busno = <last bus number (0xff)>
+ * pci_ctrl->ops = <PCI config operations>
+ * pci_ctrl->map_irq = <function to return the interrupt number for a device>
+ *
+ * pci_ctrl->io_space.start = <IO space start address (PCI view)>
+ * pci_ctrl->io_space.end = <IO space end address (PCI view)>
+ * pci_ctrl->io_space.base = <IO space offset: address 0 from CPU space>
+ * pci_ctrl->mem_space.start = <MEM space start address (PCI view)>
+ * pci_ctrl->mem_space.end = <MEM space end address (PCI view)>
+ * pci_ctrl->mem_space.base = <MEM space offset: address 0 from CPU space>
+ *
+ * pcibios_init_resource(&pci_ctrl->io_resource, <IO space start>,
+ * <IO space end>, IORESOURCE_IO, "PCI host bridge");
+ * pcibios_init_resource(&pci_ctrl->mem_resources[0], <MEM space start>,
+ * <MEM space end>, IORESOURCE_MEM, "PCI host bridge");
+ *
+ * pci_ctrl->last_busno = pciauto_bus_scan(pci_ctrl,pci_ctrl->first_busno);
+ *
+ * int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
+ *
+ */
+
+
+/* define DEBUG to print some debugging messages. */
+
+#undef DEBUG
+
+#ifdef DEBUG
+# define DBG(x...) printk(x)
+#else
+# define DBG(x...)
+#endif
+
+static int pciauto_upper_iospc;
+static int pciauto_upper_memspc;
+
+static struct pci_dev pciauto_dev;
+static struct pci_bus pciauto_bus;
+
+/*
+ * Helper functions
+ */
+
+/* Initialize the bars of a PCI device. */
+
+static void __init
+pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
+{
+ int bar_size;
+ int bar, bar_nr;
+ int *upper_limit;
+ int found_mem64 = 0;
+
+ for (bar = PCI_BASE_ADDRESS_0, bar_nr = 0;
+ bar <= bar_limit;
+ bar+=4, bar_nr++)
+ {
+ /* Tickle the BAR and get the size */
+ pci_write_config_dword(dev, bar, 0xffffffff);
+ pci_read_config_dword(dev, bar, &bar_size);
+
+ /* If BAR is not implemented go to the next BAR */
+ if (!bar_size)
+ continue;
+
+ /* Check the BAR type and set our address mask */
+ if (bar_size & PCI_BASE_ADDRESS_SPACE_IO)
+ {
+ bar_size &= PCI_BASE_ADDRESS_IO_MASK;
+ upper_limit = &pciauto_upper_iospc;
+ DBG("PCI Autoconfig: BAR %d, I/O, ", bar_nr);
+ }
+ else
+ {
+ if ((bar_size & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
+ PCI_BASE_ADDRESS_MEM_TYPE_64)
+ found_mem64 = 1;
+
+ bar_size &= PCI_BASE_ADDRESS_MEM_MASK;
+ upper_limit = &pciauto_upper_memspc;
+ DBG("PCI Autoconfig: BAR %d, Mem, ", bar_nr);
+ }
+
+ /* Allocate a base address (bar_size is negative!) */
+ *upper_limit = (*upper_limit + bar_size) & bar_size;
+
+ /* Write it out and update our limit */
+ pci_write_config_dword(dev, bar, *upper_limit);
+
+ /*
+ * If we are a 64-bit decoder then increment to the
+ * upper 32 bits of the bar and force it to locate
+ * in the lower 4GB of memory.
+ */
+
+ if (found_mem64) {
+ pci_write_config_dword(dev, (bar+=4), 0x00000000);
+ found_mem64 = 0; /* don't leak into the next BAR */
+ }
+
+ DBG("size=0x%x, address=0x%x\n", ~bar_size + 1, *upper_limit);
+ }
+}
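The sizing trick above deserves a worked example: writing all ones to a BAR makes the device return a mask of the address bits it decodes, and that mask doubles as a negative, size-aligned offset, which is why "(*upper_limit + bar_size) & bar_size" yields a correctly aligned base. A hypothetical helper, for illustration:

static unsigned int bar_size_from_mask(unsigned int mask)
{
        mask &= 0xfffffff0;     /* PCI_BASE_ADDRESS_MEM_MASK */
        return ~mask + 1;       /* 0xffff0000 -> 0x00010000 (64KB BAR) */
}
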
+
+/* Initialize the interrupt number. */
+
+static void __init
+pciauto_setup_irq(struct pci_controller* pci_ctrl,struct pci_dev *dev,int devfn)
+{
+ u8 pin;
+ int irq = 0;
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+
+ /* Fix illegal pin numbers. */
+
+ if (pin == 0 || pin > 4)
+ pin = 1;
+
+ if (pci_ctrl->map_irq)
+ irq = pci_ctrl->map_irq(dev, PCI_SLOT(devfn), pin);
+
+ if (irq == -1)
+ irq = 0;
+
+ DBG("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin);
+
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
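A hypothetical example of the pci_ctrl->map_irq hook consumed above, for a board whose four PCI interrupt pins are rotated per slot onto consecutive CPU interrupts (BOARD_PCI_IRQ_BASE is an assumption, not a symbol from this patch):

#include <linux/pci.h>

#define BOARD_PCI_IRQ_BASE 16   /* assumed platform-specific base irq */

static int board_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
        /* pin is 1..4 after the fix-up above; rotate it by slot. */
        return BOARD_PCI_IRQ_BASE + ((slot + pin - 1) & 3);
}
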
+
+
+static void __init
+pciauto_prescan_setup_bridge(struct pci_dev *dev, int current_bus,
+ int sub_bus, int *iosave, int *memsave)
+{
+ /* Configure bus number registers */
+ pci_write_config_byte(dev, PCI_PRIMARY_BUS, current_bus);
+ pci_write_config_byte(dev, PCI_SECONDARY_BUS, sub_bus + 1);
+ pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, 0xff);
+
+ /* Round memory allocator to 1MB boundary */
+ pciauto_upper_memspc &= ~(0x100000 - 1);
+ *memsave = pciauto_upper_memspc;
+
+ /* Round I/O allocator to 4KB boundary */
+ pciauto_upper_iospc &= ~(0x1000 - 1);
+ *iosave = pciauto_upper_iospc;
+
+ /* Set up memory and I/O filter limits, assume 32-bit I/O space */
+ pci_write_config_word(dev, PCI_MEMORY_LIMIT,
+ ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16);
+ pci_write_config_byte(dev, PCI_IO_LIMIT,
+ ((pciauto_upper_iospc - 1) & 0x0000f000) >> 8);
+ pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
+ ((pciauto_upper_iospc - 1) & 0xffff0000) >> 16);
+}
+
+static void __init
+pciauto_postscan_setup_bridge(struct pci_dev *dev, int current_bus, int sub_bus,
+ int *iosave, int *memsave)
+{
+ int cmdstat;
+
+ /* Configure bus number registers */
+ pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, sub_bus);
+
+ /*
+ * Round memory allocator to 1MB boundary.
+ * If no space used, allocate minimum.
+ */
+ pciauto_upper_memspc &= ~(0x100000 - 1);
+ if (*memsave == pciauto_upper_memspc)
+ pciauto_upper_memspc -= 0x00100000;
+
+ pci_write_config_word(dev, PCI_MEMORY_BASE, pciauto_upper_memspc >> 16);
+
+ /* Allocate 1MB for prefetch */
+ pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT,
+ ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16);
+
+ pciauto_upper_memspc -= 0x100000;
+
+ pci_write_config_word(dev, PCI_PREF_MEMORY_BASE,
+ pciauto_upper_memspc >> 16);
+
+ /* Round I/O allocator to 4KB boundary */
+ pciauto_upper_iospc &= ~(0x1000 - 1);
+ if (*iosave == pciauto_upper_iospc)
+ pciauto_upper_iospc -= 0x1000;
+
+ pci_write_config_byte(dev, PCI_IO_BASE,
+ (pciauto_upper_iospc & 0x0000f000) >> 8);
+ pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
+ pciauto_upper_iospc >> 16);
+
+ /* Enable memory and I/O accesses, enable bus master */
+ pci_read_config_dword(dev, PCI_COMMAND, &cmdstat);
+ pci_write_config_dword(dev, PCI_COMMAND,
+ cmdstat |
+ PCI_COMMAND_IO |
+ PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER);
+}
+
+/*
+ * Scan the current PCI bus.
+ */
+
+
+int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
+{
+ int sub_bus, pci_devfn, pci_class, cmdstat, found_multi=0;
+ unsigned short vid;
+ unsigned char header_type;
+ struct pci_dev *dev = &pciauto_dev;
+
+ pciauto_dev.bus = &pciauto_bus;
+ pciauto_dev.sysdata = pci_ctrl;
+ pciauto_bus.ops = pci_ctrl->ops;
+
+ /*
+ * Fetch our I/O and memory space upper boundaries used
+ * to allocate base addresses on this pci_controller.
+ */
+
+ if (current_bus == pci_ctrl->first_busno)
+ {
+ pciauto_upper_iospc = pci_ctrl->io_resource.end + 1;
+ pciauto_upper_memspc = pci_ctrl->mem_resources[0].end + 1;
+ }
+
+ sub_bus = current_bus;
+
+ for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++)
+ {
+ /* Skip our host bridge */
+ if ((current_bus == pci_ctrl->first_busno) && (pci_devfn == 0))
+ continue;
+
+ if (PCI_FUNC(pci_devfn) && !found_multi)
+ continue;
+
+ pciauto_bus.number = current_bus;
+ pciauto_dev.devfn = pci_devfn;
+
+ /* If config space read fails from this device, move on */
+ if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type))
+ continue;
+
+ if (!PCI_FUNC(pci_devfn))
+ found_multi = header_type & 0x80;
+ pci_read_config_word(dev, PCI_VENDOR_ID, &vid);
+
+ if (vid == 0xffff || vid == 0x0000) {
+ found_multi = 0;
+ continue;
+ }
+
+ pci_read_config_dword(dev, PCI_CLASS_REVISION, &pci_class);
+
+ if ((pci_class >> 16) == PCI_CLASS_BRIDGE_PCI) {
+
+ int iosave, memsave;
+
+ DBG("PCI Autoconfig: Found P2P bridge, device %d\n",
+ PCI_SLOT(pci_devfn));
+
+ /* Allocate PCI I/O and/or memory space */
+ pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1);
+
+ pciauto_prescan_setup_bridge(dev, current_bus, sub_bus,
+ &iosave, &memsave);
+ sub_bus = pciauto_bus_scan(pci_ctrl, sub_bus+1);
+ pciauto_postscan_setup_bridge(dev, current_bus, sub_bus,
+ &iosave, &memsave);
+ pciauto_bus.number = current_bus;
+
+ continue;
+
+ }
+
+
+#if 0
+ /* Skip legacy mode IDE controller */
+
+ if ((pci_class >> 16) == PCI_CLASS_STORAGE_IDE) {
+
+ unsigned char prg_iface;
+ pci_read_config_byte(dev, PCI_CLASS_PROG, &prg_iface);
+
+ if (!(prg_iface & PCIAUTO_IDE_MODE_MASK)) {
+ DBG("PCI Autoconfig: Skipping legacy mode "
+ "IDE controller\n");
+ continue;
+ }
+ }
+#endif
+
+ /*
+ * Found a peripheral, enable some standard
+ * settings
+ */
+
+ pci_read_config_dword(dev, PCI_COMMAND, &cmdstat);
+ pci_write_config_dword(dev, PCI_COMMAND,
+ cmdstat |
+ PCI_COMMAND_IO |
+ PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER);
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);
+
+ /* Allocate PCI I/O and/or memory space */
+ DBG("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n",
+ current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn) );
+
+ pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5);
+ pciauto_setup_irq(pci_ctrl, dev, pci_devfn);
+ }
+ return sub_bus;
+}
+
+
+
+
+
diff --git a/arch/xtensa/lib/strcasecmp.c b/arch/xtensa/lib/strcasecmp.c
new file mode 100644
index 00000000000..165b2d6effa
--- /dev/null
+++ b/arch/xtensa/lib/strcasecmp.c
@@ -0,0 +1,32 @@
+/*
+ * linux/arch/xtensa/lib/strcasecmp.c
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+#include <linux/string.h>
+
+
+/* We handle nothing here except the C locale. Since this is used in
+ only one place, on strings known to contain only 7 bit ASCII, this
+ is ok. */
+
+int strcasecmp(const char *a, const char *b)
+{
+ int ca, cb;
+
+ do {
+ ca = *a++ & 0xff;
+ cb = *b++ & 0xff;
+ if (ca >= 'A' && ca <= 'Z')
+ ca += 'a' - 'A';
+ if (cb >= 'A' && cb <= 'Z')
+ cb += 'a' - 'A';
+ } while (ca == cb && ca != '\0');
+
+ return ca - cb;
+}
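A userspace-style usage sketch of the C-locale behavior described above (illustration only):

#include <assert.h>
#include <strings.h>

int main(void)
{
        assert(strcasecmp("Kconfig", "KCONFIG") == 0); /* ASCII case folds */
        assert(strcasecmp("abc", "abd") < 0);          /* ordinary ordering */
        return 0;
}
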
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
new file mode 100644
index 00000000000..71d55df4389
--- /dev/null
+++ b/arch/xtensa/lib/strncpy_user.S
@@ -0,0 +1,224 @@
+/*
+ * arch/xtensa/lib/strncpy_user.S
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Returns: -EFAULT if exception before terminator, N if the entire
+ * buffer filled, else strlen.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
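The success cases of the contract above behave like this C model (a sketch; the real routine additionally returns -EFAULT when an EX()-protected access faults):

static long strncpy_user_model(char *dst, const char *src, unsigned long len)
{
        unsigned long n;

        for (n = 0; n < len; n++) {
                dst[n] = src[n];
                if (src[n] == '\0')
                        return n;       /* strlen; terminator not counted */
        }
        return len;                     /* entire buffer filled */
}
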
+#include <xtensa/coreasm.h>
+#include <linux/errno.h>
+
+/* Load or store instructions that may cause exceptions use the EX macro. */
+
+#define EX(insn,reg1,reg2,offset,handler) \
+9: insn reg1, reg2, offset; \
+ .section __ex_table, "a"; \
+ .word 9b, handler; \
+ .previous
+
+/*
+ * char *__strncpy_user(char *dst, const char *src, size_t len)
+ */
+.text
+.begin literal
+.align 4
+.Lmask0:
+ .byte 0xff, 0x00, 0x00, 0x00
+.Lmask1:
+ .byte 0x00, 0xff, 0x00, 0x00
+.Lmask2:
+ .byte 0x00, 0x00, 0xff, 0x00
+.Lmask3:
+ .byte 0x00, 0x00, 0x00, 0xff
+.end literal
+
+# Register use
+# a0/ return address
+# a1/ stack pointer
+# a2/ return value
+# a3/ src
+# a4/ len
+# a5/ mask0
+# a6/ mask1
+# a7/ mask2
+# a8/ mask3
+# a9/ tmp
+# a10/ tmp
+# a11/ dst
+# a12/ tmp
+
+.align 4
+.global __strncpy_user
+.type __strncpy_user,@function
+__strncpy_user:
+ entry sp, 16 # minimal stack frame
+ # a2/ dst, a3/ src, a4/ len
+ mov a11, a2 # leave dst in return value register
+ beqz a4, .Lret # if len is zero
+ l32r a5, .Lmask0 # mask for byte 0
+ l32r a6, .Lmask1 # mask for byte 1
+ l32r a7, .Lmask2 # mask for byte 2
+ l32r a8, .Lmask3 # mask for byte 3
+ bbsi.l a3, 0, .Lsrc1mod2 # if only 8-bit aligned
+ bbsi.l a3, 1, .Lsrc2mod4 # if only 16-bit aligned
+.Lsrcaligned: # return here when src is word-aligned
+ srli a12, a4, 2 # number of loop iterations with 4B per loop
+ movi a9, 3
+ bnone a11, a9, .Laligned
+ j .Ldstunaligned
+
+.Lsrc1mod2: # src address is odd
+ EX(l8ui, a9, a3, 0, fixup_l) # get byte 0
+ addi a3, a3, 1 # advance src pointer
+ EX(s8i, a9, a11, 0, fixup_s) # store byte 0
+ beqz a9, .Lret # if byte 0 is zero
+ addi a11, a11, 1 # advance dst pointer
+ addi a4, a4, -1 # decrement len
+ beqz a4, .Lret # if len is zero
+ bbci.l a3, 1, .Lsrcaligned # if src is now word-aligned
+
+.Lsrc2mod4: # src address is 2 mod 4
+ EX(l8ui, a9, a3, 0, fixup_l) # get byte 0
+ /* 1-cycle interlock */
+ EX(s8i, a9, a11, 0, fixup_s) # store byte 0
+ beqz a9, .Lret # if byte 0 is zero
+ addi a11, a11, 1 # advance dst pointer
+ addi a4, a4, -1 # decrement len
+ beqz a4, .Lret # if len is zero
+ EX(l8ui, a9, a3, 1, fixup_l) # get byte 1
+ addi a3, a3, 2 # advance src pointer
+ EX(s8i, a9, a11, 0, fixup_s) # store byte 1
+ beqz a9, .Lret # if byte 1 is zero
+ addi a11, a11, 1 # advance dst pointer
+ addi a4, a4, -1 # decrement len
+ bnez a4, .Lsrcaligned # if len is nonzero
+.Lret:
+ sub a2, a11, a2 # compute strlen
+ retw
+
+/*
+ * dst is word-aligned, src is word-aligned
+ */
+ .align 4 # 1 mod 4 alignment for LOOPNEZ
+ .byte 0 # (0 mod 4 alignment for LBEG)
+.Laligned:
+#if XCHAL_HAVE_LOOPS
+ loopnez a12, .Loop1done
+#else
+ beqz a12, .Loop1done
+ slli a12, a12, 2
+ add a12, a12, a11 # a12 = end of last 4B chunk
+#endif
+.Loop1:
+ EX(l32i, a9, a3, 0, fixup_l) # get word from src
+ addi a3, a3, 4 # advance src pointer
+ bnone a9, a5, .Lz0 # if byte 0 is zero
+ bnone a9, a6, .Lz1 # if byte 1 is zero
+ bnone a9, a7, .Lz2 # if byte 2 is zero
+ EX(s32i, a9, a11, 0, fixup_s) # store word to dst
+ bnone a9, a8, .Lz3 # if byte 3 is zero
+ addi a11, a11, 4 # advance dst pointer
+#if !XCHAL_HAVE_LOOPS
+ blt a11, a12, .Loop1
+#endif
+
+.Loop1done:
+ bbci.l a4, 1, .L100
+ # copy 2 bytes
+ EX(l16ui, a9, a3, 0, fixup_l)
+ addi a3, a3, 2 # advance src pointer
+#ifdef __XTENSA_EB__
+ bnone a9, a7, .Lz0 # if byte 2 is zero
+ bnone a9, a8, .Lz1 # if byte 3 is zero
+#else
+ bnone a9, a5, .Lz0 # if byte 0 is zero
+ bnone a9, a6, .Lz1 # if byte 1 is zero
+#endif
+ EX(s16i, a9, a11, 0, fixup_s)
+ addi a11, a11, 2 # advance dst pointer
+.L100:
+ bbci.l a4, 0, .Lret
+ EX(l8ui, a9, a3, 0, fixup_l)
+ /* slot */
+ EX(s8i, a9, a11, 0, fixup_s)
+ beqz a9, .Lret # if byte is zero
+ addi a11, a11, 1-3 # advance dst ptr 1, but also cancel
+ # the effect of adding 3 in .Lz3 code
+ /* fall thru to .Lz3 and "retw" */
+
+.Lz3: # byte 3 is zero
+ addi a11, a11, 3 # advance dst pointer
+ sub a2, a11, a2 # compute strlen
+ retw
+.Lz0: # byte 0 is zero
+#ifdef __XTENSA_EB__
+ movi a9, 0
+#endif /* __XTENSA_EB__ */
+ EX(s8i, a9, a11, 0, fixup_s)
+ sub a2, a11, a2 # compute strlen
+ retw
+.Lz1: # byte 1 is zero
+#ifdef __XTENSA_EB__
+ extui a9, a9, 16, 16
+#endif /* __XTENSA_EB__ */
+ EX(s16i, a9, a11, 0, fixup_s)
+ addi a11, a11, 1 # advance dst pointer
+ sub a2, a11, a2 # compute strlen
+ retw
+.Lz2: # byte 2 is zero
+#ifdef __XTENSA_EB__
+ extui a9, a9, 16, 16
+#endif /* __XTENSA_EB__ */
+ EX(s16i, a9, a11, 0, fixup_s)
+ movi a9, 0
+ EX(s8i, a9, a11, 2, fixup_s)
+ addi a11, a11, 2 # advance dst pointer
+ sub a2, a11, a2 # compute strlen
+ retw
+
+ .align 4 # 1 mod 4 alignment for LOOPNEZ
+ .byte 0 # (0 mod 4 alignment for LBEG)
+.Ldstunaligned:
+/*
+ * for now just use byte copy loop
+ */
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lunalignedend
+#else
+ beqz a4, .Lunalignedend
+ add a12, a11, a4 # a12 = ending address
+#endif /* XCHAL_HAVE_LOOPS */
+.Lnextbyte:
+ EX(l8ui, a9, a3, 0, fixup_l)
+ addi a3, a3, 1
+ EX(s8i, a9, a11, 0, fixup_s)
+ beqz a9, .Lunalignedend
+ addi a11, a11, 1
+#if !XCHAL_HAVE_LOOPS
+ blt a11, a12, .Lnextbyte
+#endif
+
+.Lunalignedend:
+ sub a2, a11, a2 # compute strlen
+ retw
+
+
+ .section .fixup, "ax"
+ .align 4
+
+ /* For now, just return -EFAULT. Future implementations might
+ * like to clear remaining kernel space, like the fixup
+ * implementation in memset(). Thus, we differentiate between
+ * load/store fixups. */
+
+fixup_s:
+fixup_l:
+ movi a2, -EFAULT
+ retw
+
diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S
new file mode 100644
index 00000000000..cdff4d670f3
--- /dev/null
+++ b/arch/xtensa/lib/strnlen_user.S
@@ -0,0 +1,147 @@
+/*
+ * arch/xtensa/lib/strnlen_user.S
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Returns strnlen, including trailing zero terminator.
+ * Zero indicates error.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
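As a reading aid, the contract above corresponds to this C model; the error case maps to the lenfixup handler at the end of the file (sketch only):

static unsigned long strnlen_user_model(const char *s, unsigned long len)
{
        unsigned long n;

        for (n = 0; n < len; n++)
                if (s[n] == '\0')
                        return n + 1;   /* count the terminator too */
        return len;                     /* len exhausted: no extra +1 */
}
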
+#include <xtensa/coreasm.h>
+
+/* Load or store instructions that may cause exceptions use the EX macro. */
+
+#define EX(insn,reg1,reg2,offset,handler) \
+9: insn reg1, reg2, offset; \
+ .section __ex_table, "a"; \
+ .word 9b, handler; \
+ .previous
+
+/*
+ * size_t __strnlen_user(const char *s, size_t len)
+ */
+.text
+.begin literal
+.align 4
+.Lmask0:
+ .byte 0xff, 0x00, 0x00, 0x00
+.Lmask1:
+ .byte 0x00, 0xff, 0x00, 0x00
+.Lmask2:
+ .byte 0x00, 0x00, 0xff, 0x00
+.Lmask3:
+ .byte 0x00, 0x00, 0x00, 0xff
+.end literal
+
+# Register use:
+# a2/ src
+# a3/ len
+# a4/ tmp
+# a5/ mask0
+# a6/ mask1
+# a7/ mask2
+# a8/ mask3
+# a9/ tmp
+# a10/ tmp
+
+.align 4
+.global __strnlen_user
+.type __strnlen_user,@function
+__strnlen_user:
+ entry sp, 16 # minimal stack frame
+ # a2/ s, a3/ len
+ addi a4, a2, -4 # because we overincrement at the end;
+ # we compensate with load offsets of 4
+ l32r a5, .Lmask0 # mask for byte 0
+ l32r a6, .Lmask1 # mask for byte 1
+ l32r a7, .Lmask2 # mask for byte 2
+ l32r a8, .Lmask3 # mask for byte 3
+ bbsi.l a2, 0, .L1mod2 # if only 8-bit aligned
+ bbsi.l a2, 1, .L2mod4 # if only 16-bit aligned
+
+/*
+ * String is word-aligned.
+ */
+.Laligned:
+ srli a10, a3, 2 # number of loop iterations with 4B per loop
+#if XCHAL_HAVE_LOOPS
+ loopnez a10, .Ldone
+#else
+ beqz a10, .Ldone
+ slli a10, a10, 2
+ add a10, a10, a4 # a10 = end of last 4B chunk
+#endif /* XCHAL_HAVE_LOOPS */
+.Loop:
+ EX(l32i, a9, a4, 4, lenfixup) # get next word of string
+ addi a4, a4, 4 # advance string pointer
+ bnone a9, a5, .Lz0 # if byte 0 is zero
+ bnone a9, a6, .Lz1 # if byte 1 is zero
+ bnone a9, a7, .Lz2 # if byte 2 is zero
+ bnone a9, a8, .Lz3 # if byte 3 is zero
+#if !XCHAL_HAVE_LOOPS
+ blt a4, a10, .Loop
+#endif
+
+.Ldone:
+ EX(l32i, a9, a4, 4, lenfixup) # load 4 bytes for remaining checks
+
+ bbci.l a3, 1, .L100
+ # check two more bytes (bytes 0, 1 of word)
+ addi a4, a4, 2 # advance string pointer
+ bnone a9, a5, .Lz0 # if byte 0 is zero
+ bnone a9, a6, .Lz1 # if byte 1 is zero
+.L100:
+ bbci.l a3, 0, .L101
+ # check one more byte (byte 2 of word)
+ # Actually, we don't need to check. Zero or nonzero, we'll add one.
+ # Do not add an extra one for the NULL terminator since we have
+ # exhausted the original len parameter.
+ addi a4, a4, 1 # advance string pointer
+.L101:
+ sub a2, a4, a2 # compute length
+ retw
+
+# NOTE that in several places below, we point to the byte just after
+# the zero byte in order to include the NULL terminator in the count.
+
+.Lz3: # byte 3 is zero
+ addi a4, a4, 3 # point to zero byte
+.Lz0: # byte 0 is zero
+ addi a4, a4, 1 # point just beyond zero byte
+ sub a2, a4, a2 # subtract to get length
+ retw
+.Lz1: # byte 1 is zero
+ addi a4, a4, 1+1 # point just beyond zero byte
+ sub a2, a4, a2 # subtract to get length
+ retw
+.Lz2: # byte 2 is zero
+ addi a4, a4, 2+1 # point just beyond zero byte
+ sub a2, a4, a2 # subtract to get length
+ retw
+
+.L1mod2: # address is odd
+ EX(l8ui, a9, a4, 4, lenfixup) # get byte 0
+ addi a4, a4, 1 # advance string pointer
+ beqz a9, .Lz3 # if byte 0 is zero
+ bbci.l a4, 1, .Laligned # if string pointer is now word-aligned
+
+.L2mod4: # address is 2 mod 4
+ addi a4, a4, 2 # advance ptr for aligned access
+ EX(l32i, a9, a4, 0, lenfixup) # get word with first two bytes of string
+ bnone a9, a7, .Lz2 # if byte 2 (of word, not string) is zero
+ bany a9, a8, .Laligned # if byte 3 (of word, not string) is nonzero
+ # byte 3 is zero
+ addi a4, a4, 3+1 # point just beyond zero byte
+ sub a2, a4, a2 # subtract to get length
+ retw
+
+ .section .fixup, "ax"
+ .align 4
+lenfixup:
+ movi a2, 0
+ retw
+
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
new file mode 100644
index 00000000000..265db2693cb
--- /dev/null
+++ b/arch/xtensa/lib/usercopy.S
@@ -0,0 +1,321 @@
+/*
+ * arch/xtensa/lib/usercopy.S
+ *
+ * Copy to/from user space (derived from arch/xtensa/lib/hal/memcopy.S)
+ *
+ * DO NOT COMBINE this function with <arch/xtensa/lib/hal/memcopy.S>.
+ * It needs to remain separate and distinct. The hal files are part
+ * of the Xtensa link-time HAL, and those files may differ per
+ * processor configuration. Patching the kernel for another
+ * processor configuration includes replacing the hal files, and we
+ * could lose the special functionality for accessing user-space
+ * memory during such a patch. We sacrifice a little code space here
+ * in favor of simpler code maintenance.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+/*
+ * size_t __xtensa_copy_user (void *dst, const void *src, size_t len);
+ *
+ * The return value is the number of bytes not copied, so zero
+ * means complete success.
+ *
+ * The general case algorithm is as follows:
+ * If the destination and source are both aligned,
+ * do 16B chunks with a loop, and then finish up with
+ * 8B, 4B, 2B, and 1B copies conditional on the length.
+ * If destination is aligned and source unaligned,
+ * do the same, but use SRC to align the source data.
+ * If destination is unaligned, align it by conditionally
+ * copying 1B and 2B and then retest.
+ * This code tries to use fall-through branches for the common
+ * case of aligned destinations (except for the branches to
+ * the alignment label).
+ *
+ * Register use:
+ * a0/ return address
+ * a1/ stack pointer
+ * a2/ return value
+ * a3/ src
+ * a4/ length
+ * a5/ dst
+ * a6/ tmp
+ * a7/ tmp
+ * a8/ tmp
+ * a9/ tmp
+ * a10/ tmp
+ * a11/ original length
+ */
+
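Caller-side sketch of the contract above: the return value counts bytes not copied, so zero means complete success. How a caller maps that to -EFAULT is an assumption here, not part of this patch:

#include <linux/types.h>
#include <linux/errno.h>

extern size_t __xtensa_copy_user(void *dst, const void *src, size_t len);

static int fetch_from_user(void *kbuf, const void *ubuf, size_t len)
{
        size_t left = __xtensa_copy_user(kbuf, ubuf, len);

        if (left)               /* faulted after (len - left) bytes */
                return -EFAULT;
        return 0;
}
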
+#include <xtensa/coreasm.h>
+
+#ifdef __XTENSA_EB__
+#define ALIGN(R, W0, W1) src R, W0, W1
+#define SSA8(R) ssa8b R
+#else
+#define ALIGN(R, W0, W1) src R, W1, W0
+#define SSA8(R) ssa8l R
+#endif
+
+/* Load or store instructions that may cause exceptions use the EX macro. */
+
+#define EX(insn,reg1,reg2,offset,handler) \
+9: insn reg1, reg2, offset; \
+ .section __ex_table, "a"; \
+ .word 9b, handler; \
+ .previous
+
+
+ .text
+ .align 4
+ .global __xtensa_copy_user
+ .type __xtensa_copy_user,@function
+__xtensa_copy_user:
+ entry sp, 16 # minimal stack frame
+ # a2/ dst, a3/ src, a4/ len
+ mov a5, a2 # copy dst so that a2 is return value
+ mov a11, a4 # preserve original len for error case
+.Lcommon:
+ bbsi.l a2, 0, .Ldst1mod2 # if dst is 1 mod 2
+ bbsi.l a2, 1, .Ldst2mod4 # if dst is 2 mod 4
+.Ldstaligned: # return here from .Ldst?mod? when dst is aligned
+ srli a7, a4, 4 # number of loop iterations with 16B
+ # per iteration
+ movi a8, 3 # if source is also aligned,
+ bnone a3, a8, .Laligned # then use word copy
+ SSA8( a3) # set shift amount from byte offset
+ bnez a4, .Lsrcunaligned
+ movi a2, 0 # return success for len==0
+ retw
+
+/*
+ * Destination is unaligned
+ */
+
+.Ldst1mod2: # dst is only byte aligned
+ bltui a4, 7, .Lbytecopy # do short copies byte by byte
+
+ # copy 1 byte
+ EX(l8ui, a6, a3, 0, l_fixup)
+ addi a3, a3, 1
+ EX(s8i, a6, a5, 0, s_fixup)
+ addi a5, a5, 1
+ addi a4, a4, -1
+ bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then
+ # return to main algorithm
+.Ldst2mod4: # dst 16-bit aligned
+ # copy 2 bytes
+ bltui a4, 6, .Lbytecopy # do short copies byte by byte
+ EX(l8ui, a6, a3, 0, l_fixup)
+ EX(l8ui, a7, a3, 1, l_fixup)
+ addi a3, a3, 2
+ EX(s8i, a6, a5, 0, s_fixup)
+ EX(s8i, a7, a5, 1, s_fixup)
+ addi a5, a5, 2
+ addi a4, a4, -2
+ j .Ldstaligned # dst is now aligned, return to main algorithm
+
+/*
+ * Byte by byte copy
+ */
+ .align 4
+ .byte 0 # 1 mod 4 alignment for LOOPNEZ
+ # (0 mod 4 alignment for LBEG)
+.Lbytecopy:
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lbytecopydone
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a4, .Lbytecopydone
+ add a7, a3, a4 # a7 = end address for source
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lnextbyte:
+ EX(l8ui, a6, a3, 0, l_fixup)
+ addi a3, a3, 1
+ EX(s8i, a6, a5, 0, s_fixup)
+ addi a5, a5, 1
+#if !XCHAL_HAVE_LOOPS
+ blt a3, a7, .Lnextbyte
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lbytecopydone:
+ movi a2, 0 # return success for len bytes copied
+ retw
+
+/*
+ * Destination and source are word-aligned.
+ */
+ # copy 16 bytes per iteration for word-aligned dst and word-aligned src
+ .align 4 # 1 mod 4 alignment for LOOPNEZ
+ .byte 0 # (0 mod 4 alignment for LBEG)
+.Laligned:
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, .Loop1done
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a7, .Loop1done
+ slli a8, a7, 4
+ add a8, a8, a3 # a8 = end of last 16B source chunk
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop1:
+ EX(l32i, a6, a3, 0, l_fixup)
+ EX(l32i, a7, a3, 4, l_fixup)
+ EX(s32i, a6, a5, 0, s_fixup)
+ EX(l32i, a6, a3, 8, l_fixup)
+ EX(s32i, a7, a5, 4, s_fixup)
+ EX(l32i, a7, a3, 12, l_fixup)
+ EX(s32i, a6, a5, 8, s_fixup)
+ addi a3, a3, 16
+ EX(s32i, a7, a5, 12, s_fixup)
+ addi a5, a5, 16
+#if !XCHAL_HAVE_LOOPS
+ blt a3, a8, .Loop1
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop1done:
+ bbci.l a4, 3, .L2
+ # copy 8 bytes
+ EX(l32i, a6, a3, 0, l_fixup)
+ EX(l32i, a7, a3, 4, l_fixup)
+ addi a3, a3, 8
+ EX(s32i, a6, a5, 0, s_fixup)
+ EX(s32i, a7, a5, 4, s_fixup)
+ addi a5, a5, 8
+.L2:
+ bbci.l a4, 2, .L3
+ # copy 4 bytes
+ EX(l32i, a6, a3, 0, l_fixup)
+ addi a3, a3, 4
+ EX(s32i, a6, a5, 0, s_fixup)
+ addi a5, a5, 4
+.L3:
+ bbci.l a4, 1, .L4
+ # copy 2 bytes
+ EX(l16ui, a6, a3, 0, l_fixup)
+ addi a3, a3, 2
+ EX(s16i, a6, a5, 0, s_fixup)
+ addi a5, a5, 2
+.L4:
+ bbci.l a4, 0, .L5
+ # copy 1 byte
+ EX(l8ui, a6, a3, 0, l_fixup)
+ EX(s8i, a6, a5, 0, s_fixup)
+.L5:
+ movi a2, 0 # return success for len bytes copied
+ retw
+
+/*
+ * Destination is aligned, Source is unaligned
+ */
+
+ .align 4
+ .byte 0 # 1 mod 4 alignment for LOOPNEZ
+ # (0 mod 4 alignment for LBEG)
+.Lsrcunaligned:
+ # copy 16 bytes per iteration for word-aligned dst and unaligned src
+ and a10, a3, a8 # save unalignment offset for below
+ sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware)
+ EX(l32i, a6, a3, 0, l_fixup) # load first word
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, .Loop2done
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a7, .Loop2done
+ slli a10, a7, 4
+ add a10, a10, a3 # a10 = end of last 16B source chunk
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop2:
+ EX(l32i, a7, a3, 4, l_fixup)
+ EX(l32i, a8, a3, 8, l_fixup)
+ ALIGN( a6, a6, a7)
+ EX(s32i, a6, a5, 0, s_fixup)
+ EX(l32i, a9, a3, 12, l_fixup)
+ ALIGN( a7, a7, a8)
+ EX(s32i, a7, a5, 4, s_fixup)
+ EX(l32i, a6, a3, 16, l_fixup)
+ ALIGN( a8, a8, a9)
+ EX(s32i, a8, a5, 8, s_fixup)
+ addi a3, a3, 16
+ ALIGN( a9, a9, a6)
+ EX(s32i, a9, a5, 12, s_fixup)
+ addi a5, a5, 16
+#if !XCHAL_HAVE_LOOPS
+ blt a3, a10, .Loop2
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop2done:
+ bbci.l a4, 3, .L12
+ # copy 8 bytes
+ EX(l32i, a7, a3, 4, l_fixup)
+ EX(l32i, a8, a3, 8, l_fixup)
+ ALIGN( a6, a6, a7)
+ EX(s32i, a6, a5, 0, s_fixup)
+ addi a3, a3, 8
+ ALIGN( a7, a7, a8)
+ EX(s32i, a7, a5, 4, s_fixup)
+ addi a5, a5, 8
+ mov a6, a8
+.L12:
+ bbci.l a4, 2, .L13
+ # copy 4 bytes
+ EX(l32i, a7, a3, 4, l_fixup)
+ addi a3, a3, 4
+ ALIGN( a6, a6, a7)
+ EX(s32i, a6, a5, 0, s_fixup)
+ addi a5, a5, 4
+ mov a6, a7
+.L13:
+ add a3, a3, a10 # readjust a3 with correct misalignment
+ bbci.l a4, 1, .L14
+ # copy 2 bytes
+ EX(l8ui, a6, a3, 0, l_fixup)
+ EX(l8ui, a7, a3, 1, l_fixup)
+ addi a3, a3, 2
+ EX(s8i, a6, a5, 0, s_fixup)
+ EX(s8i, a7, a5, 1, s_fixup)
+ addi a5, a5, 2
+.L14:
+ bbci.l a4, 0, .L15
+ # copy 1 byte
+ EX(l8ui, a6, a3, 0, l_fixup)
+ EX(s8i, a6, a5, 0, s_fixup)
+.L15:
+ movi a2, 0 # return success for len bytes copied
+ retw
+
+
+ .section .fixup, "ax"
+ .align 4
+
+/* a2 = original dst; a5 = current dst; a11= original len
+ * bytes_copied = a5 - a2
+ * retval = bytes_not_copied = original len - bytes_copied
+ * retval = a11 - (a5 - a2)
+ *
+ * Clearing the remaining pieces of kernel memory plugs security
+ * holes. This functionality is the equivalent of the *_zeroing
+ * functions that some architectures provide.
+ */
+
+.Lmemset:
+ .word memset
+
+s_fixup:
+ sub a2, a5, a2 /* a2 <-- bytes copied */
+ sub a2, a11, a2 /* a2 <-- bytes not copied */
+ retw
+
+l_fixup:
+ sub a2, a5, a2 /* a2 <-- bytes copied */
+ sub a2, a11, a2 /* a2 <-- bytes not copied == return value */
+
+ /* void *memset(void *s, int c, size_t n); */
+ mov a6, a5 /* s */
+ movi a7, 0 /* c */
+ mov a8, a2 /* n */
+ l32r a4, .Lmemset
+ callx4 a4
+ /* Ignore memset return value in a6. */
+ /* a2 still contains bytes not copied. */
+ retw
+
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
new file mode 100644
index 00000000000..a5aed5932d7
--- /dev/null
+++ b/arch/xtensa/mm/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the Linux/Xtensa-specific parts of the memory manager.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+obj-y := init.o fault.o tlb.o misc.o
+obj-m :=
+obj-n :=
+obj- :=
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
new file mode 100644
index 00000000000..a945a33e85a
--- /dev/null
+++ b/arch/xtensa/mm/fault.c
@@ -0,0 +1,241 @@
+// TODO VM_EXEC flag work-around, cache aliasing
+/*
+ * arch/xtensa/mm/fault.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+#include <asm/hardirq.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/pgalloc.h>
+
+unsigned long asid_cache = ASID_FIRST_VERSION;
+void bad_page_fault(struct pt_regs*, unsigned long, int);
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * Note: does not handle Miss and MultiHit.
+ */
+
+void do_page_fault(struct pt_regs *regs)
+{
+ struct vm_area_struct * vma;
+ struct mm_struct *mm = current->mm;
+ unsigned int exccause = regs->exccause;
+ unsigned int address = regs->excvaddr;
+ siginfo_t info;
+
+ int is_write, is_exec;
+
+ info.si_code = SEGV_MAPERR;
+
+ /* We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ */
+ if (address >= TASK_SIZE && !user_mode(regs))
+ goto vmalloc_fault;
+
+ /* If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_atomic() || !mm) {
+ bad_page_fault(regs, address, SIGSEGV);
+ return;
+ }
+
+ is_write = (exccause == XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
+ is_exec = (exccause == XCHAL_EXCCAUSE_ITLB_PRIVILEGE ||
+ exccause == XCHAL_EXCCAUSE_ITLB_MISS ||
+ exccause == XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
+
+#if 0
+ printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
+ address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
+#endif
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, address);
+
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, address))
+ goto bad_area;
+
+ /* Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+
+good_area:
+ info.si_code = SEGV_ACCERR;
+
+ if (is_write) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else if (is_exec) {
+ if (!(vma->vm_flags & VM_EXEC))
+ goto bad_area;
+ } else /* Allow read even from write-only pages. */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+ goto bad_area;
+
+ /* If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+survive:
+ switch (handle_mm_fault(mm, vma, address, is_write)) {
+ case VM_FAULT_MINOR:
+ current->min_flt++;
+ break;
+ case VM_FAULT_MAJOR:
+ current->maj_flt++;
+ break;
+ case VM_FAULT_SIGBUS:
+ goto do_sigbus;
+ case VM_FAULT_OOM:
+ goto out_of_memory;
+ default:
+ BUG();
+ }
+
+ up_read(&mm->mmap_sem);
+ return;
+
+ /* Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+ if (user_mode(regs)) {
+ current->thread.bad_vaddr = address;
+ current->thread.error_code = is_write;
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void *) address;
+ force_sig_info(SIGSEGV, &info, current);
+ return;
+ }
+ bad_page_fault(regs, address, SIGSEGV);
+ return;
+
+
+ /* We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ up_read(&mm->mmap_sem);
+ if (current->pid == 1) {
+ yield();
+ down_read(&mm->mmap_sem);
+ goto survive;
+ }
+ printk("VM: killing process %s\n", current->comm);
+ if (user_mode(regs))
+ do_exit(SIGKILL);
+ bad_page_fault(regs, address, SIGKILL);
+ return;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /* Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ current->thread.bad_vaddr = address;
+ info.si_code = SIGBUS;
+ info.si_signo = SIGBUS;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void *) address;
+ force_sig_info(SIGBUS, &info, current);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ bad_page_fault(regs, address, SIGBUS);
+ return;
+
+vmalloc_fault:
+ {
+ /* Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ */
+ struct mm_struct *act_mm = current->active_mm;
+ int index = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ if (act_mm == NULL)
+ goto bad_page_fault;
+
+ pgd = act_mm->pgd + index;
+ pgd_k = init_mm.pgd + index;
+
+ if (!pgd_present(*pgd_k))
+ goto bad_page_fault;
+
+ pgd_val(*pgd) = pgd_val(*pgd_k);
+
+ pmd = pmd_offset(pgd, address);
+ pmd_k = pmd_offset(pgd_k, address);
+ if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
+ goto bad_page_fault;
+
+ pmd_val(*pmd) = pmd_val(*pmd_k);
+ pte_k = pte_offset_kernel(pmd_k, address);
+
+ if (!pte_present(*pte_k))
+ goto bad_page_fault;
+ return;
+ }
+bad_page_fault:
+ bad_page_fault(regs, address, SIGKILL);
+ return;
+}
+
+
+void
+bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
+{
+ extern void die(const char*, struct pt_regs*, long);
+ const struct exception_table_entry *entry;
+
+ /* Are we prepared to handle this kernel fault? */
+ if ((entry = search_exception_tables(regs->pc)) != NULL) {
+#if 1
+ printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
+ current->comm, regs->pc, entry->fixup);
+#endif
+ current->thread.bad_uaddr = address;
+ regs->pc = entry->fixup;
+ return;
+ }
+
+ /* Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+ printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
+ "address %08lx\n pc = %08lx, ra = %08lx\n",
+ address, regs->pc, regs->areg[0]);
+ die("Oops", regs, sig);
+ do_exit(sig);
+}
+
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
new file mode 100644
index 00000000000..56aace84aae
--- /dev/null
+++ b/arch/xtensa/mm/init.c
@@ -0,0 +1,551 @@
+/*
+ * arch/xtensa/mm/init.c
+ *
+ * Derived from MIPS, PPC.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Marc Gauthier
+ * Kevin Chea
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/bootmem.h>
+#include <linux/swap.h>
+
+#include <asm/pgtable.h>
+#include <asm/bootparam.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+
+
+#define DEBUG 0
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+static DEFINE_SPINLOCK(tlb_lock);	/* protects the temporary DTLB mappings below */
+
+/*
+ * This flag is used to indicate that the page was mapped and modified in
+ * kernel space, so the cache is probably dirty at that address.
+ * If cache aliasing is enabled and the page color mismatches, update_mmu_cache
+ * synchronizes the caches if this bit is set.
+ */
+
+#define PG_cache_clean PG_arch_1
+
+/* References to section boundaries */
+
+extern char _ftext, _etext, _fdata, _edata, _rodata_end;
+extern char __init_begin, __init_end;
+
+/*
+ * mem_reserve(start, end, must_exist)
+ *
+ * Reserve some memory from the memory pool.
+ *
+ * Parameters:
+ * start Start of region,
+ * end End of region,
+ * must_exist Must exist in memory pool.
+ *
+ * Returns:
+ *	0 (no part of the region was in the memory pool)
+ *	-1 (the region was reserved successfully)
+ */
+
+int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
+{
+ int i;
+
+ if (start == end)
+ return 0;
+
+ start = start & PAGE_MASK;
+ end = PAGE_ALIGN(end);
+
+ for (i = 0; i < sysmem.nr_banks; i++)
+ if (start < sysmem.bank[i].end
+ && end >= sysmem.bank[i].start)
+ break;
+
+ if (i == sysmem.nr_banks) {
+ if (must_exist)
+ printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
+ "not in any region!\n", start, end);
+ return 0;
+ }
+
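+	/* The region overlaps bank i.  Trim the bank so it no longer
+	 * covers [start, end): shorten it, advance its base, split it
+	 * in two, or remove it entirely, depending on the overlap.
+	 */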
+ if (start > sysmem.bank[i].start) {
+ if (end < sysmem.bank[i].end) {
+ /* split entry */
+ if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
+ panic("meminfo overflow\n");
+ sysmem.bank[sysmem.nr_banks].start = end;
+ sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
+ sysmem.nr_banks++;
+ }
+ sysmem.bank[i].end = start;
+ } else {
+ if (end < sysmem.bank[i].end)
+ sysmem.bank[i].start = end;
+ else {
+ /* remove entry */
+ sysmem.nr_banks--;
+ sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
+ sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
+ }
+ }
+ return -1;
+}
+
+
+/*
+ * Initialize the bootmem system and give it all the memory we have available.
+ */
+
+void __init bootmem_init(void)
+{
+ unsigned long pfn;
+ unsigned long bootmap_start, bootmap_size;
+ int i;
+
+ max_low_pfn = max_pfn = 0;
+ min_low_pfn = ~0;
+
+ for (i=0; i < sysmem.nr_banks; i++) {
+ pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT;
+ if (pfn < min_low_pfn)
+ min_low_pfn = pfn;
+ pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT;
+ if (pfn > max_pfn)
+ max_pfn = pfn;
+ }
+
+ if (min_low_pfn > max_pfn)
+ panic("No memory found!\n");
+
+	max_low_pfn = max_pfn < (MAX_LOW_MEMORY >> PAGE_SHIFT) ?
+		max_pfn : (MAX_LOW_MEMORY >> PAGE_SHIFT);
+
+ /* Find an area to use for the bootmem bitmap. */
+
+ bootmap_size = bootmem_bootmap_pages(max_low_pfn) << PAGE_SHIFT;
+ bootmap_start = ~0;
+
+ for (i=0; i<sysmem.nr_banks; i++)
+ if (sysmem.bank[i].end - sysmem.bank[i].start >= bootmap_size) {
+ bootmap_start = sysmem.bank[i].start;
+ break;
+ }
+
+ if (bootmap_start == ~0UL)
+ panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
+
+ /* Reserve the bootmem bitmap area */
+
+ mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1);
+ bootmap_size = init_bootmem_node(NODE_DATA(0), min_low_pfn,
+ bootmap_start >> PAGE_SHIFT,
+ max_low_pfn);
+
+ /* Add all remaining memory pieces into the bootmem map */
+
+ for (i=0; i<sysmem.nr_banks; i++)
+ free_bootmem(sysmem.bank[i].start,
+ sysmem.bank[i].end - sysmem.bank[i].start);
+
+}
+
+
+void __init paging_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES];
+ int i;
+
+ /* All pages are DMA-able, so we put them all in the DMA zone. */
+
+ zones_size[ZONE_DMA] = max_low_pfn;
+ for (i = 1; i < MAX_NR_ZONES; i++)
+ zones_size[i] = 0;
+
+#ifdef CONFIG_HIGHMEM
+ zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
+#endif
+
+ /* Initialize the kernel's page tables. */
+
+ memset(swapper_pg_dir, 0, PAGE_SIZE);
+
+ free_area_init(zones_size);
+}
+
+/*
+ * Flush the mmu and reset associated register to default values.
+ */
+
+void __init init_mmu (void)
+{
+	/* Writing zeros to the <t>TLBCFG special registers ensures
+	 * that valid values exist in the register. For existing
+	 * PGSZID<w> fields, zero selects the first element of the
+	 * page-size array. For nonexistent PGSZID<w> fields, zero is
+ * the best value to write. Also, when changing PGSZID<w>
+ * fields, the corresponding TLB must be flushed.
+ */
+ set_itlbcfg_register (0);
+ set_dtlbcfg_register (0);
+ flush_tlb_all ();
+
+ /* Set rasid register to a known value. */
+
+ set_rasid_register (ASID_ALL_RESERVED);
+
+ /* Set PTEVADDR special register to the start of the page
+ * table, which is in kernel mappable space (ie. not
+ * statically mapped). This register's value is undefined on
+ * reset.
+ */
+ set_ptevaddr_register (PGTABLE_START);
+}
+
+/*
+ * Initialize memory pages.
+ */
+
+void __init mem_init(void)
+{
+ unsigned long codesize, reservedpages, datasize, initsize;
+ unsigned long highmemsize, tmp, ram;
+
+ max_mapnr = num_physpages = max_low_pfn;
+ high_memory = (void *) __va(max_mapnr << PAGE_SHIFT);
+ highmemsize = 0;
+
+#ifdef CONFIG_HIGHMEM
+#error HIGHMEM not implemented in init.c
+#endif
+
+ totalram_pages += free_all_bootmem();
+
+ reservedpages = ram = 0;
+ for (tmp = 0; tmp < max_low_pfn; tmp++) {
+ ram++;
+ if (PageReserved(mem_map+tmp))
+ reservedpages++;
+ }
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_ftext;
+ datasize = (unsigned long) &_edata - (unsigned long) &_fdata;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
+	       "%ldk data, %ldk init, %ldk highmem)\n",
+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ ram << (PAGE_SHIFT-10),
+ codesize >> 10,
+ reservedpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10,
+ highmemsize >> 10);
+}
+
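+/* Hand a reserved range back to the page allocator: clear the
+ * PG_reserved bit, give each page a reference count of one, free it,
+ * and account for it in totalram_pages.
+ */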
+void
+free_reserved_mem(void *start, void *end)
+{
+ for (; start < end; start += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
+ free_page((unsigned long)start);
+ totalram_pages++;
+ }
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+extern int initrd_is_mapped;
+
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (initrd_is_mapped) {
+ free_reserved_mem((void*)start, (void*)end);
+ printk ("Freeing initrd memory: %ldk freed\n",(end-start)>>10);
+ }
+}
+#endif
+
+void free_initmem(void)
+{
+ free_reserved_mem(&__init_begin, &__init_end);
+ printk("Freeing unused kernel memory: %dk freed\n",
+ (&__init_end - &__init_begin) >> 10);
+}
+
+void show_mem(void)
+{
+ int i, free = 0, total = 0, reserved = 0;
+ int shared = 0, cached = 0;
+
+ printk("Mem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+ i = max_mapnr;
+ while (i-- > 0) {
+ total++;
+ if (PageReserved(mem_map+i))
+ reserved++;
+ else if (PageSwapCache(mem_map+i))
+ cached++;
+ else if (!page_count(mem_map + i))
+ free++;
+ else
+ shared += page_count(mem_map + i) - 1;
+ }
+ printk("%d pages of RAM\n", total);
+ printk("%d reserved pages\n", reserved);
+ printk("%d pages shared\n", shared);
+ printk("%d pages swap cached\n",cached);
+ printk("%d free pages\n", free);
+}
+
+/* ------------------------------------------------------------------------- */
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+/*
+ * With cache aliasing, the page color of the page in kernel space and user
+ * space might mismatch. We temporarily map the page to a different virtual
+ * address with the same color and clear the page there.
+ */
+
+void clear_user_page(void *kaddr, unsigned long vaddr, struct page* page)
+{
+
+ /* There shouldn't be any entries for this page. */
+
+ __flush_invalidate_dcache_page_phys(__pa(page_address(page)));
+
+ if (!PAGE_COLOR_EQ(vaddr, kaddr)) {
+ unsigned long v, p;
+
+ /* Temporarily map page to DTLB_WAY_DCACHE_ALIAS0. */
+
+ spin_lock(&tlb_lock);
+
+ p = (unsigned long)pte_val((mk_pte(page,PAGE_KERNEL)));
+ kaddr = (void*)PAGE_COLOR_MAP0(vaddr);
+ v = (unsigned long)kaddr | DTLB_WAY_DCACHE_ALIAS0;
+ __asm__ __volatile__("wdtlb %0,%1; dsync" : :"a" (p), "a" (v));
+
+ clear_page(kaddr);
+
+ spin_unlock(&tlb_lock);
+ } else {
+ clear_page(kaddr);
+ }
+
+ /* We need to make sure that i$ and d$ are coherent. */
+
+ clear_bit(PG_cache_clean, &page->flags);
+}
+
+/*
+ * With cache aliasing, we have to make sure that the page color of the page
+ * in kernel space matches that of the virtual user address before we read
+ * the page. If the page colors differ, we create a temporary DTLB entry with
+ * the correct page color and use this 'temporary' address as the source.
+ * We then use the same approach as in clear_user_page and copy the data
+ * to the kernel space and clear the PG_cache_clean bit to synchronize caches
+ * later.
+ *
+ * Note:
+ * Instead of using another 'way' for the temporary DTLB entry, we could
+ * probably use the same entry that points to the kernel address (after
+ * saving the original value and restoring it when we are done).
+ */
+
+void copy_user_page(void* to, void* from, unsigned long vaddr,
+ struct page* to_page)
+{
+ /* There shouldn't be any entries for the new page. */
+
+ __flush_invalidate_dcache_page_phys(__pa(page_address(to_page)));
+
+ spin_lock(&tlb_lock);
+
+ if (!PAGE_COLOR_EQ(vaddr, from)) {
+ unsigned long v, p, t;
+
+ __asm__ __volatile__ ("pdtlb %1,%2; rdtlb1 %0,%1"
+ : "=a"(p), "=a"(t) : "a"(from));
+ from = (void*)PAGE_COLOR_MAP0(vaddr);
+ v = (unsigned long)from | DTLB_WAY_DCACHE_ALIAS0;
+ __asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
+ }
+
+ if (!PAGE_COLOR_EQ(vaddr, to)) {
+ unsigned long v, p;
+
+ p = (unsigned long)pte_val((mk_pte(to_page,PAGE_KERNEL)));
+ to = (void*)PAGE_COLOR_MAP1(vaddr);
+ v = (unsigned long)to | DTLB_WAY_DCACHE_ALIAS1;
+ __asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
+ }
+ copy_page(to, from);
+
+ spin_unlock(&tlb_lock);
+
+ /* We need to make sure that i$ and d$ are coherent. */
+
+ clear_bit(PG_cache_clean, &to_page->flags);
+}
+
+
+
+/*
+ * Any time the kernel writes to a user page cache page, or it is about to
+ * read from a page cache page this routine is called.
+ *
+ * Note:
+ * The kernel currently only provides one architecture bit in the page
+ * flags that we use for I$/D$ coherency. Maybe, in future, we can
+ *	use a separate bit for deferred dcache aliasing:
+ * If the page is not mapped yet, we only need to set a flag,
+ * if mapped, we need to invalidate the page.
+ */
+// FIXME: we probably need this for WB caches not only for Page Coloring..
+
+void flush_dcache_page(struct page *page)
+{
+ unsigned long addr = __pa(page_address(page));
+ struct address_space *mapping = page_mapping(page);
+
+ __flush_invalidate_dcache_page_phys(addr);
+
+ if (!test_bit(PG_cache_clean, &page->flags))
+ return;
+
+	/* If this page hasn't been mapped yet, handle I$/D$ coherency later. */
+#if 0
+ if (mapping && !mapping_mapped(mapping))
+ clear_bit(PG_cache_clean, &page->flags);
+ else
+#endif
+ __invalidate_icache_page_phys(addr);
+}
+
+void flush_cache_range(struct vm_area_struct* vma, unsigned long s,
+ unsigned long e)
+{
+ __flush_invalidate_cache_all();
+}
+
+void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
+ unsigned long pfn)
+{
+ struct page *page = pfn_to_page(pfn);
+
+ /* Remove any entry for the old mapping. */
+
+ if (current->active_mm == vma->vm_mm) {
+ unsigned long addr = __pa(page_address(page));
+ __flush_invalidate_dcache_page_phys(addr);
+ if ((vma->vm_flags & VM_EXEC) != 0)
+ __invalidate_icache_page_phys(addr);
+ } else {
+ BUG();
+ }
+}
+
+#endif /* (DCACHE_WAY_SIZE > PAGE_SIZE) */
+
+
+pte_t* pte_alloc_one_kernel (struct mm_struct* mm, unsigned long addr)
+{
+	pte_t* pte = (pte_t*)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+	if (likely(pte)) {
+		pte_t* ptep = pte;	/* the new page itself holds the 1024 PTEs */
+		int i;
+		for (i = 0; i < 1024; i++, ptep++)
+			pte_clear(mm, addr, ptep);
+	}
+	return pte;
+}
+
+struct page* pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ struct page *page;
+
+ page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, 0);
+
+ if (likely(page)) {
+		pte_t* ptep = kmap_atomic(page, KM_USER0);
+		pte_t* start = ptep;
+		int i;
+
+		for (i = 0; i < 1024; i++, ptep++)
+			pte_clear(mm, addr, ptep);
+
+		/* ptep now points past the page; unmap the original address */
+		kunmap_atomic(start, KM_USER0);
+ }
+ return page;
+}
+
+
+/*
+ * Handle D$/I$ coherency.
+ *
+ * Note:
+ * We only have one architecture bit for the page flags, so we cannot handle
+ * cache aliasing, yet.
+ */
+
+void
+update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
+{
+ unsigned long pfn = pte_pfn(pte);
+ struct page *page;
+ unsigned long vaddr = addr & PAGE_MASK;
+
+ if (!pfn_valid(pfn))
+ return;
+
+ page = pfn_to_page(pfn);
+
+ invalidate_itlb_mapping(addr);
+ invalidate_dtlb_mapping(addr);
+
+ /* We have a new mapping. Use it. */
+
+ write_dtlb_entry(pte, dtlb_probe(addr));
+
+ /* If the processor can execute from this page, synchronize D$/I$. */
+
+ if ((vma->vm_flags & VM_EXEC) != 0) {
+
+ write_itlb_entry(pte, itlb_probe(addr));
+
+ /* Synchronize caches, if not clean. */
+
+ if (!test_and_set_bit(PG_cache_clean, &page->flags)) {
+ __flush_dcache_page(vaddr);
+ __invalidate_icache_page(vaddr);
+ }
+ }
+}
+
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
new file mode 100644
index 00000000000..327c0f17187
--- /dev/null
+++ b/arch/xtensa/mm/misc.S
@@ -0,0 +1,374 @@
+/*
+ * arch/xtensa/mm/misc.S
+ *
+ * Miscellaneous assembly functions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+/* Note: we might want to implement some of the loops as zero-overhead-loops,
+ * where applicable and if supported by the processor.
+ */
+
+#include <linux/linkage.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#include <xtensa/cacheasm.h>
+#include <xtensa/cacheattrasm.h>
+
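+/* All functions below use the Xtensa windowed ABI: "entry" allocates
+ * the register window and stack frame, incoming arguments arrive in
+ * a2 and up, and "retw" returns to the caller's window.
+ */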
+/* clear_page (page) */
+
+ENTRY(clear_page)
+ entry a1, 16
+ addi a4, a2, PAGE_SIZE
+ movi a3, 0
+
+1: s32i a3, a2, 0
+ s32i a3, a2, 4
+ s32i a3, a2, 8
+ s32i a3, a2, 12
+ s32i a3, a2, 16
+ s32i a3, a2, 20
+ s32i a3, a2, 24
+ s32i a3, a2, 28
+ addi a2, a2, 32
+ blt a2, a4, 1b
+
+ retw
+
+/*
+ * copy_page (void *to, void *from)
+ * a2 a3
+ */
+
+ENTRY(copy_page)
+ entry a1, 16
+ addi a4, a2, PAGE_SIZE
+
+1: l32i a5, a3, 0
+ l32i a6, a3, 4
+ l32i a7, a3, 8
+ s32i a5, a2, 0
+ s32i a6, a2, 4
+ s32i a7, a2, 8
+ l32i a5, a3, 12
+ l32i a6, a3, 16
+ l32i a7, a3, 20
+ s32i a5, a2, 12
+ s32i a6, a2, 16
+ s32i a7, a2, 20
+ l32i a5, a3, 24
+ l32i a6, a3, 28
+ s32i a5, a2, 24
+ s32i a6, a2, 28
+ addi a2, a2, 32
+ addi a3, a3, 32
+ blt a2, a4, 1b
+
+ retw
+
+
+/*
+ * void __flush_invalidate_cache_all(void)
+ */
+
+ENTRY(__flush_invalidate_cache_all)
+ entry sp, 16
+ dcache_writeback_inv_all a2, a3
+ icache_invalidate_all a2, a3
+ retw
+
+/*
+ * void __invalidate_icache_all(void)
+ */
+
+ENTRY(__invalidate_icache_all)
+ entry sp, 16
+ icache_invalidate_all a2, a3
+ retw
+
+/*
+ * void __flush_invalidate_dcache_all(void)
+ */
+
+ENTRY(__flush_invalidate_dcache_all)
+ entry sp, 16
+ dcache_writeback_inv_all a2, a3
+ retw
+
+
+/*
+ * void __flush_invalidate_cache_range(ulong start, ulong size)
+ */
+
+ENTRY(__flush_invalidate_cache_range)
+ entry sp, 16
+ mov a4, a2
+ mov a5, a3
+ dcache_writeback_inv_region a4, a5, a6
+ icache_invalidate_region a2, a3, a4
+ retw
+
+/*
+ * void __invalidate_icache_page(ulong start)
+ */
+
+ENTRY(__invalidate_icache_page)
+ entry sp, 16
+ movi a3, PAGE_SIZE
+ icache_invalidate_region a2, a3, a4
+ retw
+
+/*
+ * void __invalidate_dcache_page(ulong start)
+ */
+
+ENTRY(__invalidate_dcache_page)
+ entry sp, 16
+ movi a3, PAGE_SIZE
+ dcache_invalidate_region a2, a3, a4
+ retw
+
+/*
+ * void __invalidate_icache_range(ulong start, ulong size)
+ */
+
+ENTRY(__invalidate_icache_range)
+ entry sp, 16
+ icache_invalidate_region a2, a3, a4
+ retw
+
+/*
+ * void __invalidate_dcache_range(ulong start, ulong size)
+ */
+
+ENTRY(__invalidate_dcache_range)
+ entry sp, 16
+ dcache_invalidate_region a2, a3, a4
+ retw
+
+/*
+ * void __flush_dcache_page(ulong start)
+ */
+
+ENTRY(__flush_dcache_page)
+ entry sp, 16
+ movi a3, PAGE_SIZE
+ dcache_writeback_region a2, a3, a4
+ retw
+
+/*
+ * void __flush_invalidate_dcache_page(ulong start)
+ */
+
+ENTRY(__flush_invalidate_dcache_page)
+ entry sp, 16
+ movi a3, PAGE_SIZE
+ dcache_writeback_inv_region a2, a3, a4
+ retw
+
+/*
+ * void __flush_invalidate_dcache_range(ulong start, ulong size)
+ */
+
+ENTRY(__flush_invalidate_dcache_range)
+ entry sp, 16
+ dcache_writeback_inv_region a2, a3, a4
+ retw
+
+/*
+ * void __invalidate_dcache_all(void)
+ */
+
+ENTRY(__invalidate_dcache_all)
+ entry sp, 16
+ dcache_invalidate_all a2, a3
+ retw
+
+/*
+ * void __flush_invalidate_dcache_page_phys(ulong start)
+ */
+
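+/* Walk the dcache tags from the top down, reading each tag with ldct
+ * and writing back and invalidating (diwbi) every line whose tag
+ * matches the given physical page.  Setting bit 0 in both the mask
+ * and the target address compares the tag's valid bit together with
+ * the page frame address.
+ */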
+ENTRY(__flush_invalidate_dcache_page_phys)
+ entry sp, 16
+
+ movi a3, XCHAL_DCACHE_SIZE
+ movi a4, PAGE_MASK | 1
+ addi a2, a2, 1
+
+1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
+
+ ldct a6, a3
+ dsync
+ and a6, a6, a4
+ beq a6, a2, 2f
+ bgeui a3, 2, 1b
+ retw
+
+2: diwbi a3, 0
+ bgeui a3, 2, 1b
+ retw
+
+ENTRY(check_dcache_low0)
+ entry sp, 16
+
+ movi a3, XCHAL_DCACHE_SIZE / 4
+ movi a4, PAGE_MASK | 1
+ addi a2, a2, 1
+
+1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
+
+ ldct a6, a3
+ dsync
+ and a6, a6, a4
+ beq a6, a2, 2f
+ bgeui a3, 2, 1b
+ retw
+
+2: j 2b
+
+ENTRY(check_dcache_high0)
+ entry sp, 16
+
+ movi a5, XCHAL_DCACHE_SIZE / 4
+ movi a3, XCHAL_DCACHE_SIZE / 2
+ movi a4, PAGE_MASK | 1
+ addi a2, a2, 1
+
+1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
+ addi a5, a5, -XCHAL_DCACHE_LINESIZE
+
+ ldct a6, a3
+ dsync
+ and a6, a6, a4
+ beq a6, a2, 2f
+ bgeui a5, 2, 1b
+ retw
+
+2: j 2b
+
+ENTRY(check_dcache_low1)
+ entry sp, 16
+
+ movi a5, XCHAL_DCACHE_SIZE / 4
+ movi a3, XCHAL_DCACHE_SIZE * 3 / 4
+ movi a4, PAGE_MASK | 1
+ addi a2, a2, 1
+
+1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
+ addi a5, a5, -XCHAL_DCACHE_LINESIZE
+
+ ldct a6, a3
+ dsync
+ and a6, a6, a4
+ beq a6, a2, 2f
+ bgeui a5, 2, 1b
+ retw
+
+2: j 2b
+
+ENTRY(check_dcache_high1)
+ entry sp, 16
+
+ movi a5, XCHAL_DCACHE_SIZE / 4
+ movi a3, XCHAL_DCACHE_SIZE
+ movi a4, PAGE_MASK | 1
+ addi a2, a2, 1
+
+1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
+ addi a5, a5, -XCHAL_DCACHE_LINESIZE
+
+ ldct a6, a3
+ dsync
+ and a6, a6, a4
+ beq a6, a2, 2f
+ bgeui a5, 2, 1b
+ retw
+
+2: j 2b
+
+
+/*
+ * void __invalidate_icache_page_phys(ulong start)
+ */
+
+ENTRY(__invalidate_icache_page_phys)
+ entry sp, 16
+
+ movi a3, XCHAL_ICACHE_SIZE
+ movi a4, PAGE_MASK | 1
+ addi a2, a2, 1
+
+1: addi a3, a3, -XCHAL_ICACHE_LINESIZE
+
+ lict a6, a3
+ isync
+ and a6, a6, a4
+ beq a6, a2, 2f
+ bgeui a3, 2, 1b
+ retw
+
+2: iii a3, 0
+ bgeui a3, 2, 1b
+ retw
+
+
+#if 0
+
+ movi a3, XCHAL_DCACHE_WAYS - 1
+ movi a4, PAGE_SIZE
+
+1: mov a5, a2
+ add a6, a2, a4
+
+2: diwbi a5, 0
+ diwbi a5, XCHAL_DCACHE_LINESIZE
+ diwbi a5, XCHAL_DCACHE_LINESIZE * 2
+ diwbi a5, XCHAL_DCACHE_LINESIZE * 3
+
+ addi a5, a5, XCHAL_DCACHE_LINESIZE * 4
+ blt a5, a6, 2b
+
+ addi a3, a3, -1
+ addi a2, a2, XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS
+ bgez a3, 1b
+
+ retw
+
+ENTRY(__invalidate_icache_page_index)
+ entry sp, 16
+
+ movi a3, XCHAL_ICACHE_WAYS - 1
+ movi a4, PAGE_SIZE
+
+1: mov a5, a2
+ add a6, a2, a4
+
+2: iii a5, 0
+ iii a5, XCHAL_ICACHE_LINESIZE
+ iii a5, XCHAL_ICACHE_LINESIZE * 2
+ iii a5, XCHAL_ICACHE_LINESIZE * 3
+
+ addi a5, a5, XCHAL_ICACHE_LINESIZE * 4
+ blt a5, a6, 2b
+
+ addi a3, a3, -1
+ addi a2, a2, XCHAL_ICACHE_SIZE / XCHAL_ICACHE_WAYS
+	bgez	a3, 1b
+
+ retw
+
+#endif
+
diff --git a/arch/xtensa/mm/pgtable.c b/arch/xtensa/mm/pgtable.c
new file mode 100644
index 00000000000..e5e119c820e
--- /dev/null
+++ b/arch/xtensa/mm/pgtable.c
@@ -0,0 +1,76 @@
+/*
+ * arch/xtensa/mm/pgtable.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
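+/* Page-table pages are allocated in groups so that a page of the
+ * cache color matching the faulting address can be kept and the rest
+ * freed.  ADDR_COLOR()/PADDR_COLOR(), COLOR_ORDER, COLOR_SIZE and
+ * PTE_ORDER are assumed to come from the cache-coloring headers; this
+ * whole file compiles away unless DCACHE_SIZE exceeds PAGE_SIZE.
+ */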
+#if (DCACHE_SIZE > PAGE_SIZE)
+
+pte_t* pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+	pte_t *pte = NULL, *p;
+	int color = ADDR_COLOR(address);
+	int i;
+
+	p = (pte_t*) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, COLOR_ORDER);
+
+	if (likely(p)) {
+		struct page *page;
+
+		/* Keep the page with the matching color and return the
+		 * other pages of the group to the allocator.
+		 */
+		for (i = 0; i < COLOR_SIZE; i++,
+		     p = (pte_t*)((unsigned long)p + PAGE_SIZE)) {
+			page = virt_to_page(p);
+
+			set_page_count(page, 1);
+			ClearPageCompound(page);
+
+			if (ADDR_COLOR((unsigned long)p) == color)
+				pte = p;
+			else
+				free_page((unsigned long)p);
+		}
+		if (pte)
+			clear_page(pte);
+	}
+	return pte;
+}
+
+#ifdef PROFILING
+
+int mask;
+int hit;
+int flush;
+
+#endif
+
+struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	struct page *page = NULL, *p;
+	int color = ADDR_COLOR(address);
+	int i;
+
+	p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+
+	if (likely(p)) {
+		/* As above: keep the correctly colored page, free the rest. */
+		for (i = 0; i < COLOR_SIZE; i++, p++) {
+			set_page_count(p, 1);
+			ClearPageCompound(p);
+
+			if (PADDR_COLOR(page_address(p)) == color)
+				page = p;
+			else
+				__free_page(p);
+		}
+		if (page)
+			clear_highpage(page);
+	}
+
+	return page;
+}
+
+#endif
+
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
new file mode 100644
index 00000000000..d3bd3bfc3b3
--- /dev/null
+++ b/arch/xtensa/mm/tlb.c
@@ -0,0 +1,545 @@
+/*
+ * arch/xtensa/mm/tlb.c
+ *
+ * Logic that manipulates the Xtensa MMU. Derived from MIPS.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2003 Tensilica Inc.
+ *
+ * Joe Taylor
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier
+ */
+
+#include <linux/mm.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/cacheflush.h>
+
+
+static inline void __flush_itlb_all (void)
+{
+ int way, index;
+
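+	/* An auto-refill entry is addressed by its way number in the low
+	 * bits with the entry index placed in the VPN field
+	 * (index << PAGE_SHIFT).
+	 */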
+ for (way = 0; way < XCHAL_ITLB_ARF_WAYS; way++) {
+ for (index = 0; index < ITLB_ENTRIES_PER_ARF_WAY; index++) {
+ int entry = way + (index << PAGE_SHIFT);
+ invalidate_itlb_entry_no_isync (entry);
+ }
+ }
+ asm volatile ("isync\n");
+}
+
+static inline void __flush_dtlb_all (void)
+{
+ int way, index;
+
+ for (way = 0; way < XCHAL_DTLB_ARF_WAYS; way++) {
+ for (index = 0; index < DTLB_ENTRIES_PER_ARF_WAY; index++) {
+ int entry = way + (index << PAGE_SHIFT);
+ invalidate_dtlb_entry_no_isync (entry);
+ }
+ }
+ asm volatile ("isync\n");
+}
+
+
+void flush_tlb_all (void)
+{
+ __flush_itlb_all();
+ __flush_dtlb_all();
+}
+
+/* If mm is the current task's mm, we simply assign it a new ASID, thus
+ * invalidating all previous tlb entries. If mm is someone else's user mapping,
+ * we invalidate the context, so that when that user mapping is swapped in,
+ * a new context will be assigned to it.
+ */
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+#if 0
+ printk("[tlbmm<%lx>]\n", (unsigned long)mm->context);
+#endif
+
+ if (mm == current->active_mm) {
+		unsigned long flags;
+		local_irq_save(flags);
+ get_new_mmu_context(mm, asid_cache);
+ set_rasid_register(ASID_INSERT(mm->context));
+ local_irq_restore(flags);
+ }
+ else
+ mm->context = 0;
+}
+
+void flush_tlb_range (struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long flags;
+
+ if (mm->context == NO_CONTEXT)
+ return;
+
+#if 0
+ printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
+ (unsigned long)mm->context, start, end);
+#endif
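+	/* Small ranges are invalidated page by page under the target
+	 * mm's ASID; for anything larger it is cheaper to hand the mm a
+	 * fresh context, implicitly discarding all of its TLB entries.
+	 */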
+	local_irq_save(flags);
+
+ if (end-start + (PAGE_SIZE-1) <= SMALLEST_NTLB_ENTRIES << PAGE_SHIFT) {
+ int oldpid = get_rasid_register();
+ set_rasid_register (ASID_INSERT(mm->context));
+ start &= PAGE_MASK;
+ if (vma->vm_flags & VM_EXEC)
+ while(start < end) {
+ invalidate_itlb_mapping(start);
+ invalidate_dtlb_mapping(start);
+ start += PAGE_SIZE;
+ }
+ else
+ while(start < end) {
+ invalidate_dtlb_mapping(start);
+ start += PAGE_SIZE;
+ }
+
+ set_rasid_register(oldpid);
+ } else {
+ get_new_mmu_context(mm, asid_cache);
+ if (mm == current->active_mm)
+ set_rasid_register(ASID_INSERT(mm->context));
+ }
+ local_irq_restore(flags);
+}
+
+void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
+{
+ struct mm_struct* mm = vma->vm_mm;
+ unsigned long flags;
+ int oldpid;
+#if 0
+ printk("[tlbpage<%02lx,%08lx>]\n",
+ (unsigned long)mm->context, page);
+#endif
+
+	if (mm->context == NO_CONTEXT)
+		return;
+
+	local_irq_save(flags);
+
+ oldpid = get_rasid_register();
+
+ if (vma->vm_flags & VM_EXEC)
+ invalidate_itlb_mapping(page);
+ invalidate_dtlb_mapping(page);
+
+ set_rasid_register(oldpid);
+
+ local_irq_restore(flags);
+
+#if 0
+ flush_tlb_all();
+ return;
+#endif
+}
+
+
+#ifdef DEBUG_TLB
+
+#define USE_ITLB 0
+#define USE_DTLB 1
+
+struct way_config_t {
+ int indicies;
+ int indicies_log2;
+ int pgsz_log2;
+ int arf;
+};
+
+static struct way_config_t itlb[XCHAL_ITLB_WAYS] =
+{
+ { XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES_LOG2),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, PAGESZ_LOG2_MIN),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ARF)
+ },
+ { XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES_LOG2),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, PAGESZ_LOG2_MIN),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ARF)
+ },
+ { XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES_LOG2),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, PAGESZ_LOG2_MIN),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ARF)
+ },
+ { XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES_LOG2),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, PAGESZ_LOG2_MIN),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ARF)
+ },
+ { XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES_LOG2),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, PAGESZ_LOG2_MIN),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ARF)
+ },
+ { XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES_LOG2),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, PAGESZ_LOG2_MIN),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ARF)
+ },
+ { XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES_LOG2),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, PAGESZ_LOG2_MIN),
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ARF)
+ }
+};
+
+static struct way_config_t dtlb[XCHAL_DTLB_WAYS] =
+{
+ { XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES_LOG2),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, PAGESZ_LOG2_MIN),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ARF)
+ },
+ { XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES_LOG2),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, PAGESZ_LOG2_MIN),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ARF)
+ },
+ { XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES_LOG2),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, PAGESZ_LOG2_MIN),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ARF)
+ },
+ { XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES_LOG2),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, PAGESZ_LOG2_MIN),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ARF)
+ },
+ { XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES_LOG2),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, PAGESZ_LOG2_MIN),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ARF)
+ },
+ { XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES_LOG2),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, PAGESZ_LOG2_MIN),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ARF)
+ },
+ { XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES_LOG2),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, PAGESZ_LOG2_MIN),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ARF)
+ },
+ { XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES_LOG2),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, PAGESZ_LOG2_MIN),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ARF)
+ },
+ { XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES_LOG2),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, PAGESZ_LOG2_MIN),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ARF)
+ },
+ { XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES_LOG2),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, PAGESZ_LOG2_MIN),
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ARF)
+ }
+};
+
+/* Total number of entries: */
+#define ITLB_TOTAL_ENTRIES \
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES) + \
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES) + \
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES) + \
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES) + \
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES) + \
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES) + \
+ XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES)
+#define DTLB_TOTAL_ENTRIES \
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES) + \
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES) + \
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES) + \
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES) + \
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES) + \
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES) + \
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES) + \
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES) + \
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES) + \
+ XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES)
+
+
+typedef struct {
+ unsigned va;
+ unsigned pa;
+ unsigned char asid;
+ unsigned char ca;
+ unsigned char way;
+ unsigned char index;
+ unsigned char pgsz_log2; /* 0 .. 32 */
+ unsigned char type; /* 0=ITLB 1=DTLB */
+} tlb_dump_entry_t;
+
+/* Return -1 if a precedes b, +1 if a follows b, 0 if same: */
+int cmp_tlb_dump_info( tlb_dump_entry_t *a, tlb_dump_entry_t *b )
+{
+ if (a->asid < b->asid) return -1;
+ if (a->asid > b->asid) return 1;
+ if (a->va < b->va) return -1;
+ if (a->va > b->va) return 1;
+ if (a->pa < b->pa) return -1;
+ if (a->pa > b->pa) return 1;
+ if (a->ca < b->ca) return -1;
+ if (a->ca > b->ca) return 1;
+ if (a->way < b->way) return -1;
+ if (a->way > b->way) return 1;
+ if (a->index < b->index) return -1;
+ if (a->index > b->index) return 1;
+ return 0;
+}
+
+void sort_tlb_dump_info( tlb_dump_entry_t *t, int n )
+{
+ int i, j;
+ /* Simple O(n*n) sort: */
+ for (i = 0; i < n-1; i++)
+ for (j = i+1; j < n; j++)
+ if (cmp_tlb_dump_info(t+i, t+j) > 0) {
+ tlb_dump_entry_t tmp = t[i];
+ t[i] = t[j];
+ t[j] = tmp;
+ }
+}
+
+
+static tlb_dump_entry_t itlb_dump_info[ITLB_TOTAL_ENTRIES];
+static tlb_dump_entry_t dtlb_dump_info[DTLB_TOTAL_ENTRIES];
+
+
+static inline char *way_type (int type)
+{
+ return type ? "autorefill" : "non-autorefill";
+}
+
+void print_entry (struct way_config_t *way_info,
+ unsigned int way,
+ unsigned int index,
+ unsigned int virtual,
+ unsigned int translation)
+{
+ char valid_chr;
+ unsigned int va, pa, asid, ca;
+
+ va = virtual &
+ ~((1 << (way_info->pgsz_log2 + way_info->indicies_log2)) - 1);
+ asid = virtual & ((1 << XCHAL_MMU_ASID_BITS) - 1);
+ pa = translation & ~((1 << way_info->pgsz_log2) - 1);
+ ca = translation & ((1 << XCHAL_MMU_CA_BITS) - 1);
+ valid_chr = asid ? 'V' : 'I';
+
+ /* Compute and incorporate the effect of the index bits on the
+ * va. It's more useful for kernel debugging, since we always
+ * want to know the effective va anyway. */
+
+ va += index << way_info->pgsz_log2;
+
+ printk ("\t[%d,%d] (%c) vpn 0x%.8x ppn 0x%.8x asid 0x%.2x am 0x%x\n",
+ way, index, valid_chr, va, pa, asid, ca);
+}
+
+void print_itlb_entry (struct way_config_t *way_info, int way, int index)
+{
+ print_entry (way_info, way, index,
+ read_itlb_virtual (way + (index << way_info->pgsz_log2)),
+ read_itlb_translation (way + (index << way_info->pgsz_log2)));
+}
+
+void print_dtlb_entry (struct way_config_t *way_info, int way, int index)
+{
+ print_entry (way_info, way, index,
+ read_dtlb_virtual (way + (index << way_info->pgsz_log2)),
+ read_dtlb_translation (way + (index << way_info->pgsz_log2)));
+}
+
+void dump_itlb (void)
+{
+ int way, index;
+
+ printk ("\nITLB: ways = %d\n", XCHAL_ITLB_WAYS);
+
+ for (way = 0; way < XCHAL_ITLB_WAYS; way++) {
+ printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
+ way, itlb[way].indicies,
+ itlb[way].pgsz_log2, way_type(itlb[way].arf));
+ for (index = 0; index < itlb[way].indicies; index++) {
+ print_itlb_entry(&itlb[way], way, index);
+ }
+ }
+}
+
+void dump_dtlb (void)
+{
+ int way, index;
+
+ printk ("\nDTLB: ways = %d\n", XCHAL_DTLB_WAYS);
+
+ for (way = 0; way < XCHAL_DTLB_WAYS; way++) {
+ printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
+ way, dtlb[way].indicies,
+ dtlb[way].pgsz_log2, way_type(dtlb[way].arf));
+ for (index = 0; index < dtlb[way].indicies; index++) {
+ print_dtlb_entry(&dtlb[way], way, index);
+ }
+ }
+}
+
+void dump_tlb (tlb_dump_entry_t *tinfo, struct way_config_t *config,
+ int entries, int ways, int type, int show_invalid)
+{
+ tlb_dump_entry_t *e = tinfo;
+ int way, i;
+
+ /* Gather all info: */
+ for (way = 0; way < ways; way++) {
+ struct way_config_t *cfg = config + way;
+ for (i = 0; i < cfg->indicies; i++) {
+ unsigned wayindex = way + (i << cfg->pgsz_log2);
+ unsigned vv = (type ? read_dtlb_virtual (wayindex)
+ : read_itlb_virtual (wayindex));
+ unsigned pp = (type ? read_dtlb_translation (wayindex)
+ : read_itlb_translation (wayindex));
+
+ /* Compute and incorporate the effect of the index bits on the
+ * va. It's more useful for kernel debugging, since we always
+ * want to know the effective va anyway. */
+
+ e->va = (vv & ~((1 << (cfg->pgsz_log2 + cfg->indicies_log2)) - 1));
+ e->va += (i << cfg->pgsz_log2);
+ e->pa = (pp & ~((1 << cfg->pgsz_log2) - 1));
+ e->asid = (vv & ((1 << XCHAL_MMU_ASID_BITS) - 1));
+ e->ca = (pp & ((1 << XCHAL_MMU_CA_BITS) - 1));
+ e->way = way;
+ e->index = i;
+ e->pgsz_log2 = cfg->pgsz_log2;
+ e->type = type;
+ e++;
+ }
+ }
+#if 1
+ /* Sort by ASID and VADDR: */
+ sort_tlb_dump_info (tinfo, entries);
+#endif
+
+ /* Display all sorted info: */
+ printk ("\n%cTLB dump:\n", (type ? 'D' : 'I'));
+ for (e = tinfo, i = 0; i < entries; i++, e++) {
+#if 0
+ if (e->asid == 0 && !show_invalid)
+ continue;
+#endif
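+		/* Print the page size as a value/unit pair: pgsz_log2 is
+		 * split into a power within the decade (1 << (log2 % 10))
+		 * and a unit character indexed by log2 / 10, so e.g.
+		 * pgsz_log2 == 12 prints as "4 kB".
+		 */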
+ printk ("%c way=%d i=%d ASID=%02X V=%08X -> P=%08X CA=%X (%d %cB)\n",
+ (e->type ? 'D' : 'I'), e->way, e->index,
+ e->asid, e->va, e->pa, e->ca,
+ (1 << (e->pgsz_log2 % 10)),
+ " kMG"[e->pgsz_log2 / 10]
+ );
+ }
+}
+
+void dump_tlbs2 (int showinv)
+{
+ dump_tlb (itlb_dump_info, itlb, ITLB_TOTAL_ENTRIES, XCHAL_ITLB_WAYS, 0, showinv);
+ dump_tlb (dtlb_dump_info, dtlb, DTLB_TOTAL_ENTRIES, XCHAL_DTLB_WAYS, 1, showinv);
+}
+
+void dump_all_tlbs (void)
+{
+ dump_tlbs2 (1);
+}
+
+void dump_valid_tlbs (void)
+{
+ dump_tlbs2 (0);
+}
+
+
+void dump_tlbs (void)
+{
+ dump_itlb();
+ dump_dtlb();
+}
+
+void dump_cache_tag(int dcache, int idx)
+{
+ int w, i, s, e;
+ unsigned long tag, index;
+ unsigned long num_lines, num_ways, cache_size, line_size;
+
+ num_ways = dcache ? XCHAL_DCACHE_WAYS : XCHAL_ICACHE_WAYS;
+ cache_size = dcache ? XCHAL_DCACHE_SIZE : XCHAL_ICACHE_SIZE;
+ line_size = dcache ? XCHAL_DCACHE_LINESIZE : XCHAL_ICACHE_LINESIZE;
+
+ num_lines = cache_size / num_ways;
+
+ s = 0; e = num_lines;
+
+ if (idx >= 0)
+ e = (s = idx * line_size) + 1;
+
+ for (i = s; i < e; i+= line_size) {
+ printk("\nline %#08x:", i);
+ for (w = 0; w < num_ways; w++) {
+ index = w * num_lines + i;
+ if (dcache)
+ __asm__ __volatile__("ldct %0, %1\n\t"
+ : "=a"(tag) : "a"(index));
+ else
+ __asm__ __volatile__("lict %0, %1\n\t"
+ : "=a"(tag) : "a"(index));
+
+ printk(" %#010lx", tag);
+ }
+ }
+ printk ("\n");
+}
+
+void dump_icache(int index)
+{
+ unsigned long data, addr;
+ int w, i;
+
+ const unsigned long num_ways = XCHAL_ICACHE_WAYS;
+ const unsigned long cache_size = XCHAL_ICACHE_SIZE;
+ const unsigned long line_size = XCHAL_ICACHE_LINESIZE;
+ const unsigned long num_lines = cache_size / num_ways / line_size;
+
+ for (w = 0; w < num_ways; w++) {
+ printk ("\nWay %d", w);
+
+ for (i = 0; i < line_size; i+= 4) {
+ addr = w * num_lines + index * line_size + i;
+ __asm__ __volatile__("licw %0, %1\n\t"
+ : "=a"(data) : "a"(addr));
+ printk(" %#010lx", data);
+ }
+ }
+ printk ("\n");
+}
+
+void dump_cache_tags(void)
+{
+ printk("Instruction cache\n");
+ dump_cache_tag(0, -1);
+ printk("Data cache\n");
+ dump_cache_tag(1, -1);
+}
+
+#endif
diff --git a/arch/xtensa/platform-iss/Makefile b/arch/xtensa/platform-iss/Makefile
new file mode 100644
index 00000000000..5b394e9620e
--- /dev/null
+++ b/arch/xtensa/platform-iss/Makefile
@@ -0,0 +1,13 @@
+# $Id: Makefile,v 1.1.1.1 2002/08/28 16:10:14 aroll Exp $
+#
+# Makefile for the Xtensa Instruction Set Simulator (ISS)
+# "prom monitor" library routines under Linux.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are in the main makefile...
+
+obj-y = io.o console.o setup.o network.o
+
diff --git a/arch/xtensa/platform-iss/console.c b/arch/xtensa/platform-iss/console.c
new file mode 100644
index 00000000000..9e2b53f6a90
--- /dev/null
+++ b/arch/xtensa/platform-iss/console.c
@@ -0,0 +1,303 @@
+/*
+ * arch/xtensa/platform-iss/console.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2005 Tensilica Inc.
+ * Authors:	Christian Zankel, Joe Taylor
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/major.h>
+#include <linux/param.h>
+#include <linux/serial.h>
+#include <linux/serialP.h>
+#include <linux/console.h>
+
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+
+#include <xtensa/simcall.h>
+
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#ifdef SERIAL_INLINE
+#define _INLINE_ inline
+#endif
+
+#define SERIAL_MAX_NUM_LINES 1
+#define SERIAL_TIMER_VALUE (20 * HZ)
+
+static struct tty_driver *serial_driver;
+static struct timer_list serial_timer;
+
+static DEFINE_SPINLOCK(timer_lock);
+
+static int errno;
+
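+/* Issue a simcall to the ISS host.  Under the windowed ABI the six
+ * arguments arrive in registers a2..a7, which is where the simulator
+ * expects them; the result comes back in a2 and the host errno in a3.
+ */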
+static int __simc (int a, int b, int c, int d, int e, int f)
+{
+ int ret;
+ __asm__ __volatile__ ("simcall\n"
+ "mov %0, a2\n"
+ "mov %1, a3\n" : "=a" (ret), "=a" (errno)
+ : : "a2", "a3");
+ return ret;
+}
+
+static char *serial_version = "0.1";
+static char *serial_name = "ISS serial driver";
+
+/*
+ * This routine is called whenever a serial port is opened.  The ISS
+ * has no serial interrupt; instead, the first open starts a timer
+ * that polls the simulator for input.
+ */
+
+static void rs_poll(unsigned long);
+
+static int rs_open(struct tty_struct *tty, struct file * filp)
+{
+ int line = tty->index;
+
+ if ((line < 0) || (line >= SERIAL_MAX_NUM_LINES))
+ return -ENODEV;
+
+ spin_lock(&timer_lock);
+
+ if (tty->count == 1) {
+ init_timer(&serial_timer);
+ serial_timer.data = (unsigned long) tty;
+ serial_timer.function = rs_poll;
+ mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
+ }
+ spin_unlock(&timer_lock);
+
+ return 0;
+}
+
+
+/*
+ * ------------------------------------------------------------
+ * rs_close()
+ *
+ * This routine is called when the serial port gets closed.  The last
+ * close stops the polling timer; there is no IRQ or async structure
+ * to tear down on the ISS.
+ * ------------------------------------------------------------
+ */
+static void rs_close(struct tty_struct *tty, struct file * filp)
+{
+ spin_lock(&timer_lock);
+ if (tty->count == 1)
+ del_timer_sync(&serial_timer);
+ spin_unlock(&timer_lock);
+}
+
+
+static int rs_write(struct tty_struct * tty,
+ const unsigned char *buf, int count)
+{
+ /* see drivers/char/serialX.c to reference original version */
+
+ __simc (SYS_write, 1, (unsigned long)buf, count, 0, 0);
+ return count;
+}
+
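+/* Poll the simulator for console input from the timer.  SYS_select_one
+ * with a zeroed timeval never blocks, so each tick drains whatever
+ * characters are pending and pushes them to the tty layer.
+ */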
+static void rs_poll(unsigned long priv)
+{
+ struct tty_struct* tty = (struct tty_struct*) priv;
+
+ struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
+ int i = 0;
+ unsigned char c;
+
+ spin_lock(&timer_lock);
+
+ while (__simc(SYS_select_one, 0, XTISS_SELECT_ONE_READ, (int)&tv,0,0)){
+ __simc (SYS_read, 0, (unsigned long)&c, 1, 0, 0);
+ tty->flip.count++;
+ *tty->flip.char_buf_ptr++ = c;
+ *tty->flip.flag_buf_ptr++ = TTY_NORMAL;
+ i++;
+ }
+
+ if (i)
+ tty_flip_buffer_push(tty);
+
+
+ mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
+ spin_unlock(&timer_lock);
+}
+
+
+static void rs_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ char buf[2];
+
+ if (!tty)
+ return;
+
+ buf[0] = ch;
+	buf[1] = '\0';		/* NUL-terminate, although only one byte is written */
+ __simc (SYS_write, 1, (unsigned long) buf, 1, 0, 0);
+}
+
+static void rs_flush_chars(struct tty_struct *tty)
+{
+}
+
+static int rs_write_room(struct tty_struct *tty)
+{
+ /* Let's say iss can always accept 2K characters.. */
+ return 2 * 1024;
+}
+
+static int rs_chars_in_buffer(struct tty_struct *tty)
+{
+ /* the iss doesn't buffer characters */
+ return 0;
+}
+
+static void rs_hangup(struct tty_struct *tty)
+{
+ /* Stub, once again.. */
+}
+
+static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ /* Stub, once again.. */
+}
+
+static int rs_read_proc(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ int len = 0;
+ off_t begin = 0;
+
+ len += sprintf(page, "serinfo:1.0 driver:%s\n", serial_version);
+ *eof = 1;
+
+ if (off >= len + begin)
+ return 0;
+
+ *start = page + (off - begin);
+ return ((count < begin + len - off) ? count : begin + len - off);
+}
+
+
+int register_serial(struct serial_struct*);
+void unregister_serial(int);
+
+static struct tty_operations serial_ops = {
+ .open = rs_open,
+ .close = rs_close,
+ .write = rs_write,
+ .put_char = rs_put_char,
+ .flush_chars = rs_flush_chars,
+ .write_room = rs_write_room,
+ .chars_in_buffer = rs_chars_in_buffer,
+ .hangup = rs_hangup,
+ .wait_until_sent = rs_wait_until_sent,
+ .read_proc = rs_read_proc
+};
+
+int __init rs_init(void)
+{
+ serial_driver = alloc_tty_driver(1);
+
+ printk ("%s %s\n", serial_name, serial_version);
+
+ /* Initialize the tty_driver structure */
+
+ serial_driver->owner = THIS_MODULE;
+ serial_driver->driver_name = "iss_serial";
+ serial_driver->name = "ttyS";
+ serial_driver->major = TTY_MAJOR;
+ serial_driver->minor_start = 64;
+ serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ serial_driver->init_termios = tty_std_termios;
+ serial_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ serial_driver->flags = TTY_DRIVER_REAL_RAW;
+
+ tty_set_operations(serial_driver, &serial_ops);
+
+ if (tty_register_driver(serial_driver))
+ panic("Couldn't register serial driver\n");
+ return 0;
+}
+
+
+static __exit void rs_exit(void)
+{
+ int error;
+
+ if ((error = tty_unregister_driver(serial_driver)))
+ printk("ISS_SERIAL: failed to unregister serial driver (%d)\n",
+ error);
+ put_tty_driver(serial_driver);
+}
+
+
+/* We use `late_initcall' instead of just `__initcall' as a workaround for
+ * the fact that (1) simcons_tty_init can't be called before tty_init,
+ * (2) tty_init is called via `module_init', (3) if statically linked,
+ * module_init == device_init, and (4) there's no ordering of init lists.
+ * We can do this easily because simcons is always statically linked, but
+ * other tty drivers that depend on tty_init and which must use
+ * `module_init' to declare their init routines are likely to be broken.
+ */
+
+late_initcall(rs_init);
+
+
+#ifdef CONFIG_SERIAL_CONSOLE
+
+static void iss_console_write(struct console *co, const char *s, unsigned count)
+{
+ int len = strlen(s);
+
+ if (s != 0 && *s != 0)
+ __simc (SYS_write, 1, (unsigned long)s,
+ count < len ? count : len,0,0);
+}
+
+static struct tty_driver* iss_console_device(struct console *c, int *index)
+{
+ *index = c->index;
+ return serial_driver;
+}
+
+
+static struct console sercons = {
+ .name = "ttyS",
+ .write = iss_console_write,
+ .device = iss_console_device,
+ .flags = CON_PRINTBUFFER,
+ .index = -1
+};
+
+static int __init iss_console_init(void)
+{
+ register_console(&sercons);
+ return 0;
+}
+
+console_initcall(iss_console_init);
+
+#endif /* CONFIG_SERIAL_CONSOLE */
+
diff --git a/arch/xtensa/platform-iss/io.c b/arch/xtensa/platform-iss/io.c
new file mode 100644
index 00000000000..5b161a5cb65
--- /dev/null
+++ b/arch/xtensa/platform-iss/io.c
@@ -0,0 +1,32 @@
+/* This file isn't really needed right now. */
+
+#if 0
+
+#include <asm/io.h>
+#include <xtensa/simcall.h>
+
+extern int __simc ();
+
+
+char iss_serial_getc()
+{
+ char c;
+ __simc( SYS_read, 0, &c, 1 );
+ return c;
+}
+
+void iss_serial_putc( char c )
+{
+ __simc( SYS_write, 1, &c, 1 );
+}
+
+void iss_serial_puts( char *s )
+{
+ if( s != 0 && *s != 0 )
+ __simc( SYS_write, 1, s, strlen(s) );
+}
+
+/*#error Need I/O ports to specific hardware!*/
+
+#endif
+
diff --git a/arch/xtensa/platform-iss/network.c b/arch/xtensa/platform-iss/network.c
new file mode 100644
index 00000000000..498d7dced1f
--- /dev/null
+++ b/arch/xtensa/platform-iss/network.c
@@ -0,0 +1,855 @@
+/*
+ *
+ * arch/xtensa/platform-iss/network.c
+ *
+ * Platform specific initialization.
+ *
+ * Authors: Chris Zankel <chris@zankel.net>
+ *		Based on work from the UML team.
+ *
+ * Copyright 2005 Tensilica Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/list.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/if_ether.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/if_tun.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/ioctl.h>
+#include <linux/bootmem.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/timer.h>
+
+#include <xtensa/simcall.h>
+
+#define DRIVER_NAME "iss-netdev"
+#define ETH_MAX_PACKET 1500
+#define ETH_HEADER_OTHER 14
+#define ISS_NET_TIMER_VALUE (2 * HZ)
+
+
+static DEFINE_SPINLOCK(opened_lock);
+static LIST_HEAD(opened);
+
+static DEFINE_SPINLOCK(devices_lock);
+static LIST_HEAD(devices);
+
+/* ------------------------------------------------------------------------- */
+
+/* We currently only support the TUNTAP transport protocol. */
+
+#define TRANSPORT_TUNTAP_NAME "tuntap"
+#define TRANSPORT_TUNTAP_MTU ETH_MAX_PACKET
+
+struct tuntap_info {
+ char dev_name[IFNAMSIZ];
+ int fixed_config;
+ unsigned char gw[ETH_ALEN];
+ int fd;
+};
+
+/* ------------------------------------------------------------------------- */
+
+
+/* This structure contains our private information for the driver. */
+
+struct iss_net_private {
+
+ struct list_head device_list;
+ struct list_head opened_list;
+
+ spinlock_t lock;
+ struct net_device *dev;
+ struct platform_device pdev;
+ struct timer_list tl;
+ struct net_device_stats stats;
+
+ struct timer_list timer;
+ unsigned int timer_val;
+
+ int index;
+ int mtu;
+
+ unsigned char mac[ETH_ALEN];
+ int have_mac;
+
+ struct {
+ union {
+ struct tuntap_info tuntap;
+ } info;
+
+ int (*open)(struct iss_net_private *lp);
+ void (*close)(struct iss_net_private *lp);
+ int (*read)(struct iss_net_private *lp, struct sk_buff **skb);
+ int (*write)(struct iss_net_private *lp, struct sk_buff **skb);
+ unsigned short (*protocol)(struct sk_buff *skb);
+ int (*poll)(struct iss_net_private *lp);
+ } tp;
+
+};
+
+/* ======================= ISS SIMCALL INTERFACE =========================== */
+
+/* Note: __simc must _not_ be declared inline! */
+
+static int errno;
+
+static int __simc (int a, int b, int c, int d, int e, int f)
+{
+ int ret;
+ __asm__ __volatile__ ("simcall\n"
+ "mov %0, a2\n"
+ "mov %1, a3\n" : "=a" (ret), "=a" (errno)
+ : : "a2", "a3");
+ return ret;
+}
+
+static inline int simc_open(char *file, int flags, int mode)
+{
+	return __simc(SYS_open, (int) file, flags, mode, 0, 0);
+}
+
+static inline int simc_close(int fd)
+{
+	return __simc(SYS_close, fd, 0, 0, 0, 0);
+}
+
+static inline int simc_ioctl(int fd, int request, void *arg)
+{
+	return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0);
+}
+
+static inline int simc_read(int fd, void *buf, size_t count)
+{
+	return __simc(SYS_read, fd, (int) buf, count, 0, 0);
+}
+
+static inline int simc_write(int fd, void *buf, size_t count)
+{
+	return __simc(SYS_write, fd, (int) buf, count, 0, 0);
+}
+
+static inline int simc_poll(int fd)
+{
+	struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
+
+	return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv,0,0);
+}
+
+/* ================================ HELPERS ================================ */
+
+
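+/* Split a comma-separated interface spec.  Each varargs char** takes
+ * the next field in turn; empty fields leave the argument untouched.
+ * Returns the unconsumed remainder, or NULL when the string runs out
+ * before the argument list does.
+ */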
+static char *split_if_spec(char *str, ...)
+{
+ char **arg, *end;
+ va_list ap;
+
+ va_start(ap, str);
+ while ((arg = va_arg(ap, char**)) != NULL) {
+ if (*str == '\0')
+ return NULL;
+ end = strchr(str, ',');
+ if (end != str)
+ *arg = str;
+ if (end == NULL)
+ return NULL;
+ *end ++ = '\0';
+ str = end;
+ }
+ va_end(ap);
+ return str;
+}
+
+
+#if 0
+/* Adjust SKB. */
+
+struct sk_buff *ether_adjust_skb(struct sk_buff *skb, int extra)
+{
+ if ((skb != NULL) && (skb_tailroom(skb) < extra)) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_copy_expand(skb, 0, extra, GFP_ATOMIC);
+ dev_kfree_skb(skb);
+ skb = skb2;
+ }
+ if (skb != NULL)
+ skb_put(skb, extra);
+
+ return skb;
+}
+#endif
+
+/* Return the IP address as a string for a given device. */
+
+static void dev_ip_addr(void *d, char *buf, char *bin_buf)
+{
+ struct net_device *dev = d;
+ struct in_device *ip = dev->ip_ptr;
+ struct in_ifaddr *in;
+ u32 addr;
+
+ if ((ip == NULL) || ((in = ip->ifa_list) == NULL)) {
+ printk(KERN_WARNING "Device not assigned an IP address!\n");
+ return;
+ }
+
+ addr = in->ifa_address;
+ sprintf(buf, "%d.%d.%d.%d", addr & 0xff, (addr >> 8) & 0xff,
+ (addr >> 16) & 0xff, addr >> 24);
+
+ if (bin_buf) {
+ bin_buf[0] = addr & 0xff;
+ bin_buf[1] = (addr >> 8) & 0xff;
+ bin_buf[2] = (addr >> 16) & 0xff;
+ bin_buf[3] = addr >> 24;
+ }
+}
+
+/* Set Ethernet address of the specified device. */
+
+static inline void set_ether_mac(void *d, unsigned char *addr)
+{
+ struct net_device *dev = d;
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
+}
+
+
+/* ======================= TUNTAP TRANSPORT INTERFACE ====================== */
+
+static int tuntap_open(struct iss_net_private *lp)
+{
+ struct ifreq ifr;
+ char *dev_name = lp->tp.info.tuntap.dev_name;
+ int err = -EINVAL;
+ int fd;
+
+ /* We currently only support a fixed configuration. */
+
+ if (!lp->tp.info.tuntap.fixed_config)
+ return -EINVAL;
+
+ if ((fd = simc_open("/dev/net/tun", 02, 0)) < 0) { /* O_RDWR */
+ printk("Failed to open /dev/net/tun, returned %d "
+ "(errno = %d)\n", fd, errno);
+ return fd;
+ }
+
+ memset(&ifr, 0, sizeof ifr);
+ ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
+	strlcpy(ifr.ifr_name, dev_name, sizeof ifr.ifr_name);
+
+ if ((err = simc_ioctl(fd, TUNSETIFF, (void*) &ifr)) < 0) {
+ printk("Failed to set interface, returned %d "
+ "(errno = %d)\n", err, errno);
+ simc_close(fd);
+ return err;
+ }
+
+ lp->tp.info.tuntap.fd = fd;
+ return err;
+}
+
+static void tuntap_close(struct iss_net_private *lp)
+{
+#if 0
+ if (lp->tp.info.tuntap.fixed_config)
+ iter_addresses(lp->tp.info.tuntap.dev, close_addr, lp->host.dev_name);
+#endif
+ simc_close(lp->tp.info.tuntap.fd);
+ lp->tp.info.tuntap.fd = -1;
+}
+
+static int tuntap_read (struct iss_net_private *lp, struct sk_buff **skb)
+{
+#if 0
+ *skb = ether_adjust_skb(*skb, ETH_HEADER_OTHER);
+ if (*skb == NULL)
+ return -ENOMEM;
+#endif
+
+ return simc_read(lp->tp.info.tuntap.fd,
+ (*skb)->data, (*skb)->dev->mtu + ETH_HEADER_OTHER);
+}
+
+static int tuntap_write (struct iss_net_private *lp, struct sk_buff **skb)
+{
+ return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len);
+}
+
+unsigned short tuntap_protocol(struct sk_buff *skb)
+{
+ return eth_type_trans(skb, skb->dev);
+}
+
+static int tuntap_poll(struct iss_net_private *lp)
+{
+ return simc_poll(lp->tp.info.tuntap.fd);
+}
+
+/*
+ * Currently only a device name is supported.
+ * ethX=tuntap[,[mac address][,[device name]]]
+ */
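+/* For example (illustrative values only), booting with
+ * "eth0=tuntap,,tap0" selects the tuntap transport, leaves the MAC to
+ * be derived later from the interface's IP address, and binds the
+ * fixed host-side device "tap0".
+ */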
+
+static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
+{
+ const int len = strlen(TRANSPORT_TUNTAP_NAME);
+ char *dev_name = NULL, *mac_str = NULL, *rem = NULL;
+
+ /* Transport should be 'tuntap': ethX=tuntap,mac,dev_name */
+
+ if (strncmp(init, TRANSPORT_TUNTAP_NAME, len))
+ return 0;
+
+ if (*(init += strlen(TRANSPORT_TUNTAP_NAME)) == ',') {
+ if ((rem=split_if_spec(init+1, &mac_str, &dev_name)) != NULL) {
+ printk("Extra garbage on specification : '%s'\n", rem);
+ return 0;
+ }
+ } else if (*init != '\0') {
+ printk("Invalid argument: %s. Skipping device!\n", init);
+ return 0;
+ }
+
+ if (dev_name) {
+ strncpy(lp->tp.info.tuntap.dev_name, dev_name,
+ sizeof lp->tp.info.tuntap.dev_name);
+ lp->tp.info.tuntap.fixed_config = 1;
+ } else
+ strcpy(lp->tp.info.tuntap.dev_name, TRANSPORT_TUNTAP_NAME);
+
+
+#if 0
+ if (setup_etheraddr(mac_str, lp->mac))
+ lp->have_mac = 1;
+#endif
+ lp->mtu = TRANSPORT_TUNTAP_MTU;
+
+ //lp->info.tuntap.gate_addr = gate_addr;
+
+ lp->tp.info.tuntap.fd = -1;
+
+ lp->tp.open = tuntap_open;
+ lp->tp.close = tuntap_close;
+ lp->tp.read = tuntap_read;
+ lp->tp.write = tuntap_write;
+ lp->tp.protocol = tuntap_protocol;
+ lp->tp.poll = tuntap_poll;
+
+ printk("TUN/TAP backend - ");
+#if 0
+ if (lp->host.gate_addr != NULL)
+ printk("IP = %s", lp->host.gate_addr);
+#endif
+ printk("\n");
+
+ return 1;
+}
+
+/* ================================ ISS NET ================================ */
+
+static int iss_net_rx(struct net_device *dev)
+{
+ struct iss_net_private *lp = dev->priv;
+ int pkt_len;
+ struct sk_buff *skb;
+
+ /* Check if there is any new data. */
+
+ if (lp->tp.poll(lp) == 0)
+ return 0;
+
+ /* Try to allocate memory, if it fails, try again next round. */
+
+ if ((skb = dev_alloc_skb(dev->mtu + 2 + ETH_HEADER_OTHER)) == NULL) {
+ lp->stats.rx_dropped++;
+ return 0;
+ }
+
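+	/* Reserve 2 bytes so the 14-byte Ethernet header leaves the IP
+	 * header aligned on a 4-byte boundary.
+	 */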
+ skb_reserve(skb, 2);
+
+ /* Setup skb */
+
+ skb->dev = dev;
+ skb->mac.raw = skb->data;
+ pkt_len = lp->tp.read(lp, &skb);
+
+	if (pkt_len > 0) {
+		/* Account the data only after a successful read; skb_put()
+		 * with a negative length would corrupt the skb.
+		 */
+		skb_put(skb, pkt_len);
+ skb->protocol = lp->tp.protocol(skb);
+ // netif_rx(skb);
+ netif_rx_ni(skb);
+
+ lp->stats.rx_bytes += skb->len;
+ lp->stats.rx_packets++;
+ return pkt_len;
+ }
+ kfree_skb(skb);
+ return pkt_len;
+}
+
+static int iss_net_poll(void)
+{
+ struct list_head *ele;
+ int err, ret = 0;
+
+ spin_lock(&opened_lock);
+
+ list_for_each(ele, &opened) {
+ struct iss_net_private *lp;
+
+ lp = list_entry(ele, struct iss_net_private, opened_list);
+
+ if (!netif_running(lp->dev))
+ break;
+
+ spin_lock(&lp->lock);
+
+ while ((err = iss_net_rx(lp->dev)) > 0)
+ ret++;
+
+ spin_unlock(&lp->lock);
+
+ if (err < 0) {
+ printk(KERN_ERR "Device '%s' read returned %d, "
+ "shutting it down\n", lp->dev->name, err);
+ dev_close(lp->dev);
+ } else {
+ // FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ);
+ }
+ }
+
+ spin_unlock(&opened_lock);
+ return ret;
+}
+
+
+static void iss_net_timer(unsigned long priv)
+{
+ struct iss_net_private* lp = (struct iss_net_private*) priv;
+
+ spin_lock(&lp->lock);
+
+ iss_net_poll();
+
+ mod_timer(&lp->timer, jiffies + lp->timer_val);
+
+ spin_unlock(&lp->lock);
+}
+
+
+static int iss_net_open(struct net_device *dev)
+{
+ struct iss_net_private *lp = dev->priv;
+ char addr[sizeof "255.255.255.255\0"];
+ int err;
+
+ spin_lock(&lp->lock);
+
+ if ((err = lp->tp.open(lp)) < 0)
+ goto out;
+
+ if (!lp->have_mac) {
+ dev_ip_addr(dev, addr, &lp->mac[2]);
+ set_ether_mac(dev, lp->mac);
+ }
+
+ netif_start_queue(dev);
+
+	/* Clear the buffer. The host side of the interface may already be
+	 * full when we get here; in that case new data is never queued,
+	 * SIGIOs never arrive, and the interface never works.
+	 */
+ while ((err = iss_net_rx(dev)) > 0)
+ ;
+
+	init_timer(&lp->timer);
+	lp->timer_val = ISS_NET_TIMER_VALUE;
+	lp->timer.data = (unsigned long) lp;
+	lp->timer.function = iss_net_timer;
+	mod_timer(&lp->timer, jiffies + lp->timer_val);
+
+out:
+	spin_unlock(&lp->lock);
+
+	/* Add the device to the poll list only after dropping lp->lock:
+	 * iss_net_poll() nests lp->lock inside opened_lock, so taking
+	 * opened_lock while lp->lock is held could deadlock (ABBA). */
+	if (err >= 0) {
+		spin_lock(&opened_lock);
+		list_add(&lp->opened_list, &opened);
+		spin_unlock(&opened_lock);
+	}
+
+	return err;
+}
+
+static int iss_net_close(struct net_device *dev)
+{
+	struct iss_net_private *lp = dev->priv;
+
+	netif_stop_queue(dev);
+
+	/* Stop the poll timer before taking lp->lock; the timer handler
+	 * takes the same lock, so del_timer_sync() must not run under it. */
+	del_timer_sync(&lp->timer);
+
+	/* Remove the device from the poll list before tearing down the
+	 * transport; taking opened_lock while holding lp->lock would invert
+	 * the lock order used by iss_net_poll(). */
+	spin_lock(&opened_lock);
+	list_del(&lp->opened_list);
+	spin_unlock(&opened_lock);
+
+	spin_lock(&lp->lock);
+	lp->tp.close(lp);
+	spin_unlock(&lp->lock);
+
+	return 0;
+}
+
+static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct iss_net_private *lp = dev->priv;
+ unsigned long flags;
+ int len;
+
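+	/* Transmission is synchronous: stop the queue, hand the frame to
+	 * the transport, then restart the queue according to the result. */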
+ netif_stop_queue(dev);
+ spin_lock_irqsave(&lp->lock, flags);
+
+ len = lp->tp.write(lp, &skb);
+
+ if (len == skb->len) {
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += skb->len;
+ dev->trans_start = jiffies;
+ netif_start_queue(dev);
+
+ /* this is normally done in the interrupt when tx finishes */
+ netif_wake_queue(dev);
+
+ } else if (len == 0) {
+ netif_start_queue(dev);
+ lp->stats.tx_dropped++;
+
+ } else {
+ netif_start_queue(dev);
+ printk(KERN_ERR "iss_net_start_xmit: failed(%d)\n", len);
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+
+static struct net_device_stats *iss_net_get_stats(struct net_device *dev)
+{
+ struct iss_net_private *lp = dev->priv;
+ return &lp->stats;
+}
+
+static void iss_net_set_multicast_list(struct net_device *dev)
+{
+#if 0
+ if (dev->flags & IFF_PROMISC)
+ return;
+ else if (dev->mc_count)
+ dev->flags |= IFF_ALLMULTI;
+ else
+ dev->flags &= ~IFF_ALLMULTI;
+#endif
+}
+
+static void iss_net_tx_timeout(struct net_device *dev)
+{
+#if 0
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+#endif
+}
+
+static int iss_net_set_mac(struct net_device *dev, void *addr)
+{
+#if 0
+ struct iss_net_private *lp = dev->priv;
+ struct sockaddr *hwaddr = addr;
+
+ spin_lock(&lp->lock);
+ memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
+ spin_unlock(&lp->lock);
+#endif
+
+ return 0;
+}
+
+static int iss_net_change_mtu(struct net_device *dev, int new_mtu)
+{
+#if 0
+ struct iss_net_private *lp = dev->priv;
+ int err = 0;
+
+ spin_lock(&lp->lock);
+
+ // FIXME not needed new_mtu = transport_set_mtu(new_mtu, &lp->user);
+
+ if (new_mtu < 0)
+ err = new_mtu;
+ else
+ dev->mtu = new_mtu;
+
+ spin_unlock(&lp->lock);
+ return err;
+#endif
+ return -EINVAL;
+}
+
+static int iss_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+#if 0
+ static const struct ethtool_drvinfo info = {
+ .cmd = ETHTOOL_GDRVINFO,
+ .driver = DRIVER_NAME,
+ .version = "42",
+ };
+ void *useraddr;
+ u32 ethcmd;
+
+ switch (cmd) {
+ case SIOCETHTOOL:
+ useraddr = ifr->ifr_data;
+ if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
+ return -EFAULT;
+
+ switch (ethcmd) {
+ case ETHTOOL_GDRVINFO:
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EINVAL;
+ }
+#endif
+ return -EINVAL;
+}
+
+void iss_net_user_timer_expire(unsigned long _conn)
+{
+}
+
+
+static struct device_driver iss_net_driver = {
+ .name = DRIVER_NAME,
+ .bus = &platform_bus_type,
+};
+
+static int driver_registered;
+
+static int iss_net_configure(int index, char *init)
+{
+ struct net_device *dev;
+ struct iss_net_private *lp;
+ int err;
+
+ if ((dev = alloc_etherdev(sizeof *lp)) == NULL) {
+ printk(KERN_ERR "eth_configure: failed to allocate device\n");
+ return 1;
+ }
+
+ /* Initialize private element. */
+
+ lp = dev->priv;
+ *lp = ((struct iss_net_private) {
+ .device_list = LIST_HEAD_INIT(lp->device_list),
+ .opened_list = LIST_HEAD_INIT(lp->opened_list),
+ .lock = SPIN_LOCK_UNLOCKED,
+ .dev = dev,
+ .index = index,
+ //.fd = -1,
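+		/* fe:fd is a locally administered unicast prefix; the low
+		 * four bytes are filled in later (see iss_net_open). */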
+ .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0 },
+ .have_mac = 0,
+ });
+
+	/*
+	 * Try all transport protocols.
+	 * Note: more transports can be chained to the condition below as
+	 * '&& !X_probe(lp, index, init)'.
+	 */
+
+ if (!tuntap_probe(lp, index, init)) {
+ printk("Invalid arguments. Skipping device!\n");
+ goto errout;
+ }
+
+ printk(KERN_INFO "Netdevice %d ", index);
+ if (lp->have_mac)
+ printk("(%02x:%02x:%02x:%02x:%02x:%02x) ",
+ lp->mac[0], lp->mac[1],
+ lp->mac[2], lp->mac[3],
+ lp->mac[4], lp->mac[5]);
+ printk(": ");
+
+ /* sysfs register */
+
+ if (!driver_registered) {
+ driver_register(&iss_net_driver);
+ driver_registered = 1;
+ }
+
+ spin_lock(&devices_lock);
+ list_add(&lp->device_list, &devices);
+ spin_unlock(&devices_lock);
+
+ lp->pdev.id = index;
+ lp->pdev.name = DRIVER_NAME;
+ platform_device_register(&lp->pdev);
+	SET_NETDEV_DEV(dev, &lp->pdev.dev);
+
+ /*
+ * If this name ends up conflicting with an existing registered
+ * netdevice, that is OK, register_netdev{,ice}() will notice this
+ * and fail.
+ */
+ snprintf(dev->name, sizeof dev->name, "eth%d", index);
+
+ dev->mtu = lp->mtu;
+ dev->open = iss_net_open;
+ dev->hard_start_xmit = iss_net_start_xmit;
+ dev->stop = iss_net_close;
+ dev->get_stats = iss_net_get_stats;
+ dev->set_multicast_list = iss_net_set_multicast_list;
+ dev->tx_timeout = iss_net_tx_timeout;
+ dev->set_mac_address = iss_net_set_mac;
+ dev->change_mtu = iss_net_change_mtu;
+ dev->do_ioctl = iss_net_ioctl;
+ dev->watchdog_timeo = (HZ >> 1);
+ dev->irq = -1;
+
+ rtnl_lock();
+ err = register_netdevice(dev);
+ rtnl_unlock();
+
+ if (err) {
+ printk("Error registering net device!\n");
+ /* XXX: should we call ->remove() here? */
+ free_netdev(dev);
+ return 1;
+ }
+
+ init_timer(&lp->tl);
+ lp->tl.function = iss_net_user_timer_expire;
+
+#if 0
+ if (lp->have_mac)
+ set_ether_mac(dev, lp->mac);
+#endif
+ return 0;
+
+errout:
+	/* FIXME: unregister and free resources. */
+	return -EIO;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* Filled in during early boot */
+
+struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line);
+
+struct iss_net_init {
+ struct list_head list;
+ char *init; /* init string */
+ int index;
+};
+
+/*
+ * Parse the command line for 'ethX=...' settings and record each one. The
+ * corresponding devices are initialized later, in iss_net_init.
+ */
+
+#define ERR KERN_ERR "iss_net_setup: "
+
+static int iss_net_setup(char *str)
+{
+ struct iss_net_private *device = NULL;
+ struct iss_net_init *new;
+ struct list_head *ele;
+ char *end;
+ int n;
+
+ n = simple_strtoul(str, &end, 0);
+ if (end == str) {
+ printk(ERR "Failed to parse '%s'\n", str);
+ return 1;
+ }
+ if (n < 0) {
+ printk(ERR "Device %d is negative\n", n);
+ return 1;
+ }
+ if (*(str = end) != '=') {
+ printk(ERR "Expected '=' after device number\n");
+ return 1;
+ }
+
+ spin_lock(&devices_lock);
+
+ list_for_each(ele, &devices) {
+ device = list_entry(ele, struct iss_net_private, device_list);
+ if (device->index == n)
+ break;
+ }
+
+ spin_unlock(&devices_lock);
+
+ if (device && device->index == n) {
+ printk(ERR "Device %d already configured\n", n);
+ return 1;
+ }
+
+	if ((new = alloc_bootmem(sizeof(*new))) == NULL) {
+		printk(ERR "alloc_bootmem failed\n");
+ return 1;
+ }
+
+ INIT_LIST_HEAD(&new->list);
+ new->index = n;
+ new->init = str + 1;
+
+ list_add_tail(&new->list, &eth_cmd_line);
+ return 1;
+}
+
+#undef ERR
+
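+/*
+ * Note: registering the bare "eth" prefix (rather than "ethN=") lets this
+ * single handler match every 'ethX=...' option and parse the index itself.
+ */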
+__setup("eth", iss_net_setup);
+
+/*
+ * Initialize all ISS Ethernet devices previously registered in iss_net_setup.
+ */
+
+static int iss_net_init(void)
+{
+ struct list_head *ele, *next;
+
+ /* Walk through all Ethernet devices specified in the command line. */
+
+ list_for_each_safe(ele, next, &eth_cmd_line) {
+ struct iss_net_init *eth;
+ eth = list_entry(ele, struct iss_net_init, list);
+ iss_net_configure(eth->index, eth->init);
+ }
+
+	return 0;
+}
+
+module_init(iss_net_init);
+
diff --git a/arch/xtensa/platform-iss/setup.c b/arch/xtensa/platform-iss/setup.c
new file mode 100644
index 00000000000..2e6dcbf0cc0
--- /dev/null
+++ b/arch/xtensa/platform-iss/setup.c
@@ -0,0 +1,112 @@
+/*
+ *
+ * arch/xtensa/platform-iss/setup.c
+ *
+ * Platform specific initialization.
+ *
+ * Authors: Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ *
+ * Copyright 2001 - 2005 Tensilica Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/stringify.h>
+#include <linux/notifier.h>
+
+#include <asm/platform.h>
+#include <asm/bootparam.h>
+
+
+void __init platform_init(bp_tag_t *bootparam)
+{
+}
+
+void platform_halt(void)
+{
+ printk (" ** Called platform_halt(), looping forever! **\n");
+ while (1);
+}
+
+void platform_power_off(void)
+{
+ printk (" ** Called platform_power_off(), looping forever! **\n");
+ while (1);
+}
+
+void platform_restart(void)
+{
+ /* Flush and reset the mmu, simulate a processor reset, and
+ * jump to the reset vector. */
+
+ __asm__ __volatile__("movi a2, 15\n\t"
+ "wsr a2, " __stringify(ICOUNTLEVEL) "\n\t"
+ "movi a2, 0\n\t"
+ "wsr a2, " __stringify(ICOUNT) "\n\t"
+ "wsr a2, " __stringify(IBREAKENABLE) "\n\t"
+ "wsr a2, " __stringify(LCOUNT) "\n\t"
+ "movi a2, 0x1f\n\t"
+ "wsr a2, " __stringify(PS) "\n\t"
+ "isync\n\t"
+ "jx %0\n\t"
+ :
+ : "a" (XCHAL_RESET_VECTOR_VADDR)
+ : "a2");
+
+ /* control never gets here */
+}
+
+const char twirl[] = "|/-\\|/-\\";
+
+void platform_heartbeat(void)
+{
+#if 0
+ static int i = 0, j = 0;
+
+ if (--i < 0) {
+ i = 99;
+ printk("\r%c\r", twirl[j++]);
+ if (j == 8)
+ j = 0;
+ }
+#endif
+}
+
+
+static int
+iss_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
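+	/* Trap into the ISS via a simcall so the simulator regains
+	 * control when the kernel panics. */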
+ __asm__ __volatile__("movi a2, -1; simcall\n");
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block iss_panic_block = {
+	.notifier_call = iss_panic_event,
+};
+
+void __init platform_setup(char **p_cmdline)
+{
+ notifier_chain_register(&panic_notifier_list, &iss_panic_block);
+}