Diffstat (limited to 'arch/arm/kernel')
27 files changed, 790 insertions, 279 deletions
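Illustration (not part of the patch): the entry-common.S and calls.S changes below build two syscall tables from one list by redefining the ABI() and OBSOLETE() helper macros before each include of calls.S -- with CONFIG_AEABI, OBSOLETE entries in the native table become sys_ni_syscall, while the OABI compat table keeps them and substitutes the sys_oabi_* wrappers. A minimal stand-alone C sketch of that preprocessor technique, using hypothetical stub functions in place of real syscalls, looks like this:

#include <stdio.h>

typedef long (*syscall_fn_t)(void);

/* stand-in stubs; the real kernel entries obviously do more */
static long sys_stat64(void)      { return 0; }
static long sys_oabi_stat64(void) { return 0; }
static long sys_time(void)        { return 0; }
static long sys_ni_syscall(void)  { return -38; } /* -ENOSYS */

/* calls.S equivalent: one table body, parameterised by ABI() and OBSOLETE() */
#define SYSCALL_LIST \
	ABI(sys_stat64, sys_oabi_stat64), \
	OBSOLETE(sys_time)

/* native (EABI) table: take the native entry, stub out obsolete calls */
#define ABI(native, compat) native
#define OBSOLETE(call)      sys_ni_syscall
static const syscall_fn_t sys_call_table[] = { SYSCALL_LIST };
#undef ABI
#undef OBSOLETE

/* legacy (OABI) compat table: take the wrapper, keep obsolete calls */
#define ABI(native, compat) compat
#define OBSOLETE(call)      call
static const syscall_fn_t sys_oabi_call_table[] = { SYSCALL_LIST };
#undef ABI
#undef OBSOLETE

int main(void)
{
	/* slot 0 resolves to sys_stat64 natively, sys_oabi_stat64 for OABI */
	printf("tables differ in slot 0: %d\n",
	       sys_call_table[0] != sys_oabi_call_table[0]);
	return 0;
}

The patch does the same thing in assembler: entry-common.S defines ABI()/OBSOLETE() one way before including calls.S to emit sys_call_table, then (under CONFIG_OABI_COMPAT) redefines them and includes calls.S again to emit sys_oabi_call_table.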
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index c11169b5ed9..2ce0e3a27a4 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -2,15 +2,16 @@ # Makefile for the linux kernel. # -AFLAGS_head.o := -DKERNEL_RAM_ADDR=$(TEXTADDR) +AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) # Object file lists. -obj-y := compat.o dma.o entry-armv.o entry-common.o irq.o \ +obj-y := compat.o entry-armv.o entry-common.o irq.o \ process.o ptrace.o semaphore.o setup.o signal.o sys_arm.o \ time.o traps.o obj-$(CONFIG_APM) += apm.o +obj-$(CONFIG_ISA_DMA_API) += dma.o obj-$(CONFIG_ARCH_ACORN) += ecard.o obj-$(CONFIG_FOOTBRIDGE) += isa.o obj-$(CONFIG_FIQ) += fiq.o @@ -19,6 +20,7 @@ obj-$(CONFIG_ARTHUR) += arthur.o obj-$(CONFIG_ISA_DMA) += dma-isa.o obj-$(CONFIG_PCI) += bios32.o obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o obj-$(CONFIG_IWMMXT) += iwmmxt.o AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt diff --git a/arch/arm/kernel/apm.c b/arch/arm/kernel/apm.c index b0bbd1e62eb..766b6c05c6d 100644 --- a/arch/arm/kernel/apm.c +++ b/arch/arm/kernel/apm.c @@ -18,6 +18,7 @@ #include <linux/proc_fs.h> #include <linux/miscdevice.h> #include <linux/apm_bios.h> +#include <linux/capability.h> #include <linux/sched.h> #include <linux/pm.h> #include <linux/device.h> @@ -80,6 +81,7 @@ struct apm_user { */ static int suspends_pending; static int apm_disabled; +static int arm_apm_active; static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue); static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue); @@ -476,9 +478,9 @@ static int kapmd(void *arg) apm_event_t event; wait_event_interruptible(kapmd_wait, - !queue_empty(&kapmd_queue) || !pm_active); + !queue_empty(&kapmd_queue) || !arm_apm_active); - if (!pm_active) + if (!arm_apm_active) break; spin_lock_irq(&kapmd_queue_lock); @@ -521,16 +523,11 @@ static int __init apm_init(void) return -ENODEV; } - if (PM_IS_ACTIVE()) { - printk(KERN_NOTICE "apm: overridden by ACPI.\n"); - return -EINVAL; - } - - pm_active = 1; + arm_apm_active = 1; ret = kernel_thread(kapmd, NULL, CLONE_KERNEL); if (ret < 0) { - pm_active = 0; + arm_apm_active = 0; return ret; } @@ -542,7 +539,7 @@ static int __init apm_init(void) if (ret != 0) { remove_proc_entry("apm", NULL); - pm_active = 0; + arm_apm_active = 0; wake_up(&kapmd_wait); wait_for_completion(&kapmd_exit); } @@ -555,7 +552,7 @@ static void __exit apm_exit(void) misc_deregister(&apm_device); remove_proc_entry("apm", NULL); - pm_active = 0; + arm_apm_active = 0; wake_up(&kapmd_wait); wait_for_completion(&kapmd_exit); } diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 7a3261f0bf7..1574941ebfe 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -35,6 +35,16 @@ extern void __udivsi3(void); extern void __umodsi3(void); extern void __do_div64(void); +extern void __aeabi_idiv(void); +extern void __aeabi_idivmod(void); +extern void __aeabi_lasr(void); +extern void __aeabi_llsl(void); +extern void __aeabi_llsr(void); +extern void __aeabi_lmul(void); +extern void __aeabi_uidiv(void); +extern void __aeabi_uidivmod(void); +extern void __aeabi_ulcmp(void); + extern void fpundefinstr(void); extern void fp_enter(void); @@ -120,7 +130,6 @@ EXPORT_SYMBOL(__arch_strncpy_from_user); EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); EXPORT_SYMBOL(__get_user_4); -EXPORT_SYMBOL(__get_user_8); EXPORT_SYMBOL(__put_user_1); EXPORT_SYMBOL(__put_user_2); @@ -142,6 +151,18 @@ EXPORT_SYMBOL(__udivsi3); EXPORT_SYMBOL(__umodsi3); EXPORT_SYMBOL(__do_div64); +#ifdef CONFIG_AEABI 
+EXPORT_SYMBOL(__aeabi_idiv); +EXPORT_SYMBOL(__aeabi_idivmod); +EXPORT_SYMBOL(__aeabi_lasr); +EXPORT_SYMBOL(__aeabi_llsl); +EXPORT_SYMBOL(__aeabi_llsr); +EXPORT_SYMBOL(__aeabi_lmul); +EXPORT_SYMBOL(__aeabi_uidiv); +EXPORT_SYMBOL(__aeabi_uidivmod); +EXPORT_SYMBOL(__aeabi_ulcmp); +#endif + /* bitops */ EXPORT_SYMBOL(_set_bit_le); EXPORT_SYMBOL(_test_and_set_bit_le); diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 04d3082a7b9..0abbce8c70b 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -23,20 +23,15 @@ #error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32 #endif /* - * GCC 2.95.1, 2.95.2: ignores register clobber list in asm(). * GCC 3.0, 3.1: general bad code generation. * GCC 3.2.0: incorrect function argument offset calculation. * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c * (http://gcc.gnu.org/PR8896) and incorrect structure * initialisation in fs/jffs2/erase.c */ -#if __GNUC__ < 2 || \ - (__GNUC__ == 2 && __GNUC_MINOR__ < 95) || \ - (__GNUC__ == 2 && __GNUC_MINOR__ == 95 && __GNUC_PATCHLEVEL__ != 0 && \ - __GNUC_PATCHLEVEL__ < 3) || \ - (__GNUC__ == 3 && __GNUC_MINOR__ < 3) +#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3) #error Your compiler is too buggy; it is known to miscompile kernels. -#error Known good compilers: 2.95.3, 2.95.4, 2.96, 3.3 +#error Known good compilers: 3.3 #endif /* Use marker if you need to separate the values later */ diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index 2ad4aa2a153..75e6f9a9471 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -13,7 +13,7 @@ #define NR_syscalls 328 #else -__syscall_start: +100: /* 0 */ .long sys_restart_syscall .long sys_exit .long sys_fork_wrapper @@ -27,7 +27,7 @@ __syscall_start: /* 10 */ .long sys_unlink .long sys_execve_wrapper .long sys_chdir - .long sys_time /* used by libc4 */ + .long OBSOLETE(sys_time) /* used by libc4 */ .long sys_mknod /* 15 */ .long sys_chmod .long sys_lchown16 @@ -36,15 +36,15 @@ __syscall_start: .long sys_lseek /* 20 */ .long sys_getpid .long sys_mount - .long sys_oldumount /* used by libc4 */ + .long OBSOLETE(sys_oldumount) /* used by libc4 */ .long sys_setuid16 .long sys_getuid16 -/* 25 */ .long sys_stime +/* 25 */ .long OBSOLETE(sys_stime) .long sys_ptrace - .long sys_alarm /* used by libc4 */ + .long OBSOLETE(sys_alarm) /* used by libc4 */ .long sys_ni_syscall /* was sys_fstat */ .long sys_pause -/* 30 */ .long sys_utime /* used by libc4 */ +/* 30 */ .long OBSOLETE(sys_utime) /* used by libc4 */ .long sys_ni_syscall /* was sys_stty */ .long sys_ni_syscall /* was sys_getty */ .long sys_access @@ -90,21 +90,21 @@ __syscall_start: .long sys_sigpending .long sys_sethostname /* 75 */ .long sys_setrlimit - .long sys_old_getrlimit /* used by libc4 */ + .long OBSOLETE(sys_old_getrlimit) /* used by libc4 */ .long sys_getrusage .long sys_gettimeofday .long sys_settimeofday /* 80 */ .long sys_getgroups16 .long sys_setgroups16 - .long old_select /* used by libc4 */ + .long OBSOLETE(old_select) /* used by libc4 */ .long sys_symlink .long sys_ni_syscall /* was sys_lstat */ /* 85 */ .long sys_readlink .long sys_uselib .long sys_swapon .long sys_reboot - .long old_readdir /* used by libc4 */ -/* 90 */ .long old_mmap /* used by libc4 */ + .long OBSOLETE(old_readdir) /* used by libc4 */ +/* 90 */ .long OBSOLETE(old_mmap) /* used by libc4 */ .long sys_munmap .long sys_truncate .long sys_ftruncate @@ -116,7 +116,7 @@ __syscall_start: .long sys_statfs /* 100 */ .long sys_fstatfs .long 
sys_ni_syscall - .long sys_socketcall + .long OBSOLETE(sys_socketcall) .long sys_syslog .long sys_setitimer /* 105 */ .long sys_getitimer @@ -127,11 +127,11 @@ __syscall_start: /* 110 */ .long sys_ni_syscall /* was sys_iopl */ .long sys_vhangup .long sys_ni_syscall - .long sys_syscall /* call a syscall */ + .long OBSOLETE(sys_syscall) /* call a syscall */ .long sys_wait4 /* 115 */ .long sys_swapoff .long sys_sysinfo - .long sys_ipc_wrapper + .long OBSOLETE(ABI(sys_ipc, sys_oabi_ipc)) .long sys_fsync .long sys_sigreturn_wrapper /* 120 */ .long sys_clone_wrapper @@ -194,8 +194,8 @@ __syscall_start: .long sys_rt_sigtimedwait .long sys_rt_sigqueueinfo .long sys_rt_sigsuspend_wrapper -/* 180 */ .long sys_pread64 - .long sys_pwrite64 +/* 180 */ .long ABI(sys_pread64, sys_oabi_pread64) + .long ABI(sys_pwrite64, sys_oabi_pwrite64) .long sys_chown16 .long sys_getcwd .long sys_capget @@ -207,11 +207,11 @@ __syscall_start: /* 190 */ .long sys_vfork_wrapper .long sys_getrlimit .long sys_mmap2 - .long sys_truncate64 - .long sys_ftruncate64 -/* 195 */ .long sys_stat64 - .long sys_lstat64 - .long sys_fstat64 + .long ABI(sys_truncate64, sys_oabi_truncate64) + .long ABI(sys_ftruncate64, sys_oabi_ftruncate64) +/* 195 */ .long ABI(sys_stat64, sys_oabi_stat64) + .long ABI(sys_lstat64, sys_oabi_lstat64) + .long ABI(sys_fstat64, sys_oabi_fstat64) .long sys_lchown .long sys_getuid /* 200 */ .long sys_getgid @@ -235,11 +235,11 @@ __syscall_start: .long sys_pivot_root .long sys_mincore /* 220 */ .long sys_madvise - .long sys_fcntl64 + .long ABI(sys_fcntl64, sys_oabi_fcntl64) .long sys_ni_syscall /* TUX */ .long sys_ni_syscall .long sys_gettid -/* 225 */ .long sys_readahead +/* 225 */ .long ABI(sys_readahead, sys_oabi_readahead) .long sys_setxattr .long sys_lsetxattr .long sys_fsetxattr @@ -254,7 +254,7 @@ __syscall_start: .long sys_fremovexattr .long sys_tkill .long sys_sendfile64 -/* 240 */ .long sys_futex_wrapper +/* 240 */ .long sys_futex .long sys_sched_setaffinity .long sys_sched_getaffinity .long sys_io_setup @@ -265,8 +265,8 @@ __syscall_start: .long sys_exit_group .long sys_lookup_dcookie /* 250 */ .long sys_epoll_create - .long sys_epoll_ctl - .long sys_epoll_wait + .long ABI(sys_epoll_ctl, sys_oabi_epoll_ctl) + .long ABI(sys_epoll_wait, sys_oabi_epoll_wait) .long sys_remap_file_pages .long sys_ni_syscall /* sys_set_thread_area */ /* 255 */ .long sys_ni_syscall /* sys_get_thread_area */ @@ -280,11 +280,11 @@ __syscall_start: .long sys_clock_gettime .long sys_clock_getres /* 265 */ .long sys_clock_nanosleep - .long sys_statfs64 - .long sys_fstatfs64 + .long sys_statfs64_wrapper + .long sys_fstatfs64_wrapper .long sys_tgkill .long sys_utimes -/* 270 */ .long sys_arm_fadvise64_64_wrapper +/* 270 */ .long sys_arm_fadvise64_64 .long sys_pciconfig_iobase .long sys_pciconfig_read .long sys_pciconfig_write @@ -312,7 +312,7 @@ __syscall_start: /* 295 */ .long sys_getsockopt .long sys_sendmsg .long sys_recvmsg - .long sys_semop + .long ABI(sys_semop, sys_oabi_semop) .long sys_semget /* 300 */ .long sys_semctl .long sys_msgsnd @@ -326,19 +326,18 @@ __syscall_start: .long sys_add_key /* 310 */ .long sys_request_key .long sys_keyctl - .long sys_semtimedop + .long ABI(sys_semtimedop, sys_oabi_semtimedop) /* vserver */ .long sys_ni_syscall .long sys_ioprio_set /* 315 */ .long sys_ioprio_get .long sys_inotify_init .long sys_inotify_add_watch .long sys_inotify_rm_watch - .long sys_mbind_wrapper + .long sys_mbind /* 320 */ .long sys_get_mempolicy .long sys_set_mempolicy -__syscall_end: - .rept NR_syscalls - (__syscall_end 
- __syscall_start) / 4 + .rept NR_syscalls - (. - 100b) / 4 .long sys_ni_syscall .endr #endif diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c index e9a36304ec3..03532769a97 100644 --- a/arch/arm/kernel/dma-isa.c +++ b/arch/arm/kernel/dma-isa.c @@ -18,7 +18,7 @@ */ #include <linux/ioport.h> #include <linux/init.h> -#include <linux/pci.h> +#include <linux/dma-mapping.h> #include <asm/dma.h> #include <asm/io.h> @@ -65,37 +65,41 @@ static void isa_enable_dma(dmach_t channel, dma_t *dma) { if (dma->invalid) { unsigned long address, length; - unsigned int mode, direction; + unsigned int mode; + enum dma_data_direction direction; mode = channel & 3; switch (dma->dma_mode & DMA_MODE_MASK) { case DMA_MODE_READ: mode |= ISA_DMA_MODE_READ; - direction = PCI_DMA_FROMDEVICE; + direction = DMA_FROM_DEVICE; break; case DMA_MODE_WRITE: mode |= ISA_DMA_MODE_WRITE; - direction = PCI_DMA_TODEVICE; + direction = DMA_TO_DEVICE; break; case DMA_MODE_CASCADE: mode |= ISA_DMA_MODE_CASCADE; - direction = PCI_DMA_BIDIRECTIONAL; + direction = DMA_BIDIRECTIONAL; break; default: - direction = PCI_DMA_NONE; + direction = DMA_NONE; break; } - if (!dma->using_sg) { + if (!dma->sg) { /* * Cope with ISA-style drivers which expect cache * coherence. */ - dma->buf.dma_address = pci_map_single(NULL, - dma->buf.__address, dma->buf.length, + dma->sg = &dma->buf; + dma->sgcount = 1; + dma->buf.length = dma->count; + dma->buf.dma_address = dma_map_single(NULL, + dma->addr, dma->count, direction); } diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c index 2b788388423..5a0f4bc5da9 100644 --- a/arch/arm/kernel/dma.c +++ b/arch/arm/kernel/dma.c @@ -12,8 +12,6 @@ * DMA facilities. */ #include <linux/module.h> -#include <linux/slab.h> -#include <linux/mman.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/errno.h> @@ -23,8 +21,7 @@ #include <asm/mach/dma.h> DEFINE_SPINLOCK(dma_spin_lock); - -#if MAX_DMA_CHANNELS > 0 +EXPORT_SYMBOL(dma_spin_lock); static dma_t dma_chan[MAX_DMA_CHANNELS]; @@ -81,6 +78,7 @@ bad_dma: busy: return -EBUSY; } +EXPORT_SYMBOL(request_dma); /* * Free DMA channel @@ -112,6 +110,7 @@ void free_dma(dmach_t channel) bad_dma: printk(KERN_ERR "dma: trying to free DMA%d\n", channel); } +EXPORT_SYMBOL(free_dma); /* Set DMA Scatter-Gather list */ @@ -125,15 +124,15 @@ void set_dma_sg (dmach_t channel, struct scatterlist *sg, int nr_sg) dma->sg = sg; dma->sgcount = nr_sg; - dma->using_sg = 1; dma->invalid = 1; } +EXPORT_SYMBOL(set_dma_sg); /* Set DMA address * * Copy address to the structure, and set the invalid bit */ -void set_dma_addr (dmach_t channel, unsigned long physaddr) +void __set_dma_addr (dmach_t channel, void *addr) { dma_t *dma = dma_chan + channel; @@ -141,12 +140,11 @@ void set_dma_addr (dmach_t channel, unsigned long physaddr) printk(KERN_ERR "dma%d: altering DMA address while " "DMA active\n", channel); - dma->sg = &dma->buf; - dma->sgcount = 1; - dma->buf.__address = bus_to_virt(physaddr); - dma->using_sg = 0; + dma->sg = NULL; + dma->addr = addr; dma->invalid = 1; } +EXPORT_SYMBOL(__set_dma_addr); /* Set DMA byte count * @@ -160,12 +158,11 @@ void set_dma_count (dmach_t channel, unsigned long count) printk(KERN_ERR "dma%d: altering DMA count while " "DMA active\n", channel); - dma->sg = &dma->buf; - dma->sgcount = 1; - dma->buf.length = count; - dma->using_sg = 0; + dma->sg = NULL; + dma->count = count; dma->invalid = 1; } +EXPORT_SYMBOL(set_dma_count); /* Set DMA direction mode */ @@ -180,6 +177,7 @@ void set_dma_mode (dmach_t channel, dmamode_t mode) 
dma->dma_mode = mode; dma->invalid = 1; } +EXPORT_SYMBOL(set_dma_mode); /* Enable DMA channel */ @@ -200,6 +198,7 @@ free_dma: printk(KERN_ERR "dma%d: trying to enable free DMA\n", channel); BUG(); } +EXPORT_SYMBOL(enable_dma); /* Disable DMA channel */ @@ -220,6 +219,7 @@ free_dma: printk(KERN_ERR "dma%d: trying to disable free DMA\n", channel); BUG(); } +EXPORT_SYMBOL(disable_dma); /* * Is the specified DMA channel active? @@ -233,6 +233,7 @@ void set_dma_page(dmach_t channel, char pagenr) { printk(KERN_ERR "dma%d: trying to set_dma_page\n", channel); } +EXPORT_SYMBOL(set_dma_page); void set_dma_speed(dmach_t channel, int cycle_ns) { @@ -243,6 +244,7 @@ void set_dma_speed(dmach_t channel, int cycle_ns) ret = dma->d_ops->setspeed(channel, dma, cycle_ns); dma->speed = ret; } +EXPORT_SYMBOL(set_dma_speed); int get_dma_residue(dmach_t channel) { @@ -254,49 +256,12 @@ int get_dma_residue(dmach_t channel) return ret; } +EXPORT_SYMBOL(get_dma_residue); -void __init init_dma(void) +static int __init init_dma(void) { arch_dma_init(dma_chan); -} - -#else - -int request_dma(dmach_t channel, const char *device_id) -{ - return -EINVAL; -} - -int get_dma_residue(dmach_t channel) -{ return 0; } -#define GLOBAL_ALIAS(_a,_b) asm (".set " #_a "," #_b "; .globl " #_a) -GLOBAL_ALIAS(disable_dma, get_dma_residue); -GLOBAL_ALIAS(enable_dma, get_dma_residue); -GLOBAL_ALIAS(free_dma, get_dma_residue); -GLOBAL_ALIAS(get_dma_list, get_dma_residue); -GLOBAL_ALIAS(set_dma_mode, get_dma_residue); -GLOBAL_ALIAS(set_dma_page, get_dma_residue); -GLOBAL_ALIAS(set_dma_count, get_dma_residue); -GLOBAL_ALIAS(set_dma_addr, get_dma_residue); -GLOBAL_ALIAS(set_dma_sg, get_dma_residue); -GLOBAL_ALIAS(set_dma_speed, get_dma_residue); -GLOBAL_ALIAS(init_dma, get_dma_residue); - -#endif - -EXPORT_SYMBOL(request_dma); -EXPORT_SYMBOL(free_dma); -EXPORT_SYMBOL(enable_dma); -EXPORT_SYMBOL(disable_dma); -EXPORT_SYMBOL(set_dma_addr); -EXPORT_SYMBOL(set_dma_count); -EXPORT_SYMBOL(set_dma_mode); -EXPORT_SYMBOL(set_dma_page); -EXPORT_SYMBOL(get_dma_residue); -EXPORT_SYMBOL(set_dma_sg); -EXPORT_SYMBOL(set_dma_speed); - -EXPORT_SYMBOL(dma_spin_lock); +core_initcall(init_dma); diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c index dceb826bd21..74ea29c3205 100644 --- a/arch/arm/kernel/ecard.c +++ b/arch/arm/kernel/ecard.c @@ -40,6 +40,7 @@ #include <linux/proc_fs.h> #include <linux/device.h> #include <linux/init.h> +#include <linux/mutex.h> #include <asm/dma.h> #include <asm/ecard.h> @@ -206,7 +207,7 @@ static void ecard_task_readbytes(struct ecard_request *req) static DECLARE_WAIT_QUEUE_HEAD(ecard_wait); static struct ecard_request *ecard_req; -static DECLARE_MUTEX(ecard_sem); +static DEFINE_MUTEX(ecard_mutex); /* * Set up the expansion card daemon's page tables. @@ -299,7 +300,7 @@ static void ecard_call(struct ecard_request *req) req->complete = &completion; - down(&ecard_sem); + mutex_lock(&ecard_mutex); ecard_req = req; wake_up(&ecard_wait); @@ -307,7 +308,7 @@ static void ecard_call(struct ecard_request *req) * Now wait for kecardd to run. 
*/ wait_for_completion(&completion); - up(&ecard_sem); + mutex_unlock(&ecard_mutex); } /* ======================= Mid-level card control ===================== */ @@ -1146,9 +1147,11 @@ static void ecard_drv_shutdown(struct device *dev) struct ecard_driver *drv = ECARD_DRV(dev->driver); struct ecard_request req; - if (drv->shutdown) - drv->shutdown(ec); - ecard_release(ec); + if (dev->driver) { + if (drv->shutdown) + drv->shutdown(ec); + ecard_release(ec); + } /* * If this card has a loader, call the reset handler. @@ -1163,9 +1166,6 @@ static void ecard_drv_shutdown(struct device *dev) int ecard_register_driver(struct ecard_driver *drv) { drv->drv.bus = &ecard_bus_type; - drv->drv.probe = ecard_drv_probe; - drv->drv.remove = ecard_drv_remove; - drv->drv.shutdown = ecard_drv_shutdown; return driver_register(&drv->drv); } @@ -1194,6 +1194,9 @@ struct bus_type ecard_bus_type = { .name = "ecard", .dev_attrs = ecard_dev_attrs, .match = ecard_match, + .probe = ecard_drv_probe, + .remove = ecard_drv_remove, + .shutdown = ecard_drv_shutdown, }; static int ecard_bus_init(void) diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index d9fb819bf7c..d401d908c46 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -3,6 +3,7 @@ * * Copyright (C) 1996,1997,1998 Russell King. * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk) + * nommu support by Hyok S. Choi (hyok.choi@samsung.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -18,8 +19,6 @@ #include <asm/memory.h> #include <asm/glue.h> #include <asm/vfpmacros.h> -#include <asm/hardware.h> /* should be moved into entry-macro.S */ -#include <asm/arch/irqs.h> /* should be moved into entry-macro.S */ #include <asm/arch/entry-macro.S> #include "entry-header.S" @@ -106,14 +105,24 @@ common_invalid: /* * SVC mode handlers */ + +#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) +#define SPFIX(code...) code +#else +#define SPFIX(code...) +#endif + .macro svc_entry sub sp, sp, #S_FRAME_SIZE + SPFIX( tst sp, #4 ) + SPFIX( bicne sp, sp, #4 ) stmib sp, {r1 - r12} ldmia r0, {r1 - r3} add r5, sp, #S_SP @ here for interlock avoidance mov r4, #-1 @ "" "" "" "" add r0, sp, #S_FRAME_SIZE @ "" "" "" "" + SPFIX( addne r0, r0, #4 ) str r1, [sp] @ save the "real" r0 copied @ from the exception stack @@ -304,7 +313,14 @@ __pabt_svc: /* * User mode handlers + * + * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE */ + +#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7) +#error "sizeof(struct pt_regs) must be a multiple of 8" +#endif + .macro usr_entry sub sp, sp, #S_FRAME_SIZE stmib sp, {r1 - r12} @@ -540,7 +556,11 @@ ENTRY(__switch_to) add ip, r1, #TI_CPU_SAVE ldr r3, [r2, #TI_TP_VALUE] stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack +#ifndef CONFIG_MMU + add r2, r2, #TI_CPU_DOMAIN +#else ldr r6, [r2, #TI_CPU_DOMAIN]! +#endif #if __LINUX_ARM_ARCH__ >= 6 #ifdef CONFIG_CPU_MPCORE clrex @@ -558,7 +578,9 @@ ENTRY(__switch_to) mov r4, #0xffff0fff str r3, [r4, #-15] @ TLS val at 0xffff0ff0 #endif +#ifdef CONFIG_MMU mcr p15, 0, r6, c3, c0, 0 @ Set domain register +#endif #ifdef CONFIG_VFP @ Always disable VFP so we can lazily save/restore the old @ state. This occurs in the context of the previous thread. 
@@ -614,6 +636,47 @@ __kuser_helper_start: /* * Reference prototype: * + * void __kernel_memory_barrier(void) + * + * Input: + * + * lr = return address + * + * Output: + * + * none + * + * Clobbered: + * + * the Z flag might be lost + * + * Definition and user space usage example: + * + * typedef void (__kernel_dmb_t)(void); + * #define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0) + * + * Apply any needed memory barrier to preserve consistency with data modified + * manually and __kuser_cmpxchg usage. + * + * This could be used as follows: + * + * #define __kernel_dmb() \ + * asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \ + * : : : "lr","cc" ) + */ + +__kuser_memory_barrier: @ 0xffff0fa0 + +#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP) + mcr p15, 0, r0, c7, c10, 5 @ dmb +#endif + mov pc, lr + + .align 5 + +/* + * Reference prototype: + * * int __kernel_cmpxchg(int oldval, int newval, int *ptr) * * Input: @@ -642,6 +705,8 @@ __kuser_helper_start: * The C flag is also set if *ptr was changed to allow for assembly * optimization in the calling code. * + * Note: this routine already includes memory barriers as needed. + * * For example, a user space atomic_add implementation could look like this: * * #define atomic_add(ptr, val) \ @@ -670,8 +735,11 @@ __kuser_cmpxchg: @ 0xffff0fc0 * The kernel itself must perform the operation. * A special ghost syscall is used for that (see traps.c). */ + stmfd sp!, {r7, lr} + mov r7, #0xff00 @ 0xfff0 into r7 for EABI + orr r7, r7, #0xf0 swi #0x9ffff0 - mov pc, lr + ldmfd sp!, {r7, pc} #elif __LINUX_ARM_ARCH__ < 6 @@ -698,10 +766,16 @@ __kuser_cmpxchg: @ 0xffff0fc0 #else +#ifdef CONFIG_SMP + mcr p15, 0, r0, c7, c10, 5 @ dmb +#endif ldrex r3, [r2] subs r3, r3, r0 strexeq r3, r1, [r2] rsbs r0, r3, #0 +#ifdef CONFIG_SMP + mcr p15, 0, r0, c7, c10, 5 @ dmb +#endif mov pc, lr #endif diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 066597f4345..2b92ce85f97 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -48,8 +48,7 @@ work_pending: mov r0, sp @ 'regs' mov r2, why @ 'syscall' bl do_notify_resume - disable_irq @ disable interrupts - b no_work_pending + b ret_slow_syscall @ Check work again work_resched: bl schedule @@ -99,20 +98,14 @@ ENTRY(ret_from_fork) run on an ARM7 and we can save a couple of instructions. --pb */ #ifdef CONFIG_CPU_ARM710 - .macro arm710_bug_check, instr, temp - and \temp, \instr, #0x0f000000 @ check for SWI - teq \temp, #0x0f000000 - bne .Larm700bug - .endm - -.Larm700bug: +#define A710(code...) code +.Larm710bug: ldmia sp, {r0 - lr}^ @ Get calling r0 - lr mov r0, r0 add sp, sp, #S_FRAME_SIZE subs pc, lr, #4 #else - .macro arm710_bug_check, instr, temp - .endm +#define A710(code...) #endif .align 5 @@ -130,14 +123,50 @@ ENTRY(vector_swi) /* * Get the system call number. */ + +#if defined(CONFIG_OABI_COMPAT) + + /* + * If we have CONFIG_OABI_COMPAT then we need to look at the swi + * value to determine if it is an EABI or an old ABI call. + */ #ifdef CONFIG_ARM_THUMB + tst r8, #PSR_T_BIT + movne r10, #0 @ no thumb OABI emulation + ldreq r10, [lr, #-4] @ get SWI instruction +#else + ldr r10, [lr, #-4] @ get SWI instruction + A710( and ip, r10, #0x0f000000 @ check for SWI ) + A710( teq ip, #0x0f000000 ) + A710( bne .Larm710bug ) +#endif + +#elif defined(CONFIG_AEABI) + + /* + * Pure EABI user space always put syscall number into scno (r7). 
+ */ + A710( ldr ip, [lr, #-4] @ get SWI instruction ) + A710( and ip, ip, #0x0f000000 @ check for SWI ) + A710( teq ip, #0x0f000000 ) + A710( bne .Larm710bug ) + +#elif defined(CONFIG_ARM_THUMB) + + /* Legacy ABI only, possibly thumb mode. */ tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in ldreq scno, [lr, #-4] + #else + + /* Legacy ABI only. */ ldr scno, [lr, #-4] @ get SWI instruction + A710( and ip, scno, #0x0f000000 @ check for SWI ) + A710( teq ip, #0x0f000000 ) + A710( bne .Larm710bug ) + #endif - arm710_bug_check scno, ip #ifdef CONFIG_ALIGNMENT_TRAP ldr ip, __cr_alignment @@ -146,18 +175,31 @@ ENTRY(vector_swi) #endif enable_irq - str r4, [sp, #-S_OFF]! @ push fifth arg - get_thread_info tsk + adr tbl, sys_call_table @ load syscall table pointer ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing + +#if defined(CONFIG_OABI_COMPAT) + /* + * If the swi argument is zero, this is an EABI call and we do nothing. + * + * If this is an old ABI call, get the syscall number into scno and + * get the old ABI syscall table address. + */ + bics r10, r10, #0xff000000 + eorne scno, r10, #__NR_OABI_SYSCALL_BASE + ldrne tbl, =sys_oabi_call_table +#elif !defined(CONFIG_AEABI) bic scno, scno, #0xff000000 @ mask off SWI op-code eor scno, scno, #__NR_SYSCALL_BASE @ check OS number - adr tbl, sys_call_table @ load syscall table pointer +#endif + + stmdb sp!, {r4, r5} @ push fifth and sixth args tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls? bne __sys_trace - adr lr, ret_fast_syscall @ return address cmp scno, #NR_syscalls @ check upper syscall limit + adr lr, ret_fast_syscall @ return address ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine add r1, sp, #S_OFF @@ -172,11 +214,13 @@ ENTRY(vector_swi) * context switches, and waiting for our parent to respond. */ __sys_trace: + mov r2, scno add r1, sp, #S_OFF mov r0, #0 @ trace entry [IP = 0] bl syscall_trace adr lr, __sys_trace_return @ return address + mov scno, r0 @ syscall number (possibly new) add r1, sp, #S_R0 + S_OFF @ pointer to regs cmp scno, #NR_syscalls @ check upper syscall limit ldmccia r1, {r0 - r3} @ have to reload r0 - r3 @@ -185,6 +229,7 @@ __sys_trace: __sys_trace_return: str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 + mov r2, scno mov r1, sp mov r0, #1 @ trace exit [IP = 1] bl syscall_trace @@ -196,19 +241,33 @@ __sys_trace_return: __cr_alignment: .word cr_alignment #endif + .ltorg + +/* + * This is the syscall table declaration for native ABI syscalls. + * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall. 
+ */ +#define ABI(native, compat) native +#ifdef CONFIG_AEABI +#define OBSOLETE(syscall) sys_ni_syscall +#else +#define OBSOLETE(syscall) syscall +#endif .type sys_call_table, #object ENTRY(sys_call_table) #include "calls.S" +#undef ABI +#undef OBSOLETE /*============================================================================ * Special system call wrappers */ @ r0 = syscall number -@ r5 = syscall table +@ r8 = syscall table .type sys_syscall, #function sys_syscall: - eor scno, r0, #__NR_SYSCALL_BASE + eor scno, r0, #__NR_OABI_SYSCALL_BASE cmp scno, #__NR_syscall - __NR_SYSCALL_BASE cmpne scno, #NR_syscalls @ check range stmloia sp, {r5, r6} @ shuffle args @@ -256,21 +315,15 @@ sys_sigaltstack_wrapper: ldr r2, [sp, #S_OFF + S_SP] b do_sigaltstack -sys_futex_wrapper: - str r5, [sp, #4] @ push sixth arg - b sys_futex +sys_statfs64_wrapper: + teq r1, #88 + moveq r1, #84 + b sys_statfs64 -sys_arm_fadvise64_64_wrapper: - str r5, [sp, #4] @ push r5 to stack - b sys_arm_fadvise64_64 - -sys_mbind_wrapper: - str r5, [sp, #4] - b sys_mbind - -sys_ipc_wrapper: - str r5, [sp, #4] @ push sixth arg - b sys_ipc +sys_fstatfs64_wrapper: + teq r1, #88 + moveq r1, #84 + b sys_fstatfs64 /* * Note: off_4k (r5) is always units of 4K. If we can't do the requested @@ -288,3 +341,49 @@ sys_mmap2: str r5, [sp, #4] b do_mmap2 #endif + +#ifdef CONFIG_OABI_COMPAT + +/* + * These are syscalls with argument register differences + */ + +sys_oabi_pread64: + stmia sp, {r3, r4} + b sys_pread64 + +sys_oabi_pwrite64: + stmia sp, {r3, r4} + b sys_pwrite64 + +sys_oabi_truncate64: + mov r3, r2 + mov r2, r1 + b sys_truncate64 + +sys_oabi_ftruncate64: + mov r3, r2 + mov r2, r1 + b sys_ftruncate64 + +sys_oabi_readahead: + str r3, [sp] + mov r3, r2 + mov r2, r1 + b sys_readahead + +/* + * Let's declare a second syscall table for old ABI binaries + * using the compatibility syscall entries. + */ +#define ABI(native, compat) compat +#define OBSOLETE(syscall) syscall + + .type sys_oabi_call_table, #object +ENTRY(sys_oabi_call_table) +#include "calls.S" +#undef ABI +#undef OBSOLETE + +#endif + diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 648cfff9313..55c99cdab7d 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -19,6 +19,7 @@ @ @ Most of the stack format comes from struct pt_regs, but with @ the addition of 8 bytes for storing syscall args 5 and 6. +@ This _must_ remain a multiple of 8 for EABI. 
@ #define S_OFF 8 diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c index 9299dfc2569..1ec3f7faa25 100644 --- a/arch/arm/kernel/fiq.c +++ b/arch/arm/kernel/fiq.c @@ -101,7 +101,7 @@ void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs) ldmia %1, {r8 - r14}\n\ msr cpsr_c, %0 @ return to SVC mode\n\ mov r0, r0\n\ - ldmea fp, {fp, sp, pc}" + ldmfd sp, {fp, sp, pc}" : "=&r" (tmp) : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE)); } @@ -119,7 +119,7 @@ void __attribute__((naked)) get_fiq_regs(struct pt_regs *regs) stmia %1, {r8 - r14}\n\ msr cpsr_c, %0 @ return to SVC mode\n\ mov r0, r0\n\ - ldmea fp, {fp, sp, pc}" + ldmfd sp, {fp, sp, pc}" : "=&r" (tmp) : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE)); } diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 8d8748407cb..1aca1775b28 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -17,7 +17,6 @@ #include <asm/assembler.h> #include <asm/domain.h> -#include <asm/mach-types.h> #include <asm/procinfo.h> #include <asm/ptrace.h> #include <asm/asm-offsets.h> @@ -34,6 +33,8 @@ #define MACHINFO_PGOFFIO 12 #define MACHINFO_NAME 16 +#define KERNEL_RAM_ADDR (PAGE_OFFSET + TEXT_OFFSET) + /* * swapper_pg_dir is the virtual address of the initial page table. * We place the page tables 16K below KERNEL_RAM_ADDR. Therefore, we must @@ -83,7 +84,7 @@ ENTRY(stext) @ and irqs disabled bl __lookup_processor_type @ r5=procinfo r9=cpuid movs r10, r5 @ invalid processor (r5=0)? - beq __error_p @ yes, error 'p' + beq __error_p @ yes, error 'p' bl __lookup_machine_type @ r5=machinfo movs r8, r5 @ invalid machine (r5=0)? beq __error_a @ yes, error 'a' @@ -250,12 +251,11 @@ __turn_mmu_on: * r10 = procinfo * * Returns: - * r0, r3, r5, r6, r7 corrupted + * r0, r3, r6, r7 corrupted * r4 = physical page table address */ .type __create_page_tables, %function __create_page_tables: - ldr r5, [r8, #MACHINFO_PHYSRAM] @ physram pgtbl r4 @ page table address /* @@ -302,7 +302,7 @@ __create_page_tables: * Then map first 1MB of ram in case it contains our boot params. */ add r0, r4, #PAGE_OFFSET >> 18 - orr r6, r5, r7 + orr r6, r7, #PHYS_OFFSET str r6, [r0] #ifdef CONFIG_XIP_KERNEL @@ -310,7 +310,7 @@ __create_page_tables: * Map some ram to cover our .data and .bss areas. * Mapping 3MB should be plenty. */ - sub r3, r4, r5 + sub r3, r4, #PHYS_OFFSET mov r3, r3, lsr #20 add r0, r0, r3, lsl #2 add r6, r6, r3, lsl #20 @@ -343,16 +343,12 @@ __create_page_tables: bne 1b #if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS) /* - * If we're using the NetWinder, we need to map in - * the 16550-type serial port for the debug messages + * If we're using the NetWinder or CATS, we also need to map + * in the 16550-type serial port for the debug messages */ - teq r1, #MACH_TYPE_NETWINDER - teqne r1, #MACH_TYPE_CATS - bne 1f add r0, r4, #0xff000000 >> 18 orr r3, r7, #0x7c000000 str r3, [r0] -1: #endif #ifdef CONFIG_ARCH_RPC /* diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index d7099dbbb87..1d50d2b98f5 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -684,8 +684,12 @@ int setup_irq(unsigned int irq, struct irqaction *new) spin_lock_irqsave(&irq_controller_lock, flags); p = &desc->action; if ((old = *p) != NULL) { - /* Can't share interrupts unless both agree to */ - if (!(old->flags & new->flags & SA_SHIRQ)) { + /* + * Can't share interrupts unless both agree to and are + * the same type.
+ */ + if (!(old->flags & new->flags & SA_SHIRQ) || + (~old->flags & new->flags) & SA_TRIGGER_MASK) { spin_unlock_irqrestore(&irq_controller_lock, flags); return -EBUSY; } @@ -705,6 +709,13 @@ int setup_irq(unsigned int irq, struct irqaction *new) desc->running = 0; desc->pending = 0; desc->disable_depth = 1; + + if (new->flags & SA_TRIGGER_MASK && + desc->chip->set_type) { + unsigned int type = new->flags & SA_TRIGGER_MASK; + desc->chip->set_type(irq, type); + } + if (!desc->noautoenable) { desc->disable_depth = 0; desc->chip->unmask(irq); @@ -1027,7 +1038,6 @@ void __init init_irq_proc(void) void __init init_IRQ(void) { struct irqdesc *desc; - extern void init_dma(void); int irq; #ifdef CONFIG_SMP @@ -1041,7 +1051,6 @@ void __init init_IRQ(void) } init_arch_irq(); - init_dma(); } static int __init noirqdebug_setup(char *str) diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 6055e1427ba..055bf5d2889 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -101,6 +101,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, break; case R_ARM_PC24: + case R_ARM_CALL: + case R_ARM_JUMP24: offset = (*(u32 *)loc & 0x00ffffff) << 2; if (offset & 0x02000000) offset -= 0x04000000; diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 30494aab829..4b4e4cf79c8 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -28,10 +28,9 @@ #include <linux/init.h> #include <linux/cpu.h> -#include <asm/system.h> -#include <asm/io.h> #include <asm/leds.h> #include <asm/processor.h> +#include <asm/system.h> #include <asm/uaccess.h> #include <asm/mach/time.h> @@ -343,10 +342,10 @@ void flush_thread(void) void release_thread(struct task_struct *dead_task) { #if defined(CONFIG_VFP) - vfp_release_thread(&dead_task->thread_info->vfpstate); + vfp_release_thread(&task_thread_info(dead_task)->vfpstate); #endif #if defined(CONFIG_IWMMXT) - iwmmxt_task_release(dead_task->thread_info); + iwmmxt_task_release(task_thread_info(dead_task)); #endif } @@ -356,10 +355,9 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start, unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs) { - struct thread_info *thread = p->thread_info; - struct pt_regs *childregs; + struct thread_info *thread = task_thread_info(p); + struct pt_regs *childregs = task_pt_regs(p); - childregs = (void *)thread + THREAD_START_SP - sizeof(*regs); *childregs = *regs; childregs->ARM_r0 = 0; childregs->ARM_sp = stack_start; @@ -461,8 +459,8 @@ unsigned long get_wchan(struct task_struct *p) if (!p || p == current || p->state == TASK_RUNNING) return 0; - stack_start = (unsigned long)(p->thread_info + 1); - stack_end = ((unsigned long)p->thread_info) + THREAD_SIZE; + stack_start = (unsigned long)end_of_stack(p); + stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE; fp = thread_saved_fp(p); do { diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 9a340e790da..7b6256bb590 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -55,23 +55,6 @@ #endif /* - * Get the address of the live pt_regs for the specified task. - * These are saved onto the top kernel stack when the process - * is not running. - * - * Note: if a user thread is execve'd from kernel space, the - * kernel stack will not be empty on entry to the kernel, so - * ptracing these tasks will fail. 
- */ -static inline struct pt_regs * -get_user_regs(struct task_struct *task) -{ - return (struct pt_regs *) - ((unsigned long)task->thread_info + THREAD_SIZE - - 8 - sizeof(struct pt_regs)); -} - -/* * this routine will get a word off of the processes privileged stack. * the offset is how far from the base addr as stored in the THREAD. * this routine assumes that all the privileged stacks are in our @@ -79,7 +62,7 @@ get_user_regs(struct task_struct *task) */ static inline long get_user_reg(struct task_struct *task, int offset) { - return get_user_regs(task)->uregs[offset]; + return task_pt_regs(task)->uregs[offset]; } /* @@ -91,7 +74,7 @@ static inline long get_user_reg(struct task_struct *task, int offset) static inline int put_user_reg(struct task_struct *task, int offset, long data) { - struct pt_regs newregs, *regs = get_user_regs(task); + struct pt_regs newregs, *regs = task_pt_regs(task); int ret = -EINVAL; newregs = *regs; @@ -242,6 +225,15 @@ get_branch_address(struct task_struct *child, unsigned long pc, unsigned long in */ long aluop1, aluop2, ccbit; + if ((insn & 0x0fffffd0) == 0x012fff10) { + /* + * bx or blx + */ + alt = get_user_reg(child, insn & 15); + break; + } + + if ((insn & 0xf000) != 0xf000) break; @@ -412,7 +404,7 @@ void ptrace_set_bpt(struct task_struct *child) u32 insn; int res; - regs = get_user_regs(child); + regs = task_pt_regs(child); pc = instruction_pointer(regs); if (thumb_mode(regs)) { @@ -563,7 +555,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off, */ static int ptrace_getregs(struct task_struct *tsk, void __user *uregs) { - struct pt_regs *regs = get_user_regs(tsk); + struct pt_regs *regs = task_pt_regs(tsk); return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0; } @@ -578,7 +570,7 @@ static int ptrace_setregs(struct task_struct *tsk, void __user *uregs) ret = -EFAULT; if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) { - struct pt_regs *regs = get_user_regs(tsk); + struct pt_regs *regs = task_pt_regs(tsk); ret = -EINVAL; if (valid_user_regs(&newregs)) { @@ -595,7 +587,7 @@ static int ptrace_setregs(struct task_struct *tsk, void __user *uregs) */ static int ptrace_getfpregs(struct task_struct *tsk, void __user *ufp) { - return copy_to_user(ufp, &tsk->thread_info->fpstate, + return copy_to_user(ufp, &task_thread_info(tsk)->fpstate, sizeof(struct user_fp)) ? -EFAULT : 0; } @@ -604,7 +596,7 @@ static int ptrace_getfpregs(struct task_struct *tsk, void __user *ufp) */ static int ptrace_setfpregs(struct task_struct *tsk, void __user *ufp) { - struct thread_info *thread = tsk->thread_info; + struct thread_info *thread = task_thread_info(tsk); thread->used_cp[1] = thread->used_cp[2] = 1; return copy_from_user(&thread->fpstate, ufp, sizeof(struct user_fp)) ? 
-EFAULT : 0; @@ -617,7 +609,7 @@ static int ptrace_setfpregs(struct task_struct *tsk, void __user *ufp) */ static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp) { - struct thread_info *thread = tsk->thread_info; + struct thread_info *thread = task_thread_info(tsk); void *ptr = &thread->fpstate; if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) @@ -634,7 +626,7 @@ static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp) */ static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp) { - struct thread_info *thread = tsk->thread_info; + struct thread_info *thread = task_thread_info(tsk); void *ptr = &thread->fpstate; if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) @@ -770,10 +762,15 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) #endif case PTRACE_GET_THREAD_AREA: - ret = put_user(child->thread_info->tp_value, + ret = put_user(task_thread_info(child)->tp_value, (unsigned long __user *) data); break; + case PTRACE_SET_SYSCALL: + ret = 0; + child->ptrace_message = data; + break; + default: ret = ptrace_request(child, request, addr, data); break; @@ -782,14 +779,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) return ret; } -asmlinkage void syscall_trace(int why, struct pt_regs *regs) +asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) { unsigned long ip; if (!test_thread_flag(TIF_SYSCALL_TRACE)) - return; + return scno; if (!(current->ptrace & PT_PTRACED)) - return; + return scno; /* * Save IP. IP is used to denote syscall entry/exit: @@ -798,6 +795,8 @@ asmlinkage void syscall_trace(int why, struct pt_regs *regs) ip = regs->ARM_ip; regs->ARM_ip = why; + current->ptrace_message = scno; + /* the 0x80 provides a way for the tracing parent to distinguish between a syscall stop and SIGTRAP delivery */ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) @@ -812,4 +811,6 @@ asmlinkage void syscall_trace(int why, struct pt_regs *regs) current->exit_code = 0; } regs->ARM_ip = ip; + + return current->ptrace_message; } diff --git a/arch/arm/kernel/semaphore.c b/arch/arm/kernel/semaphore.c index 4c31f292305..981fe5c6ccb 100644 --- a/arch/arm/kernel/semaphore.c +++ b/arch/arm/kernel/semaphore.c @@ -177,41 +177,42 @@ int __down_trylock(struct semaphore * sem) * ip contains the semaphore pointer on entry. Save the C-clobbered * registers (r0 to r3 and lr), but not ip, as we use it as a return * value in some cases.. + * To remain AAPCS compliant (64-bit stack align) we save r4 as well. 
*/ asm(" .section .sched.text,\"ax\",%progbits \n\ .align 5 \n\ .globl __down_failed \n\ __down_failed: \n\ - stmfd sp!, {r0 - r3, lr} \n\ + stmfd sp!, {r0 - r4, lr} \n\ mov r0, ip \n\ bl __down \n\ - ldmfd sp!, {r0 - r3, pc} \n\ + ldmfd sp!, {r0 - r4, pc} \n\ \n\ .align 5 \n\ .globl __down_interruptible_failed \n\ __down_interruptible_failed: \n\ - stmfd sp!, {r0 - r3, lr} \n\ + stmfd sp!, {r0 - r4, lr} \n\ mov r0, ip \n\ bl __down_interruptible \n\ mov ip, r0 \n\ - ldmfd sp!, {r0 - r3, pc} \n\ + ldmfd sp!, {r0 - r4, pc} \n\ \n\ .align 5 \n\ .globl __down_trylock_failed \n\ __down_trylock_failed: \n\ - stmfd sp!, {r0 - r3, lr} \n\ + stmfd sp!, {r0 - r4, lr} \n\ mov r0, ip \n\ bl __down_trylock \n\ mov ip, r0 \n\ - ldmfd sp!, {r0 - r3, pc} \n\ + ldmfd sp!, {r0 - r4, pc} \n\ \n\ .align 5 \n\ .globl __up_wakeup \n\ __up_wakeup: \n\ - stmfd sp!, {r0 - r3, lr} \n\ + stmfd sp!, {r0 - r4, lr} \n\ mov r0, ip \n\ bl __up \n\ - ldmfd sp!, {r0 - r3, pc} \n\ + ldmfd sp!, {r0 - r4, pc} \n\ "); EXPORT_SYMBOL(__down_failed); diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 85774165e9f..c45d10d07bd 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -26,8 +26,6 @@ #include <asm/cpu.h> #include <asm/elf.h> -#include <asm/hardware.h> -#include <asm/io.h> #include <asm/procinfo.h> #include <asm/setup.h> #include <asm/mach-types.h> @@ -207,7 +205,7 @@ static const char *proc_arch[] = { "5TE", "5TEJ", "6TEJ", - "?(10)", + "7", "?(11)", "?(12)", "?(13)", @@ -260,14 +258,17 @@ int cpu_architecture(void) { int cpu_arch; - if ((processor_id & 0x0000f000) == 0) { + if ((processor_id & 0x0008f000) == 0) { cpu_arch = CPU_ARCH_UNKNOWN; - } else if ((processor_id & 0x0000f000) == 0x00007000) { + } else if ((processor_id & 0x0008f000) == 0x00007000) { cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3; - } else { + } else if ((processor_id & 0x00080000) == 0x00000000) { cpu_arch = (processor_id >> 16) & 7; if (cpu_arch) cpu_arch += CPU_ARCH_ARMv3; + } else { + /* the revised CPUID */ + cpu_arch = ((processor_id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6; } return cpu_arch; @@ -865,11 +866,11 @@ static int c_show(struct seq_file *m, void *v) seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24); seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]); - if ((processor_id & 0x0000f000) == 0x00000000) { + if ((processor_id & 0x0008f000) == 0x00000000) { /* pre-ARM7 */ seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4); } else { - if ((processor_id & 0x0000f000) == 0x00007000) { + if ((processor_id & 0x0008f000) == 0x00007000) { /* ARM7 */ seq_printf(m, "CPU variant\t: 0x%02x\n", (processor_id >> 16) & 127); diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index a917e3dd366..a0cd0a90a10 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -30,15 +30,21 @@ #define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)) /* + * With EABI, the syscall number has to be loaded into r7. + */ +#define MOV_R7_NR_SIGRETURN (0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE)) +#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) + +/* * For Thumb syscalls, we pass the syscall number via r7. We therefore * need two 16-bit instructions. 
*/ #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE)) #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) -const unsigned long sigreturn_codes[4] = { - SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, - SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN +const unsigned long sigreturn_codes[7] = { + MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, + MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, }; static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall); @@ -189,7 +195,7 @@ struct aux_sigframe { struct sigframe { struct sigcontext sc; unsigned long extramask[_NSIG_WORDS-1]; - unsigned long retcode; + unsigned long retcode[2]; struct aux_sigframe aux __attribute__((aligned(8))); }; @@ -198,7 +204,7 @@ struct rt_sigframe { void __user *puc; struct siginfo info; struct ucontext uc; - unsigned long retcode; + unsigned long retcode[2]; struct aux_sigframe aux __attribute__((aligned(8))); }; @@ -436,12 +442,13 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka, if (ka->sa.sa_flags & SA_RESTORER) { retcode = (unsigned long)ka->sa.sa_restorer; } else { - unsigned int idx = thumb; + unsigned int idx = thumb << 1; if (ka->sa.sa_flags & SA_SIGINFO) - idx += 2; + idx += 3; - if (__put_user(sigreturn_codes[idx], rc)) + if (__put_user(sigreturn_codes[idx], rc) || + __put_user(sigreturn_codes[idx+1], rc+1)) return 1; if (cpsr & MODE32_BIT) { @@ -456,7 +463,7 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka, * the return code written onto the stack. */ flush_icache_range((unsigned long)rc, - (unsigned long)(rc + 1)); + (unsigned long)(rc + 2)); retcode = ((unsigned long)rc) + thumb; } @@ -488,7 +495,7 @@ setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *reg } if (err == 0) - err = setup_return(regs, ka, &frame->retcode, frame, usig); + err = setup_return(regs, ka, frame->retcode, frame, usig); return err; } @@ -522,7 +529,7 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err == 0) - err = setup_return(regs, ka, &frame->retcode, frame, usig); + err = setup_return(regs, ka, frame->retcode, frame, usig); if (err == 0) { /* @@ -595,23 +602,22 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, */ ret |= !valid_user_regs(regs); - /* - * Block the signal if we were unsuccessful. - */ if (ret != 0) { - spin_lock_irq(&tsk->sighand->siglock); - sigorsets(&tsk->blocked, &tsk->blocked, - &ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(&tsk->blocked, sig); - recalc_sigpending(); - spin_unlock_irq(&tsk->sighand->siglock); + force_sigsegv(sig, tsk); + return; } - if (ret == 0) - return; + /* + * Block the signal if we were successful. 
+ */ + spin_lock_irq(&tsk->sighand->siglock); + sigorsets(&tsk->blocked, &tsk->blocked, + &ka->sa.sa_mask); + if (!(ka->sa.sa_flags & SA_NODEFER)) + sigaddset(&tsk->blocked, sig); + recalc_sigpending(); + spin_unlock_irq(&tsk->sighand->siglock); - force_sigsegv(sig, tsk); } /* diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h index 91d26faca62..9991049c522 100644 --- a/arch/arm/kernel/signal.h +++ b/arch/arm/kernel/signal.h @@ -9,4 +9,4 @@ */ #define KERN_SIGRETURN_CODE 0xffff0500 -extern const unsigned long sigreturn_codes[4]; +extern const unsigned long sigreturn_codes[7]; diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index e55ea952f7a..7338948bd7d 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -114,7 +114,7 @@ int __cpuinit __cpu_up(unsigned int cpu) * We need to tell the secondary core where to find * its stack and the page tables. */ - secondary_data.stack = (void *)idle->thread_info + THREAD_START_SP; + secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; secondary_data.pgdir = virt_to_phys(pgd); wmb(); @@ -245,7 +245,7 @@ void __cpuexit cpu_die(void) __asm__("mov sp, %0\n" " b secondary_start_kernel" : - : "r" ((void *)current->thread_info + THREAD_SIZE - 8)); + : "r" (task_stack_page(current) + THREAD_SIZE - 8)); } #endif /* CONFIG_HOTPLUG_CPU */ @@ -256,9 +256,7 @@ void __cpuexit cpu_die(void) asmlinkage void __cpuinit secondary_start_kernel(void) { struct mm_struct *mm = &init_mm; - unsigned int cpu; - - cpu = smp_processor_id(); + unsigned int cpu = smp_processor_id(); printk("CPU%u: Booted secondary processor\n", cpu); diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index ea569ba482b..a491de2d902 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c @@ -147,6 +147,7 @@ asmlinkage int old_select(struct sel_arg_struct __user *arg) return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp); } +#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT) /* * sys_ipc() is the de-multiplexer for the SysV IPC calls.. * @@ -226,6 +227,7 @@ asmlinkage int sys_ipc(uint call, int first, int second, int third, return -ENOSYS; } } +#endif /* Fork a new task - this creates a new program thread. * This is called indirectly via a small wrapper diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c new file mode 100644 index 00000000000..eafa8e5284a --- /dev/null +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -0,0 +1,339 @@ +/* + * arch/arm/kernel/sys_oabi-compat.c + * + * Compatibility wrappers for syscalls that are used from + * old ABI user space binaries with an EABI kernel. + * + * Author: Nicolas Pitre + * Created: Oct 7, 2005 + * Copyright: MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * The legacy ABI and the new ARM EABI have different rules making some + * syscalls incompatible especially with structure arguments. + * Most notably, Eabi says 64-bit members should be 64-bit aligned instead of + * simply word aligned. EABI also pads structures to the size of the largest + * member it contains instead of the invariant 32-bit. + * + * The following syscalls are affected: + * + * sys_stat64: + * sys_lstat64: + * sys_fstat64: + * + * struct stat64 has different sizes and some members are shifted + * Compatibility wrappers are needed for them and provided below. 
+ * + * sys_fcntl64: + * + * struct flock64 has different sizes and some members are shifted + * A compatibility wrapper is needed and provided below. + * + * sys_statfs64: + * sys_fstatfs64: + * + * struct statfs64 has extra padding with EABI growing its size from + * 84 to 88. This struct is now __attribute__((packed,aligned(4))) + * with a small assembly wrapper to force the sz argument to 84 if it is 88 + * to avoid copying the extra padding over user space unexpecting it. + * + * sys_newuname: + * + * struct new_utsname has no padding with EABI. No problem there. + * + * sys_epoll_ctl: + * sys_epoll_wait: + * + * struct epoll_event has its second member shifted also affecting the + * structure size. Compatibility wrappers are needed and provided below. + * + * sys_ipc: + * sys_semop: + * sys_semtimedop: + * + * struct sembuf loses its padding with EABI. Since arrays of them are + * used they have to be copyed to remove the padding. Compatibility wrappers + * provided below. + */ + +#include <linux/syscalls.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/fcntl.h> +#include <linux/eventpoll.h> +#include <linux/sem.h> +#include <asm/ipc.h> +#include <asm/uaccess.h> + +struct oldabi_stat64 { + unsigned long long st_dev; + unsigned int __pad1; + unsigned long __st_ino; + unsigned int st_mode; + unsigned int st_nlink; + + unsigned long st_uid; + unsigned long st_gid; + + unsigned long long st_rdev; + unsigned int __pad2; + + long long st_size; + unsigned long st_blksize; + unsigned long long st_blocks; + + unsigned long st_atime; + unsigned long st_atime_nsec; + + unsigned long st_mtime; + unsigned long st_mtime_nsec; + + unsigned long st_ctime; + unsigned long st_ctime_nsec; + + unsigned long long st_ino; +} __attribute__ ((packed,aligned(4))); + +static long cp_oldabi_stat64(struct kstat *stat, + struct oldabi_stat64 __user *statbuf) +{ + struct oldabi_stat64 tmp; + + tmp.st_dev = huge_encode_dev(stat->dev); + tmp.__pad1 = 0; + tmp.__st_ino = stat->ino; + tmp.st_mode = stat->mode; + tmp.st_nlink = stat->nlink; + tmp.st_uid = stat->uid; + tmp.st_gid = stat->gid; + tmp.st_rdev = huge_encode_dev(stat->rdev); + tmp.st_size = stat->size; + tmp.st_blocks = stat->blocks; + tmp.__pad2 = 0; + tmp.st_blksize = stat->blksize; + tmp.st_atime = stat->atime.tv_sec; + tmp.st_atime_nsec = stat->atime.tv_nsec; + tmp.st_mtime = stat->mtime.tv_sec; + tmp.st_mtime_nsec = stat->mtime.tv_nsec; + tmp.st_ctime = stat->ctime.tv_sec; + tmp.st_ctime_nsec = stat->ctime.tv_nsec; + tmp.st_ino = stat->ino; + return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? 
-EFAULT : 0; +} + +asmlinkage long sys_oabi_stat64(char __user * filename, + struct oldabi_stat64 __user * statbuf) +{ + struct kstat stat; + int error = vfs_stat(filename, &stat); + if (!error) + error = cp_oldabi_stat64(&stat, statbuf); + return error; +} + +asmlinkage long sys_oabi_lstat64(char __user * filename, + struct oldabi_stat64 __user * statbuf) +{ + struct kstat stat; + int error = vfs_lstat(filename, &stat); + if (!error) + error = cp_oldabi_stat64(&stat, statbuf); + return error; +} + +asmlinkage long sys_oabi_fstat64(unsigned long fd, + struct oldabi_stat64 __user * statbuf) +{ + struct kstat stat; + int error = vfs_fstat(fd, &stat); + if (!error) + error = cp_oldabi_stat64(&stat, statbuf); + return error; +} + +struct oabi_flock64 { + short l_type; + short l_whence; + loff_t l_start; + loff_t l_len; + pid_t l_pid; +} __attribute__ ((packed,aligned(4))); + +asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd, + unsigned long arg) +{ + struct oabi_flock64 user; + struct flock64 kernel; + mm_segment_t fs = USER_DS; /* initialized to kill a warning */ + unsigned long local_arg = arg; + int ret; + + switch (cmd) { + case F_GETLK64: + case F_SETLK64: + case F_SETLKW64: + if (copy_from_user(&user, (struct oabi_flock64 __user *)arg, + sizeof(user))) + return -EFAULT; + kernel.l_type = user.l_type; + kernel.l_whence = user.l_whence; + kernel.l_start = user.l_start; + kernel.l_len = user.l_len; + kernel.l_pid = user.l_pid; + local_arg = (unsigned long)&kernel; + fs = get_fs(); + set_fs(KERNEL_DS); + } + + ret = sys_fcntl64(fd, cmd, local_arg); + + switch (cmd) { + case F_GETLK64: + if (!ret) { + user.l_type = kernel.l_type; + user.l_whence = kernel.l_whence; + user.l_start = kernel.l_start; + user.l_len = kernel.l_len; + user.l_pid = kernel.l_pid; + if (copy_to_user((struct oabi_flock64 __user *)arg, + &user, sizeof(user))) + ret = -EFAULT; + } + case F_SETLK64: + case F_SETLKW64: + set_fs(fs); + } + + return ret; +} + +struct oabi_epoll_event { + __u32 events; + __u64 data; +} __attribute__ ((packed,aligned(4))); + +asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd, + struct oabi_epoll_event __user *event) +{ + struct oabi_epoll_event user; + struct epoll_event kernel; + mm_segment_t fs; + long ret; + + if (op == EPOLL_CTL_DEL) + return sys_epoll_ctl(epfd, op, fd, NULL); + if (copy_from_user(&user, event, sizeof(user))) + return -EFAULT; + kernel.events = user.events; + kernel.data = user.data; + fs = get_fs(); + set_fs(KERNEL_DS); + ret = sys_epoll_ctl(epfd, op, fd, &kernel); + set_fs(fs); + return ret; +} + +asmlinkage long sys_oabi_epoll_wait(int epfd, + struct oabi_epoll_event __user *events, + int maxevents, int timeout) +{ + struct epoll_event *kbuf; + mm_segment_t fs; + long ret, err, i; + + if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event))) + return -EINVAL; + kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL); + if (!kbuf) + return -ENOMEM; + fs = get_fs(); + set_fs(KERNEL_DS); + ret = sys_epoll_wait(epfd, kbuf, maxevents, timeout); + set_fs(fs); + err = 0; + for (i = 0; i < ret; i++) { + __put_user_error(kbuf[i].events, &events->events, err); + __put_user_error(kbuf[i].data, &events->data, err); + events++; + } + kfree(kbuf); + return err ? 
-EFAULT : ret; +} + +struct oabi_sembuf { + unsigned short sem_num; + short sem_op; + short sem_flg; + unsigned short __pad; +}; + +asmlinkage long sys_oabi_semtimedop(int semid, + struct oabi_sembuf __user *tsops, + unsigned nsops, + const struct timespec __user *timeout) +{ + struct sembuf *sops; + struct timespec local_timeout; + long err; + int i; + + if (nsops < 1) + return -EINVAL; + sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL); + if (!sops) + return -ENOMEM; + err = 0; + for (i = 0; i < nsops; i++) { + __get_user_error(sops[i].sem_num, &tsops->sem_num, err); + __get_user_error(sops[i].sem_op, &tsops->sem_op, err); + __get_user_error(sops[i].sem_flg, &tsops->sem_flg, err); + tsops++; + } + if (timeout) { + /* copy this as well before changing domain protection */ + err |= copy_from_user(&local_timeout, timeout, sizeof(*timeout)); + timeout = &local_timeout; + } + if (err) { + err = -EFAULT; + } else { + mm_segment_t fs = get_fs(); + set_fs(KERNEL_DS); + err = sys_semtimedop(semid, sops, nsops, timeout); + set_fs(fs); + } + kfree(sops); + return err; +} + +asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops, + unsigned nsops) +{ + return sys_oabi_semtimedop(semid, tsops, nsops, NULL); +} + +extern asmlinkage int sys_ipc(uint call, int first, int second, int third, + void __user *ptr, long fifth); + +asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third, + void __user *ptr, long fifth) +{ + switch (call & 0xffff) { + case SEMOP: + return sys_oabi_semtimedop(first, + (struct oabi_sembuf __user *)ptr, + second, NULL); + case SEMTIMEDOP: + return sys_oabi_semtimedop(first, + (struct oabi_sembuf __user *)ptr, + second, + (const struct timespec __user *)fifth); + default: + return sys_ipc(call, first, second, third, ptr, fifth); + } +} diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index fc4729106a3..d7d932c0286 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -29,9 +29,6 @@ #include <linux/sysdev.h> #include <linux/timer.h> -#include <asm/hardware.h> -#include <asm/io.h> -#include <asm/irq.h> #include <asm/leds.h> #include <asm/thread_info.h> #include <asm/mach/time.h> diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 45e9ea6cd2a..10235b01582 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -23,7 +23,6 @@ #include <asm/atomic.h> #include <asm/cacheflush.h> -#include <asm/io.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/unistd.h> @@ -165,7 +164,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) } else if (verify_stack(fp)) { printk("invalid frame pointer 0x%08x", fp); ok = 0; - } else if (fp < (unsigned long)(tsk->thread_info + 1)) + } else if (fp < (unsigned long)end_of_stack(tsk)) printk("frame pointer underflow"); printk("\n"); @@ -211,7 +210,7 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p if (!user_mode(regs) || in_interrupt()) { dump_mem("Stack: ", regs->ARM_sp, - THREAD_SIZE + (unsigned long)tsk->thread_info); + THREAD_SIZE + (unsigned long)task_stack_page(tsk)); dump_backtrace(regs, tsk); dump_instr(regs); } @@ -405,7 +404,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) struct thread_info *thread = current_thread_info(); siginfo_t info; - if ((no >> 16) != 0x9f) + if ((no >> 16) != (__ARM_NR_BASE>> 16)) return bad_syscall(no, regs); switch (no & 0xffff) { diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 80c8e4c8cef..2b254e88595 100644 
--- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -17,15 +17,13 @@ jiffies = jiffies_64; jiffies = jiffies_64 + 4; #endif +SECTIONS +{ #ifdef CONFIG_XIP_KERNEL -#define TEXTADDR XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) + . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR); #else -#define TEXTADDR KERNEL_RAM_ADDR + . = PAGE_OFFSET + TEXT_OFFSET; #endif - -SECTIONS -{ - . = TEXTADDR; .init : { /* Init code and data */ _stext = .; _sinittext = .; @@ -104,7 +102,7 @@ SECTIONS #ifdef CONFIG_XIP_KERNEL __data_loc = ALIGN(4); /* location in binary */ - . = KERNEL_RAM_ADDR; + . = PAGE_OFFSET + TEXT_OFFSET; #else . = ALIGN(THREAD_SIZE); __data_loc = .; @@ -172,6 +170,10 @@ SECTIONS .comment 0 : { *(.comment) } } -/* those must never be empty */ +/* + * These must never be empty + * If you have to comment these two assert statements out, your + * binutils is too old (for other reasons as well) + */ ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support") ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined") |
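Illustration (not part of the patch): the structure-layout differences documented at the top of sys_oabi-compat.c follow from the EABI rules that 64-bit members are 64-bit aligned and that a structure is padded out to the alignment of its largest member. A small stand-alone C sketch, reusing the packed,aligned(4) trick the patch itself uses to describe the legacy layout, shows the shift for struct epoll_event; the struct and typedef names here are local stand-ins, and the printed numbers assume a compiler that aligns 64-bit types to 8 bytes (e.g. an arm-linux-gnueabi toolchain):

#include <stdio.h>
#include <stddef.h>

typedef unsigned int u32;
typedef unsigned long long u64;

struct eabi_epoll_event {              /* natural layout under EABI */
	u32 events;
	u64 data;                      /* 8-byte aligned under EABI */
};

struct oabi_epoll_event {              /* legacy layout, as described in the patch */
	u32 events;
	u64 data;                      /* only 4-byte aligned under the old ABI */
} __attribute__ ((packed, aligned(4)));

int main(void)
{
	printf("EABI epoll_event: size %zu, data at offset %zu\n",
	       sizeof(struct eabi_epoll_event),
	       offsetof(struct eabi_epoll_event, data));
	printf("OABI epoll_event: size %zu, data at offset %zu\n",
	       sizeof(struct oabi_epoll_event),
	       offsetof(struct oabi_epoll_event, data));
	return 0;
}

Under those assumptions this should print size 16 / offset 8 for the native layout and size 12 / offset 4 for the legacy one, which is why sys_oabi_epoll_ctl() and sys_oabi_epoll_wait() above copy and repack every event instead of passing the user buffer through unchanged.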