author		Ingo Molnar <mingo@elte.hu>	2008-10-15 13:46:29 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-10-15 13:46:29 +0200
commit		b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40 (patch)
tree		53ccb1c2c14751fe69cf93102e76e97021f6df07 /arch/parisc
parent		4f962d4d65923d7b722192e729840cfb79af0a5a (diff)
parent		278429cff8809958d25415ba0ed32b59866ab1a8 (diff)
Merge branch 'linus' into stackprotector
Conflicts:
	arch/x86/kernel/Makefile
	include/asm-x86/pda.h
Diffstat (limited to 'arch/parisc')
-rw-r--r--   arch/parisc/Kconfig                  4
-rw-r--r--   arch/parisc/hpux/fs.c               30
-rw-r--r--   arch/parisc/hpux/sys_hpux.c         12
-rw-r--r--   arch/parisc/kernel/cache.c           6
-rw-r--r--   arch/parisc/kernel/module.c         14
-rw-r--r--   arch/parisc/kernel/perf.c            4
-rw-r--r--   arch/parisc/kernel/smp.c           136
-rw-r--r--   arch/parisc/kernel/vmlinux.lds.S     1
-rw-r--r--   arch/parisc/mm/init.c                7
9 files changed, 72 insertions, 142 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index bc7a19da624..8313fccced5 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -76,9 +76,6 @@ config IRQ_PER_CPU
bool
default y
-config ARCH_SUPPORTS_AOUT
- def_bool y
-
# unless you want to implement ACPI on PA-RISC ... ;-)
config PM
bool
@@ -199,6 +196,7 @@ endchoice
config SMP
bool "Symmetric multi-processing support"
+ select USE_GENERIC_SMP_HELPERS
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c
index 1263f00dc35..12c04c5e558 100644
--- a/arch/parisc/hpux/fs.c
+++ b/arch/parisc/hpux/fs.c
@@ -84,22 +84,28 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
if (reclen > buf->count)
return -EINVAL;
d_ino = ino;
- if (sizeof(d_ino) < sizeof(ino) && d_ino != ino)
+ if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
+ buf->error = -EOVERFLOW;
return -EOVERFLOW;
+ }
dirent = buf->previous;
if (dirent)
- put_user(offset, &dirent->d_off);
+ if (put_user(offset, &dirent->d_off))
+ goto Efault;
dirent = buf->current_dir;
+ if (put_user(d_ino, &dirent->d_ino) ||
+ put_user(reclen, &dirent->d_reclen) ||
+ put_user(namlen, &dirent->d_namlen) ||
+ copy_to_user(dirent->d_name, name, namlen) ||
+ put_user(0, dirent->d_name + namlen))
+ goto Efault;
buf->previous = dirent;
- put_user(d_ino, &dirent->d_ino);
- put_user(reclen, &dirent->d_reclen);
- put_user(namlen, &dirent->d_namlen);
- copy_to_user(dirent->d_name, name, namlen);
- put_user(0, dirent->d_name + namlen);
- dirent = (void __user *)dirent + reclen;
- buf->current_dir = dirent;
+ buf->current_dir = (void __user *)dirent + reclen;
buf->count -= reclen;
return 0;
+Efault:
+ buf->error = -EFAULT;
+ return -EFAULT;
}
#undef NAME_OFFSET
@@ -126,8 +132,10 @@ int hpux_getdents(unsigned int fd, struct hpux_dirent __user *dirent, unsigned i
error = buf.error;
lastdirent = buf.previous;
if (lastdirent) {
- put_user(file->f_pos, &lastdirent->d_off);
- error = count - buf.count;
+ if (put_user(file->f_pos, &lastdirent->d_off))
+ error = -EFAULT;
+ else
+ error = count - buf.count;
}
out_putf:
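
The fs.c change above stops ignoring uaccess failures in the filldir callback. A minimal sketch of the same pattern, with demo_* names that are purely illustrative and not from the commit: each copy to user space is checked, the failure is remembered in the callback's state, and the syscall wrapper can then return -EFAULT instead of a silently short listing.

#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_state {
	char __user *out;	/* user buffer cursor */
	int count;		/* bytes left in the user buffer */
	int error;		/* sticky error for the syscall wrapper to report */
};

static int demo_emit(struct demo_state *st, const char *rec, int reclen)
{
	if (reclen > st->count)
		return -EINVAL;
	if (copy_to_user(st->out, rec, reclen)) {
		st->error = -EFAULT;	/* remembered, not just returned */
		return -EFAULT;
	}
	st->out += reclen;
	st->count -= reclen;
	return 0;
}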
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index 0c5b9dabb47..18072e03a01 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -210,19 +210,19 @@ static int vfs_statfs_hpux(struct dentry *dentry, struct hpux_statfs *buf)
}
/* hpux statfs */
-asmlinkage long hpux_statfs(const char __user *path,
+asmlinkage long hpux_statfs(const char __user *pathname,
struct hpux_statfs __user *buf)
{
- struct nameidata nd;
+ struct path path;
int error;
- error = user_path_walk(path, &nd);
+ error = user_path(pathname, &path);
if (!error) {
struct hpux_statfs tmp;
- error = vfs_statfs_hpux(nd.path.dentry, &tmp);
+ error = vfs_statfs_hpux(path.dentry, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
- path_put(&nd.path);
+ path_put(&path);
}
return error;
}
@@ -448,7 +448,7 @@ int hpux_pipe(int *kstack_fildes)
int error;
lock_kernel();
- error = do_pipe(kstack_fildes);
+ error = do_pipe_flags(kstack_fildes, 0);
unlock_kernel();
return error;
}
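
The sys_hpux.c hunk replaces the old user_path_walk()/struct nameidata lookup with user_path() and a bare struct path. A minimal sketch of that idiom, assuming the 2.6.27-era vfs_statfs() that takes a dentry; demo_statfs is an illustrative name, not part of the commit.

#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/statfs.h>

static long demo_statfs(const char __user *pathname, struct kstatfs *st)
{
	struct path path;
	int error;

	error = user_path(pathname, &path);	/* resolve the user string to a struct path */
	if (error)
		return error;
	error = vfs_statfs(path.dentry, st);
	path_put(&path);			/* drop the dentry and vfsmount references */
	return error;
}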
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index e10d25d2d9c..5259d8c2067 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -51,12 +51,12 @@ static struct pdc_btlb_info btlb_info __read_mostly;
void
flush_data_cache(void)
{
- on_each_cpu(flush_data_cache_local, NULL, 1, 1);
+ on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
- on_each_cpu(flush_instruction_cache_local, NULL, 1, 1);
+ on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif
@@ -515,7 +515,7 @@ static void cacheflush_h_tmp_function(void *dummy)
void flush_cache_all(void)
{
- on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
+ on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}
void flush_cache_mm(struct mm_struct *mm)
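
The cache.c hunks track the 2.6.27 change to on_each_cpu(), which dropped the old retry argument. A minimal sketch of a caller under the new three-argument signature (demo_* names are illustrative):

#include <linux/smp.h>

static void demo_flush_local(void *unused)
{
	/* per-CPU work; runs on every online CPU, including the calling one */
}

static void demo_flush_all(void)
{
	/* on_each_cpu(func, info, wait): wait=1 blocks until all CPUs have run func */
	on_each_cpu(demo_flush_local, NULL, 1);
}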
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index fdacdd4341c..44138c3e6ea 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -47,7 +47,9 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/bug.h>
+#include <linux/uaccess.h>
+#include <asm/sections.h>
#include <asm/unwind.h>
#if 0
@@ -860,3 +862,15 @@ void module_arch_cleanup(struct module *mod)
deregister_unwind_table(mod);
module_bug_cleanup(mod);
}
+
+#ifdef CONFIG_64BIT
+void *dereference_function_descriptor(void *ptr)
+{
+ Elf64_Fdesc *desc = ptr;
+ void *p;
+
+ if (!probe_kernel_address(&desc->addr, p))
+ ptr = p;
+ return ptr;
+}
+#endif
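
On 64-bit parisc a C function pointer refers to an Elf64_Fdesc whose addr field holds the real entry point; the new helper above reads it with probe_kernel_address() so a bad pointer cannot fault the kernel. A hedged usage sketch, assuming the declaration is picked up via asm/sections.h as the hunk's new include suggests (demo_report is illustrative):

#include <linux/kernel.h>
#include <asm/sections.h>

static void demo_report(void (*fn)(void))
{
	/* resolve the descriptor to the actual code address before printing */
	void *entry = dereference_function_descriptor((void *)fn);

	printk(KERN_INFO "entry point at %p\n", entry);
}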
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index 89d6d5ad44b..f696f57faa1 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -46,6 +46,7 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
+#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <asm/uaccess.h>
@@ -260,13 +261,16 @@ printk("Preparing to start counters\n");
*/
static int perf_open(struct inode *inode, struct file *file)
{
+ lock_kernel();
spin_lock(&perf_lock);
if (perf_enabled) {
spin_unlock(&perf_lock);
+ unlock_kernel();
return -EBUSY;
}
perf_enabled = 1;
spin_unlock(&perf_lock);
+ unlock_kernel();
return 0;
}
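
The perf.c hunk is part of the 2.6.27 BKL pushdown: the VFS no longer takes the big kernel lock around a character device's ->open(), so drivers that still depend on its serialization take it themselves. A minimal sketch of the pattern (demo_open is illustrative):

#include <linux/fs.h>
#include <linux/smp_lock.h>

static int demo_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	lock_kernel();		/* serialization the VFS used to provide */
	/* ... exclusive-use checks, one-time hardware setup ... */
	unlock_kernel();
	return ret;
}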
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 85fc7754ec2..d47f3975c9c 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -84,19 +84,11 @@ EXPORT_SYMBOL(cpu_possible_map);
DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
-struct smp_call_struct {
- void (*func) (void *info);
- void *info;
- long wait;
- atomic_t unstarted_count;
- atomic_t unfinished_count;
-};
-static volatile struct smp_call_struct *smp_call_function_data;
-
enum ipi_message_type {
IPI_NOP=0,
IPI_RESCHEDULE=1,
IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
IPI_CPU_START,
IPI_CPU_STOP,
IPI_CPU_TEST
@@ -187,33 +179,12 @@ ipi_interrupt(int irq, void *dev_id)
case IPI_CALL_FUNC:
smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
- {
- volatile struct smp_call_struct *data;
- void (*func)(void *info);
- void *info;
- int wait;
-
- data = smp_call_function_data;
- func = data->func;
- info = data->info;
- wait = data->wait;
-
- mb();
- atomic_dec ((atomic_t *)&data->unstarted_count);
-
- /* At this point, *data can't
- * be relied upon.
- */
-
- (*func)(info);
-
- /* Notify the sending CPU that the
- * task is done.
- */
- mb();
- if (wait)
- atomic_dec ((atomic_t *)&data->unfinished_count);
- }
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CALL_FUNC_SINGLE:
+ smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
+ generic_smp_call_function_single_interrupt();
break;
case IPI_CPU_START:
@@ -256,6 +227,14 @@ ipi_send(int cpu, enum ipi_message_type op)
spin_unlock_irqrestore(lock, flags);
}
+static void
+send_IPI_mask(cpumask_t mask, enum ipi_message_type op)
+{
+ int cpu;
+
+ for_each_cpu_mask(cpu, mask)
+ ipi_send(cpu, op);
+}
static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
@@ -295,86 +274,15 @@ smp_send_all_nop(void)
send_IPI_allbutself(IPI_NOP);
}
-
-/**
- * Run a function on all other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <retry> If true, keep retrying until ready.
- * <wait> If true, wait until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or have executed.
- */
-
-int
-smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- struct smp_call_struct data;
- unsigned long timeout;
- static DEFINE_SPINLOCK(lock);
- int retries = 0;
-
- if (num_online_cpus() < 2)
- return 0;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- /* can also deadlock if IPIs are disabled */
- WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);
-
-
- data.func = func;
- data.info = info;
- data.wait = wait;
- atomic_set(&data.unstarted_count, num_online_cpus() - 1);
- atomic_set(&data.unfinished_count, num_online_cpus() - 1);
-
- if (retry) {
- spin_lock (&lock);
- while (smp_call_function_data != 0)
- barrier();
- }
- else {
- spin_lock (&lock);
- if (smp_call_function_data) {
- spin_unlock (&lock);
- return -EBUSY;
- }
- }
-
- smp_call_function_data = &data;
- spin_unlock (&lock);
-
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_allbutself(IPI_CALL_FUNC);
-
- retry:
- /* Wait for response */
- timeout = jiffies + HZ;
- while ( (atomic_read (&data.unstarted_count) > 0) &&
- time_before (jiffies, timeout) )
- barrier ();
-
- if (atomic_read (&data.unstarted_count) > 0) {
- printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n",
- smp_processor_id(), ++retries);
- goto retry;
- }
- /* We either got one or timed out. Release the lock */
-
- mb();
- smp_call_function_data = NULL;
-
- while (wait && atomic_read (&data.unfinished_count) > 0)
- barrier ();
-
- return 0;
+ send_IPI_mask(mask, IPI_CALL_FUNC);
}
-EXPORT_SYMBOL(smp_call_function);
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
+}
/*
* Flush all other CPU's tlb and then mine. Do this with on_each_cpu()
@@ -384,7 +292,7 @@ EXPORT_SYMBOL(smp_call_function);
void
smp_flush_tlb_all(void)
{
- on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
+ on_each_cpu(flush_tlb_all_local, NULL, 1);
}
/*
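
With USE_GENERIC_SMP_HELPERS the architecture only delivers the IPIs (arch_send_call_function_ipi() and arch_send_call_function_single_ipi() above) and forwards them to generic_smp_call_function_interrupt(); kernel/smp.c now owns smp_call_function() itself. A minimal sketch of a caller under the resulting 2.6.27 API (demo_* names are illustrative):

#include <linux/smp.h>

static void demo_poke(void *info)
{
	/* runs on each remote CPU in IPI context; must not sleep */
}

static void demo_poke_others(void)
{
	/* smp_call_function(func, info, wait): the old retry argument is gone */
	smp_call_function(demo_poke, NULL, 1);
}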
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 2e516b87175..1a3b6ccd362 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -67,7 +67,6 @@ SECTIONS
_etext = .;
RODATA
- BUG_TABLE
/* writeable */
/* Make sure this is page aligned so
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index ce0da689a89..7c155c254e7 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -36,7 +36,6 @@ extern int data_start;
#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
-bootmem_data_t bmem_data[MAX_NUMNODES] __read_mostly;
unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif
@@ -262,7 +261,7 @@ static void __init setup_bootmem(void)
#ifdef CONFIG_DISCONTIGMEM
for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
memset(NODE_DATA(i), 0, sizeof(pg_data_t));
- NODE_DATA(i)->bdata = &bmem_data[i];
+ NODE_DATA(i)->bdata = &bootmem_node_data[i];
}
memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
@@ -888,7 +887,7 @@ void __init paging_init(void)
}
#endif
- free_area_init_node(i, NODE_DATA(i), zones_size,
+ free_area_init_node(i, zones_size,
pmem_ranges[i].start_pfn, NULL);
}
}
@@ -1053,7 +1052,7 @@ void flush_tlb_all(void)
do_recycle++;
}
spin_unlock(&sid_lock);
- on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
+ on_each_cpu(flush_tlb_all_local, NULL, 1);
if (do_recycle) {
spin_lock(&sid_lock);
recycle_sids(recycle_ndirty,recycle_dirty_array);