Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile            |   5
-rw-r--r--  kernel/acct.c              |   2
-rw-r--r--  kernel/capability.c        |   4
-rw-r--r--  kernel/cpuset.c            |   2
-rw-r--r--  kernel/dma-coherent.c      |  47
-rw-r--r--  kernel/exec_domain.c       |   3
-rw-r--r--  kernel/exit.c              |  17
-rw-r--r--  kernel/fork.c              |  20
-rw-r--r--  kernel/futex.c             |  17
-rw-r--r--  kernel/hrtimer.c           |   8
-rw-r--r--  kernel/irq/handle.c        |  16
-rw-r--r--  kernel/irq/manage.c        |  10
-rw-r--r--  kernel/itimer.c            |   7
-rw-r--r--  kernel/kallsyms.c          |  16
-rw-r--r--  kernel/kexec.c             |   5
-rw-r--r--  kernel/kprobes.c           |   2
-rw-r--r--  kernel/module.c            |  10
-rw-r--r--  kernel/posix-cpu-timers.c  |  70
-rw-r--r--  kernel/posix-timers.c      |  43
-rw-r--r--  kernel/power/Makefile      |   3
-rw-r--r--  kernel/power/disk.c        |  10
-rw-r--r--  kernel/printk.c            |   7
-rw-r--r--  kernel/ptrace.c            |   2
-rw-r--r--  kernel/rcuclassic.c        |   2
-rw-r--r--  kernel/rcutree.c           |   2
-rw-r--r--  kernel/relay.c             |   4
-rw-r--r--  kernel/resource.c          |   1
-rw-r--r--  kernel/sched.c             |  43
-rw-r--r--  kernel/sched_fair.c        |  37
-rw-r--r--  kernel/sched_stats.h       |  33
-rw-r--r--  kernel/signal.c            |  59
-rw-r--r--  kernel/softlockup.c        |   9
-rw-r--r--  kernel/sys.c               |  70
-rw-r--r--  kernel/sys_ni.c            |   1
-rw-r--r--  kernel/sysctl.c            |  15
-rw-r--r--  kernel/time.c              |  14
-rw-r--r--  kernel/time/tick-sched.c   |   2
-rw-r--r--  kernel/timer.c             |  18
-rw-r--r--  kernel/uid16.c             |  39
-rw-r--r--  kernel/up.c                |   1
-rw-r--r--  kernel/workqueue.c         |  20
41 files changed, 335 insertions(+), 361 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 2aebc4cd787..170a9213c1b 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -40,9 +40,8 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
-ifeq ($(CONFIG_USE_GENERIC_SMP_HELPERS),y)
-obj-y += smp.o
-else
+obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
+ifneq ($(CONFIG_SMP),y)
obj-y += up.o
endif
obj-$(CONFIG_SMP) += spinlock.o
diff --git a/kernel/acct.c b/kernel/acct.c
index d57b7cbb98b..7afa3156416 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -277,7 +277,7 @@ static int acct_on(char *name)
* should be written. If the filename is NULL, accounting will be
* shutdown.
*/
-asmlinkage long sys_acct(const char __user *name)
+SYSCALL_DEFINE1(acct, const char __user *, name)
{
int error;
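
The pattern for this whole series: SYSCALL_DEFINEn() still produces the familiar sys_*() entry point, but on architectures that select CONFIG_HAVE_SYSCALL_WRAPPERS it routes the call through a stub that takes every argument as long. A simplified sketch of the expansion for the acct() conversion above, with helper names approximated from include/linux/syscalls.h of this era:

    asmlinkage long sys_acct(const char __user *name);
    static inline long SYSC_acct(const char __user *name);

    /* The stub receives everything as long, so 32-bit arguments are
     * explicitly extended on 64-bit ABIs such as s390x instead of
     * arriving with undefined upper halves. */
    asmlinkage long SyS_acct(long name)
    {
        return SYSC_acct((const char __user *)name);
    }
    /* sys_acct() is then emitted as an alias of SyS_acct(). */

    static inline long SYSC_acct(const char __user *name)
    {
        /* ...original function body... */
    }

Without the wrappers the macro collapses to the plain asmlinkage definition, so the conversion is behaviour-neutral on every other architecture.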
diff --git a/kernel/capability.c b/kernel/capability.c
index 688926e496b..4e17041963f 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -161,7 +161,7 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
*
* Returns 0 on success and < 0 on error.
*/
-asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
+SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
{
int ret = 0;
pid_t pid;
@@ -235,7 +235,7 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
*
* Returns 0 on success and < 0 on error.
*/
-asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
+SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
{
struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
unsigned i, tocopy;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 647c77a88fc..a85678865c5 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -568,7 +568,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
* load balancing domains (sched domains) as specified by that partial
* partition.
*
- * See "What is sched_load_balance" in Documentation/cpusets.txt
+ * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
* for a background explanation of this.
*
* Does not return errors, on the theory that the callers of this
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index 038707404b7..962a3b574f2 100644
--- a/kernel/dma-coherent.c
+++ b/kernel/dma-coherent.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
* @size: size of requested memory area
* @dma_handle: This will be filled with the correct dma handle
* @ret: This pointer will be filled with the virtual address
- * to allocated area.
+ * to allocated area.
*
* This function should be only called from per-arch dma_alloc_coherent()
* to support allocation from per-device coherent memory pools.
@@ -118,31 +118,32 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
mem = dev->dma_mem;
if (!mem)
return 0;
- if (unlikely(size > mem->size))
- return 0;
+
+ *ret = NULL;
+
+ if (unlikely(size > (mem->size << PAGE_SHIFT)))
+ goto err;
pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
- if (pageno >= 0) {
- /*
- * Memory was found in the per-device arena.
- */
- *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
- *ret = mem->virt_base + (pageno << PAGE_SHIFT);
- memset(*ret, 0, size);
- } else if (mem->flags & DMA_MEMORY_EXCLUSIVE) {
- /*
- * The per-device arena is exhausted and we are not
- * permitted to fall back to generic memory.
- */
- *ret = NULL;
- } else {
- /*
- * The per-device arena is exhausted and we are
- * permitted to fall back to generic memory.
- */
- return 0;
- }
+ if (unlikely(pageno < 0))
+ goto err;
+
+ /*
+ * Memory was found in the per-device area.
+ */
+ *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+ *ret = mem->virt_base + (pageno << PAGE_SHIFT);
+ memset(*ret, 0, size);
+
return 1;
+
+err:
+ /*
+ * In the case where the allocation can not be satisfied from the
+ * per-device area, try to fall back to generic memory if the
+ * constraints allow it.
+ */
+ return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
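
Besides flattening the control flow, the new size test fixes a unit bug: mem->size is a page count while the size argument is in bytes, so the old comparison measured bytes against pages. The return convention is unchanged: non-zero means the request was handled from the per-device pool (with *ret left NULL when an exclusive pool is exhausted), zero means the caller may fall back. A sketch of the per-arch caller this contract is written for; the function names here are hypothetical:

    void *arch_dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *handle, gfp_t gfp)
    {
        void *ret;

        /* Non-zero: the per-device pool answered (possibly with
         * ret == NULL for an exhausted exclusive pool). */
        if (dma_alloc_from_coherent(dev, size, handle, &ret))
            return ret;

        return arch_generic_alloc(dev, size, handle, gfp); /* hypothetical */
    }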
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index 0511716e942..667c841c295 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -209,8 +209,7 @@ static int __init proc_execdomains_init(void)
module_init(proc_execdomains_init);
#endif
-asmlinkage long
-sys_personality(u_long personality)
+SYSCALL_DEFINE1(personality, u_long, personality)
{
u_long old = current->personality;
diff --git a/kernel/exit.c b/kernel/exit.c
index c7740fa3252..f80dec3f187 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1141,7 +1141,7 @@ NORET_TYPE void complete_and_exit(struct completion *comp, long code)
EXPORT_SYMBOL(complete_and_exit);
-asmlinkage long sys_exit(int error_code)
+SYSCALL_DEFINE1(exit, int, error_code)
{
do_exit((error_code&0xff)<<8);
}
@@ -1182,9 +1182,11 @@ do_group_exit(int exit_code)
* wait4()-ing process will get the correct exit code - even if this
* thread is not the thread group leader.
*/
-asmlinkage void sys_exit_group(int error_code)
+SYSCALL_DEFINE1(exit_group, int, error_code)
{
do_group_exit((error_code & 0xff) << 8);
+ /* NOTREACHED */
+ return 0;
}
static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
@@ -1752,9 +1754,8 @@ end:
return retval;
}
-asmlinkage long sys_waitid(int which, pid_t upid,
- struct siginfo __user *infop, int options,
- struct rusage __user *ru)
+SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
+ infop, int, options, struct rusage __user *, ru)
{
struct pid *pid = NULL;
enum pid_type type;
@@ -1793,8 +1794,8 @@ asmlinkage long sys_waitid(int which, pid_t upid,
return ret;
}
-asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
- int options, struct rusage __user *ru)
+SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
+ int, options, struct rusage __user *, ru)
{
struct pid *pid = NULL;
enum pid_type type;
@@ -1831,7 +1832,7 @@ asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
* sys_waitpid() remains for compatibility. waitpid() should be
* implemented by calling sys_wait4() from libc.a.
*/
-asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
+SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
return sys_wait4(pid, stat_addr, options, NULL);
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 1d68f1255dd..242a706e772 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -817,17 +817,17 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
struct signal_struct *sig;
- int ret;
if (clone_flags & CLONE_THREAD) {
- ret = thread_group_cputime_clone_thread(current);
- if (likely(!ret)) {
- atomic_inc(&current->signal->count);
- atomic_inc(&current->signal->live);
- }
- return ret;
+ atomic_inc(&current->signal->count);
+ atomic_inc(&current->signal->live);
+ return 0;
}
sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
+
+ if (sig)
+ posix_cpu_timers_init_group(sig);
+
tsk->signal = sig;
if (!sig)
return -ENOMEM;
@@ -864,8 +864,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
task_unlock(current->group_leader);
- posix_cpu_timers_init_group(sig);
-
acct_init_pacct(&sig->pacct);
tty_audit_fork(sig);
@@ -901,7 +899,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
clear_freeze_flag(p);
}
-asmlinkage long sys_set_tid_address(int __user *tidptr)
+SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
current->clear_child_tid = tidptr;
@@ -1603,7 +1601,7 @@ static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp
* constructed. Here we are modifying the current, active,
* task_struct.
*/
-asmlinkage long sys_unshare(unsigned long unshare_flags)
+SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
int err = 0;
struct fs_struct *fs, *new_fs = NULL;
diff --git a/kernel/futex.c b/kernel/futex.c
index 002aa189eb0..f89d373a9c6 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1733,9 +1733,8 @@ pi_faulted:
* @head: pointer to the list-head
* @len: length of the list-head, as userspace expects
*/
-asmlinkage long
-sys_set_robust_list(struct robust_list_head __user *head,
- size_t len)
+SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
+ size_t, len)
{
if (!futex_cmpxchg_enabled)
return -ENOSYS;
@@ -1756,9 +1755,9 @@ sys_set_robust_list(struct robust_list_head __user *head,
* @head_ptr: pointer to a list-head pointer, the kernel fills it in
* @len_ptr: pointer to a length field, the kernel fills in the header size
*/
-asmlinkage long
-sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
- size_t __user *len_ptr)
+SYSCALL_DEFINE3(get_robust_list, int, pid,
+ struct robust_list_head __user * __user *, head_ptr,
+ size_t __user *, len_ptr)
{
struct robust_list_head __user *head;
unsigned long ret;
@@ -1978,9 +1977,9 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
}
-asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
- struct timespec __user *utime, u32 __user *uaddr2,
- u32 val3)
+SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+ struct timespec __user *, utime, u32 __user *, uaddr2,
+ u32, val3)
{
struct timespec ts;
ktime_t t, *tp = NULL;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 1455b7651b6..f33afb0407b 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -614,7 +614,9 @@ void clock_was_set(void)
*/
void hres_timers_resume(void)
{
- /* Retrigger the CPU local events: */
+ WARN_ONCE(!irqs_disabled(),
+ KERN_INFO "hres_timers_resume() called with IRQs enabled!");
+
retrigger_next_event(NULL);
}
@@ -1467,8 +1469,8 @@ out:
return ret;
}
-asmlinkage long
-sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
+SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
+ struct timespec __user *, rmtp)
{
struct timespec tu;
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index c20db0be917..3aba8d12f32 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -39,6 +39,18 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
ack_bad_irq(irq);
}
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+static void __init init_irq_default_affinity(void)
+{
+ alloc_bootmem_cpumask_var(&irq_default_affinity);
+ cpumask_setall(irq_default_affinity);
+}
+#else
+static void __init init_irq_default_affinity(void)
+{
+}
+#endif
+
/*
* Linux has a controller-independent interrupt architecture.
* Every controller has a 'controller-template', that is used
@@ -134,6 +146,8 @@ int __init early_irq_init(void)
int legacy_count;
int i;
+ init_irq_default_affinity();
+
desc = irq_desc_legacy;
legacy_count = ARRAY_SIZE(irq_desc_legacy);
@@ -219,6 +233,8 @@ int __init early_irq_init(void)
int count;
int i;
+ init_irq_default_affinity();
+
desc = irq_desc;
count = ARRAY_SIZE(irq_desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index cd0cd8dcb34..291f0366455 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -15,17 +15,9 @@
#include "internals.h"
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
cpumask_var_t irq_default_affinity;
-static int init_irq_default_affinity(void)
-{
- alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
- cpumask_setall(irq_default_affinity);
- return 0;
-}
-core_initcall(init_irq_default_affinity);
-
/**
* synchronize_irq - wait for pending IRQ handlers (on other CPUs)
* @irq: interrupt number to wait for
diff --git a/kernel/itimer.c b/kernel/itimer.c
index db7c358b9a0..6a5fe93dd8b 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -100,7 +100,7 @@ int do_getitimer(int which, struct itimerval *value)
return 0;
}
-asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
+SYSCALL_DEFINE2(getitimer, int, which, struct itimerval __user *, value)
{
int error = -EFAULT;
struct itimerval get_buffer;
@@ -260,9 +260,8 @@ unsigned int alarm_setitimer(unsigned int seconds)
return it_old.it_value.tv_sec;
}
-asmlinkage long sys_setitimer(int which,
- struct itimerval __user *value,
- struct itimerval __user *ovalue)
+SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
+ struct itimerval __user *, ovalue)
{
struct itimerval set_buffer, get_buffer;
int error;
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index e694afa0eb8..7b8b0f21a5b 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -30,19 +30,20 @@
#define all_var 0
#endif
-extern const unsigned long kallsyms_addresses[];
-extern const u8 kallsyms_names[];
+/* These will be re-linked against their real values during the second link stage */
+extern const unsigned long kallsyms_addresses[] __attribute__((weak));
+extern const u8 kallsyms_names[] __attribute__((weak));
/* tell the compiler that the count isn't in the small data section if the arch
* has one (eg: FRV)
*/
extern const unsigned long kallsyms_num_syms
- __attribute__((__section__(".rodata")));
+__attribute__((weak, section(".rodata")));
-extern const u8 kallsyms_token_table[];
-extern const u16 kallsyms_token_index[];
+extern const u8 kallsyms_token_table[] __attribute__((weak));
+extern const u16 kallsyms_token_index[] __attribute__((weak));
-extern const unsigned long kallsyms_markers[];
+extern const unsigned long kallsyms_markers[] __attribute__((weak));
static inline int is_kernel_inittext(unsigned long addr)
{
@@ -167,6 +168,9 @@ static unsigned long get_symbol_pos(unsigned long addr,
unsigned long symbol_start = 0, symbol_end = 0;
unsigned long i, low, high, mid;
+ /* This kernel should never have been booted. */
+ BUG_ON(!kallsyms_addresses);
+
/* do a binary search on the sorted kallsyms_addresses array */
low = 0;
high = kallsyms_num_syms;
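
Marking the arrays weak lets the first pass of the two-stage kernel link succeed before the kallsyms tables have been generated; unresolved weak symbols resolve to address zero, which is exactly what the new BUG_ON() probes for. The same linker behaviour in a minimal user-space program:

    /* weak-demo.c: build with no other object defining table[].
     * An undefined weak array resolves to address 0, so testing the
     * array as a pointer tells you whether a real definition was
     * linked in -- the property BUG_ON(!kallsyms_addresses) relies on. */
    extern const unsigned long table[] __attribute__((weak));

    int main(void)
    {
        return table ? 0 : 1; /* exits 1 when no definition was linked */
    }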
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 3fb855ad6aa..8a6d7b08864 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -934,9 +934,8 @@ struct kimage *kexec_crash_image;
static DEFINE_MUTEX(kexec_mutex);
-asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
- struct kexec_segment __user *segments,
- unsigned long flags)
+SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
+ struct kexec_segment __user *, segments, unsigned long, flags)
{
struct kimage **dest_image, *image;
int result;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1b9cbdc0127..7ba8cd9845c 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -123,7 +123,7 @@ static int collect_garbage_slots(void);
static int __kprobes check_safety(void)
{
int ret = 0;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
ret = freeze_processes();
if (ret == 0) {
struct task_struct *p, *q;
diff --git a/kernel/module.c b/kernel/module.c
index c9332c90d5a..e8b51d41dd7 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -743,8 +743,8 @@ static void wait_for_zero_refcount(struct module *mod)
mutex_lock(&module_mutex);
}
-asmlinkage long
-sys_delete_module(const char __user *name_user, unsigned int flags)
+SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
+ unsigned int, flags)
{
struct module *mod;
char name[MODULE_NAME_LEN];
@@ -2296,10 +2296,8 @@ static noinline struct module *load_module(void __user *umod,
}
/* This is where the real work happens */
-asmlinkage long
-sys_init_module(void __user *umod,
- unsigned long len,
- const char __user *uargs)
+SYSCALL_DEFINE3(init_module, void __user *, umod,
+ unsigned long, len, const char __user *, uargs)
{
struct module *mod;
int ret = 0;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 157de3a4783..fa07da94d7b 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -10,76 +10,6 @@
#include <linux/kernel_stat.h>
/*
- * Allocate the thread_group_cputime structure appropriately and fill in the
- * current values of the fields. Called from copy_signal() via
- * thread_group_cputime_clone_thread() when adding a second or subsequent
- * thread to a thread group. Assumes interrupts are enabled when called.
- */
-int thread_group_cputime_alloc(struct task_struct *tsk)
-{
- struct signal_struct *sig = tsk->signal;
- struct task_cputime *cputime;
-
- /*
- * If we have multiple threads and we don't already have a
- * per-CPU task_cputime struct (checked in the caller), allocate
- * one and fill it in with the times accumulated so far. We may
- * race with another thread so recheck after we pick up the sighand
- * lock.
- */
- cputime = alloc_percpu(struct task_cputime);
- if (cputime == NULL)
- return -ENOMEM;
- spin_lock_irq(&tsk->sighand->siglock);
- if (sig->cputime.totals) {
- spin_unlock_irq(&tsk->sighand->siglock);
- free_percpu(cputime);
- return 0;
- }
- sig->cputime.totals = cputime;
- cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id());
- cputime->utime = tsk->utime;
- cputime->stime = tsk->stime;
- cputime->sum_exec_runtime = tsk->se.sum_exec_runtime;
- spin_unlock_irq(&tsk->sighand->siglock);
- return 0;
-}
-
-/**
- * thread_group_cputime - Sum the thread group time fields across all CPUs.
- *
- * @tsk: The task we use to identify the thread group.
- * @times: task_cputime structure in which we return the summed fields.
- *
- * Walk the list of CPUs to sum the per-CPU time fields in the thread group
- * time structure.
- */
-void thread_group_cputime(
- struct task_struct *tsk,
- struct task_cputime *times)
-{
- struct task_cputime *totals, *tot;
- int i;
-
- totals = tsk->signal->cputime.totals;
- if (!totals) {
- times->utime = tsk->utime;
- times->stime = tsk->stime;
- times->sum_exec_runtime = tsk->se.sum_exec_runtime;
- return;
- }
-
- times->stime = times->utime = cputime_zero;
- times->sum_exec_runtime = 0;
- for_each_possible_cpu(i) {
- tot = per_cpu_ptr(totals, i);
- times->utime = cputime_add(times->utime, tot->utime);
- times->stime = cputime_add(times->stime, tot->stime);
- times->sum_exec_runtime += tot->sum_exec_runtime;
- }
-}
-
-/*
* Called after updating RLIMIT_CPU to set timer expiration if necessary.
*/
void update_rlimit_cpu(unsigned long rlim_new)
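
The per-CPU totals, their lazy allocation, and the cross-CPU summing all go away because signal_struct now embeds the totals directly behind a spinlock (see the kernel/sched_stats.h hunks further down). The reader then shrinks to a locked copy; a sketch of the replacement, assuming the field and lock names visible in those hunks:

    void thread_group_cputime(struct task_struct *tsk,
                              struct task_cputime *times)
    {
        struct task_cputime *totals = &tsk->signal->cputime.totals;
        unsigned long flags;

        spin_lock_irqsave(&totals->lock, flags);
        *times = *totals;
        spin_unlock_irqrestore(&totals->lock, flags);
    }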
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 887c63787de..052ec4d195c 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -477,10 +477,9 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
/* Create a POSIX.1b interval timer. */
-asmlinkage long
-sys_timer_create(const clockid_t which_clock,
- struct sigevent __user *timer_event_spec,
- timer_t __user * created_timer_id)
+SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+ struct sigevent __user *, timer_event_spec,
+ timer_t __user *, created_timer_id)
{
struct k_itimer *new_timer;
int error, new_timer_id;
@@ -661,8 +660,8 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
}
/* Get the time remaining on a POSIX.1b interval timer. */
-asmlinkage long
-sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
+SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
+ struct itimerspec __user *, setting)
{
struct k_itimer *timr;
struct itimerspec cur_setting;
@@ -691,8 +690,7 @@ sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
* the call back to do_schedule_next_timer(). So all we need to do is
* to pick up the frozen overrun.
*/
-asmlinkage long
-sys_timer_getoverrun(timer_t timer_id)
+SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
struct k_itimer *timr;
int overrun;
@@ -760,10 +758,9 @@ common_timer_set(struct k_itimer *timr, int flags,
}
/* Set a POSIX.1b interval timer */
-asmlinkage long
-sys_timer_settime(timer_t timer_id, int flags,
- const struct itimerspec __user *new_setting,
- struct itimerspec __user *old_setting)
+SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
+ const struct itimerspec __user *, new_setting,
+ struct itimerspec __user *, old_setting)
{
struct k_itimer *timr;
struct itimerspec new_spec, old_spec;
@@ -816,8 +813,7 @@ static inline int timer_delete_hook(struct k_itimer *timer)
}
/* Delete a POSIX.1b interval timer. */
-asmlinkage long
-sys_timer_delete(timer_t timer_id)
+SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
struct k_itimer *timer;
unsigned long flags;
@@ -903,8 +899,8 @@ int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
}
EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
-asmlinkage long sys_clock_settime(const clockid_t which_clock,
- const struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
+ const struct timespec __user *, tp)
{
struct timespec new_tp;
@@ -916,8 +912,8 @@ asmlinkage long sys_clock_settime(const clockid_t which_clock,
return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
}
-asmlinkage long
-sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
+ struct timespec __user *, tp)
{
struct timespec kernel_tp;
int error;
@@ -933,8 +929,8 @@ sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
}
-asmlinkage long
-sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
+ struct timespec __user *, tp)
{
struct timespec rtn_tp;
int error;
@@ -963,10 +959,9 @@ static int common_nsleep(const clockid_t which_clock, int flags,
which_clock);
}
-asmlinkage long
-sys_clock_nanosleep(const clockid_t which_clock, int flags,
- const struct timespec __user *rqtp,
- struct timespec __user *rmtp)
+SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
+ const struct timespec __user *, rqtp,
+ struct timespec __user *, rmtp)
{
struct timespec t;
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 597823b5b70..d7a10167a25 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -4,7 +4,8 @@ EXTRA_CFLAGS += -DDEBUG
endif
obj-y := main.o
-obj-$(CONFIG_PM_SLEEP) += process.o console.o
+obj-$(CONFIG_PM_SLEEP) += console.o
+obj-$(CONFIG_FREEZER) += process.o
obj-$(CONFIG_HIBERNATION) += swsusp.o disk.o snapshot.o swap.o user.o
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 45e8541ab7e..432ee575c9e 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -71,6 +71,14 @@ void hibernation_set_ops(struct platform_hibernation_ops *ops)
mutex_unlock(&pm_mutex);
}
+static bool entering_platform_hibernation;
+
+bool system_entering_hibernation(void)
+{
+ return entering_platform_hibernation;
+}
+EXPORT_SYMBOL(system_entering_hibernation);
+
#ifdef CONFIG_PM_DEBUG
static void hibernation_debug_sleep(void)
{
@@ -411,6 +419,7 @@ int hibernation_platform_enter(void)
if (error)
goto Close;
+ entering_platform_hibernation = true;
suspend_console();
error = device_suspend(PMSG_HIBERNATE);
if (error) {
@@ -445,6 +454,7 @@ int hibernation_platform_enter(void)
Finish:
hibernation_ops->finish();
Resume_devices:
+ entering_platform_hibernation = false;
device_resume(PMSG_RESTORE);
resume_console();
Close:
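
system_entering_hibernation() gives platform code a way to tell hibernation apart from other suspend transitions inside a shared callback. A hedged usage sketch; the driver and its helper are hypothetical:

    static int foo_suspend(struct device *dev, pm_message_t state)
    {
        /* The hardware must keep its rails up across platform
         * hibernation, so only gate the clock on other transitions. */
        if (!system_entering_hibernation())
            foo_gate_clock(dev); /* hypothetical helper */
        return 0;
    }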
diff --git a/kernel/printk.c b/kernel/printk.c
index 7015733793e..69188f226a9 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -382,7 +382,7 @@ out:
return error;
}
-asmlinkage long sys_syslog(int type, char __user *buf, int len)
+SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
{
return do_syslog(type, buf, len);
}
@@ -742,11 +742,6 @@ EXPORT_SYMBOL(vprintk);
#else
-asmlinkage long sys_syslog(int type, char __user *buf, int len)
-{
- return -ENOSYS;
-}
-
static void call_console_drivers(unsigned start, unsigned end)
{
}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 29dc700e198..c9cf48b21f0 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -574,7 +574,7 @@ struct task_struct *ptrace_get_task_struct(pid_t pid)
#define arch_ptrace_attach(child) do { } while (0)
#endif
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
{
struct task_struct *child;
long ret;
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 490934fc7ac..bd5a9003497 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -716,7 +716,7 @@ void rcu_check_callbacks(int cpu, int user)
raise_rcu_softirq();
}
-static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
+static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
struct rcu_data *rdp)
{
unsigned long flags;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f2d8638e6c6..b2fd602a6f6 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1314,7 +1314,7 @@ int rcu_needs_cpu(int cpu)
* access due to the fact that this CPU cannot possibly have any RCU
* callbacks in flight yet.
*/
-static void
+static void __cpuinit
rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
{
unsigned long flags;
diff --git a/kernel/relay.c b/kernel/relay.c
index 09ac2008f77..9d79b7854fa 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -663,8 +663,10 @@ int relay_late_setup_files(struct rchan *chan,
mutex_lock(&relay_channels_mutex);
/* Is chan already set up? */
- if (unlikely(chan->has_base_filename))
+ if (unlikely(chan->has_base_filename)) {
+ mutex_unlock(&relay_channels_mutex);
return -EEXIST;
+ }
chan->has_base_filename = 1;
chan->parent = parent;
curr_cpu = get_cpu();
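
The early return above was leaving relay_channels_mutex held on the -EEXIST path; adding the unlock plugs the leak. The other common shape for this fix is the goto-unlock idiom; a sketch of that alternative, not what the patch chose:

    mutex_lock(&relay_channels_mutex);
    if (unlikely(chan->has_base_filename)) {
        err = -EEXIST;
        goto out_unlock;
    }
    /* ...remainder of the setup, with err set appropriately... */
    out_unlock:
    mutex_unlock(&relay_channels_mutex);
    return err;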
diff --git a/kernel/resource.c b/kernel/resource.c
index ca6a1536b20..fd5d7d574bb 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -620,6 +620,7 @@ resource_size_t resource_alignment(struct resource *res)
* @start: resource start address
* @n: resource region size
* @name: reserving caller's ID string
+ * @flags: IO resource flags
*/
struct resource * __request_region(struct resource *parent,
resource_size_t start, resource_size_t n,
diff --git a/kernel/sched.c b/kernel/sched.c
index 8be2c13b50d..52bbf1c842a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1323,8 +1323,8 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
* slice expiry etc.
*/
-#define WEIGHT_IDLEPRIO 2
-#define WMULT_IDLEPRIO (1 << 31)
+#define WEIGHT_IDLEPRIO 3
+#define WMULT_IDLEPRIO 1431655765
/*
* Nice levels are multiplicative, with a gentle 10% change for every
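
A note on the constants: the WMULT_* values are the precomputed fixed-point inverses 2^32 / weight used by the prio_to_wmult[] table, so the pair has to move together. The old weight of 2 paired with 2^32 / 2 = 1 << 31, and the new weight of 3 requires floor(2^32 / 3) = floor(4294967296 / 3) = 1431655765.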
@@ -4440,7 +4440,7 @@ void __kprobes sub_preempt_count(int val)
/*
* Underflow?
*/
- if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
+ if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
return;
/*
* Is the spinlock portion underflowing?
@@ -5126,7 +5126,7 @@ int can_nice(const struct task_struct *p, const int nice)
* sys_setpriority is a more generic, but much slower function that
* does similar things.
*/
-asmlinkage long sys_nice(int increment)
+SYSCALL_DEFINE1(nice, int, increment)
{
long nice, retval;
@@ -5433,8 +5433,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
* @policy: new policy.
* @param: structure containing the new RT priority.
*/
-asmlinkage long
-sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
+ struct sched_param __user *, param)
{
/* negative values for policy are not valid */
if (policy < 0)
@@ -5448,7 +5448,7 @@ sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
* @pid: the pid in question.
* @param: structure containing the new RT priority.
*/
-asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
return do_sched_setscheduler(pid, -1, param);
}
@@ -5457,7 +5457,7 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
* sys_sched_getscheduler - get the policy (scheduling class) of a thread
* @pid: the pid in question.
*/
-asmlinkage long sys_sched_getscheduler(pid_t pid)
+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
struct task_struct *p;
int retval;
@@ -5482,7 +5482,7 @@ asmlinkage long sys_sched_getscheduler(pid_t pid)
* @pid: the pid in question.
* @param: structure containing the RT priority.
*/
-asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
struct sched_param lp;
struct task_struct *p;
@@ -5600,8 +5600,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to the new cpu mask
*/
-asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
- unsigned long __user *user_mask_ptr)
+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
+ unsigned long __user *, user_mask_ptr)
{
cpumask_var_t new_mask;
int retval;
@@ -5648,8 +5648,8 @@ out_unlock:
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to hold the current cpu mask
*/
-asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
- unsigned long __user *user_mask_ptr)
+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+ unsigned long __user *, user_mask_ptr)
{
int ret;
cpumask_var_t mask;
@@ -5678,7 +5678,7 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
* This function yields the current CPU to other tasks. If there are no
* other threads running on this CPU then this function will return.
*/
-asmlinkage long sys_sched_yield(void)
+SYSCALL_DEFINE0(sched_yield)
{
struct rq *rq = this_rq_lock();
@@ -5819,7 +5819,7 @@ long __sched io_schedule_timeout(long timeout)
* this syscall returns the maximum rt_priority that can be used
* by a given scheduling class.
*/
-asmlinkage long sys_sched_get_priority_max(int policy)
+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
int ret = -EINVAL;
@@ -5844,7 +5844,7 @@ asmlinkage long sys_sched_get_priority_max(int policy)
* this syscall returns the minimum rt_priority that can be used
* by a given scheduling class.
*/
-asmlinkage long sys_sched_get_priority_min(int policy)
+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
int ret = -EINVAL;
@@ -5869,8 +5869,8 @@ asmlinkage long sys_sched_get_priority_min(int policy)
* this syscall writes the default timeslice value of a given process
* into the user-space timespec buffer. A value of '0' means infinity.
*/
-asmlinkage
-long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
+ struct timespec __user *, interval)
{
struct task_struct *p;
unsigned int time_slice;
@@ -9050,6 +9050,13 @@ static int tg_schedulable(struct task_group *tg, void *data)
runtime = d->rt_runtime;
}
+#ifdef CONFIG_USER_SCHED
+ if (tg == &root_task_group) {
+ period = global_rt_period();
+ runtime = global_rt_runtime();
+ }
+#endif
+
/*
* Cannot have more runtime than the period.
*/
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8e1352c7555..5cc1c162044 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -283,7 +283,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
struct sched_entity,
run_node);
- if (vruntime == cfs_rq->min_vruntime)
+ if (!cfs_rq->curr)
vruntime = se->vruntime;
else
vruntime = min_vruntime(vruntime, se->vruntime);
@@ -429,7 +429,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
for_each_sched_entity(se) {
- struct load_weight *load = &cfs_rq->load;
+ struct load_weight *load;
+
+ cfs_rq = cfs_rq_of(se);
+ load = &cfs_rq->load;
if (unlikely(!se->on_rq)) {
struct load_weight lw = cfs_rq->load;
@@ -677,9 +680,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
unsigned long thresh = sysctl_sched_latency;
/*
- * convert the sleeper threshold into virtual time
+ * Convert the sleeper threshold into virtual time.
+ * SCHED_IDLE is a special sub-class. We care about
+ * fairness only relative to other SCHED_IDLE tasks,
+ * all of which have the same weight.
*/
- if (sched_feat(NORMALIZED_SLEEPER))
+ if (sched_feat(NORMALIZED_SLEEPER) &&
+ task_of(se)->policy != SCHED_IDLE)
thresh = calc_delta_fair(thresh, se);
vruntime -= thresh;
@@ -1340,14 +1347,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
static void set_last_buddy(struct sched_entity *se)
{
- for_each_sched_entity(se)
- cfs_rq_of(se)->last = se;
+ if (likely(task_of(se)->policy != SCHED_IDLE)) {
+ for_each_sched_entity(se)
+ cfs_rq_of(se)->last = se;
+ }
}
static void set_next_buddy(struct sched_entity *se)
{
- for_each_sched_entity(se)
- cfs_rq_of(se)->next = se;
+ if (likely(task_of(se)->policy != SCHED_IDLE)) {
+ for_each_sched_entity(se)
+ cfs_rq_of(se)->next = se;
+ }
}
/*
@@ -1393,12 +1404,18 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
return;
/*
- * Batch tasks do not preempt (their preemption is driven by
+ * Batch and idle tasks do not preempt (their preemption is driven by
* the tick):
*/
- if (unlikely(p->policy == SCHED_BATCH))
+ if (unlikely(p->policy != SCHED_NORMAL))
return;
+ /* Idle tasks are by definition preempted by everybody. */
+ if (unlikely(curr->policy == SCHED_IDLE)) {
+ resched_task(curr);
+ return;
+ }
+
if (!sched_feat(WAKEUP_PREEMPT))
return;
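
Taken together, these hunks keep SCHED_IDLE entities from becoming wakeup buddies, let any other policy preempt them, and exempt them from the normalized sleeper credit (which their tiny weight would otherwise scale up enormously). For reference, the class these rules apply to is selected from user space like this; a minimal sketch, noting that glibc exposes SCHED_IDLE only under _GNU_SOURCE:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        struct sched_param sp = { .sched_priority = 0 };

        /* pid 0 == the calling task; SCHED_IDLE requires priority 0 */
        if (sched_setscheduler(0, SCHED_IDLE, &sp) == -1) {
            perror("sched_setscheduler");
            return 1;
        }
        /* ...run the background work here... */
        return 0;
    }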
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index f2773b5d122..8ab0cef8eca 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -296,6 +296,7 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
static inline void account_group_user_time(struct task_struct *tsk,
cputime_t cputime)
{
+ struct task_cputime *times;
struct signal_struct *sig;
/* tsk == current, ensure it is safe to use ->signal */
@@ -303,13 +304,11 @@ static inline void account_group_user_time(struct task_struct *tsk,
return;
sig = tsk->signal;
- if (sig->cputime.totals) {
- struct task_cputime *times;
+ times = &sig->cputime.totals;
- times = per_cpu_ptr(sig->cputime.totals, get_cpu());
- times->utime = cputime_add(times->utime, cputime);
- put_cpu_no_resched();
- }
+ spin_lock(&times->lock);
+ times->utime = cputime_add(times->utime, cputime);
+ spin_unlock(&times->lock);
}
/**
@@ -325,6 +324,7 @@ static inline void account_group_user_time(struct task_struct *tsk,
static inline void account_group_system_time(struct task_struct *tsk,
cputime_t cputime)
{
+ struct task_cputime *times;
struct signal_struct *sig;
/* tsk == current, ensure it is safe to use ->signal */
@@ -332,13 +332,11 @@ static inline void account_group_system_time(struct task_struct *tsk,
return;
sig = tsk->signal;
- if (sig->cputime.totals) {
- struct task_cputime *times;
+ times = &sig->cputime.totals;
- times = per_cpu_ptr(sig->cputime.totals, get_cpu());
- times->stime = cputime_add(times->stime, cputime);
- put_cpu_no_resched();
- }
+ spin_lock(&times->lock);
+ times->stime = cputime_add(times->stime, cputime);
+ spin_unlock(&times->lock);
}
/**
@@ -354,6 +352,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
static inline void account_group_exec_runtime(struct task_struct *tsk,
unsigned long long ns)
{
+ struct task_cputime *times;
struct signal_struct *sig;
sig = tsk->signal;
@@ -362,11 +361,9 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
if (unlikely(!sig))
return;
- if (sig->cputime.totals) {
- struct task_cputime *times;
+ times = &sig->cputime.totals;
- times = per_cpu_ptr(sig->cputime.totals, get_cpu());
- times->sum_exec_runtime += ns;
- put_cpu_no_resched();
- }
+ spin_lock(&times->lock);
+ times->sum_exec_runtime += ns;
+ spin_unlock(&times->lock);
}
diff --git a/kernel/signal.c b/kernel/signal.c
index 3152ac3b62e..e73759783dc 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1961,7 +1961,7 @@ EXPORT_SYMBOL(unblock_all_signals);
* System call entry points.
*/
-asmlinkage long sys_restart_syscall(void)
+SYSCALL_DEFINE0(restart_syscall)
{
struct restart_block *restart = &current_thread_info()->restart_block;
return restart->fn(restart);
@@ -2014,8 +2014,8 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
return error;
}
-asmlinkage long
-sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
+SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
+ sigset_t __user *, oset, size_t, sigsetsize)
{
int error = -EINVAL;
sigset_t old_set, new_set;
@@ -2074,8 +2074,7 @@ out:
return error;
}
-asmlinkage long
-sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
+SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
{
return do_sigpending(set, sigsetsize);
}
@@ -2146,11 +2145,9 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
#endif
-asmlinkage long
-sys_rt_sigtimedwait(const sigset_t __user *uthese,
- siginfo_t __user *uinfo,
- const struct timespec __user *uts,
- size_t sigsetsize)
+SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
+ siginfo_t __user *, uinfo, const struct timespec __user *, uts,
+ size_t, sigsetsize)
{
int ret, sig;
sigset_t these;
@@ -2223,8 +2220,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
return ret;
}
-asmlinkage long
-sys_kill(pid_t pid, int sig)
+SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
struct siginfo info;
@@ -2283,7 +2279,7 @@ static int do_tkill(pid_t tgid, pid_t pid, int sig)
* exists but it's not belonging to the target process anymore. This
* method solves the problem of threads exiting and PIDs getting reused.
*/
-asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
+SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
/* This is only valid for single tasks */
if (pid <= 0 || tgid <= 0)
@@ -2295,8 +2291,7 @@ asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
/*
* Send a signal to only one task, even if it's a CLONE_THREAD task.
*/
-asmlinkage long
-sys_tkill(pid_t pid, int sig)
+SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
/* This is only valid for single tasks */
if (pid <= 0)
@@ -2305,8 +2300,8 @@ sys_tkill(pid_t pid, int sig)
return do_tkill(0, pid, sig);
}
-asmlinkage long
-sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
+SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
+ siginfo_t __user *, uinfo)
{
siginfo_t info;
@@ -2434,8 +2429,7 @@ out:
#ifdef __ARCH_WANT_SYS_SIGPENDING
-asmlinkage long
-sys_sigpending(old_sigset_t __user *set)
+SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
return do_sigpending(set, sizeof(*set));
}
@@ -2446,8 +2440,8 @@ sys_sigpending(old_sigset_t __user *set)
/* Some platforms have their own version with special arguments others
support only sys_rt_sigprocmask. */
-asmlinkage long
-sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
+SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
+ old_sigset_t __user *, oset)
{
int error;
old_sigset_t old_set, new_set;
@@ -2497,11 +2491,10 @@ out:
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
-asmlinkage long
-sys_rt_sigaction(int sig,
- const struct sigaction __user *act,
- struct sigaction __user *oact,
- size_t sigsetsize)
+SYSCALL_DEFINE4(rt_sigaction, int, sig,
+ const struct sigaction __user *, act,
+ struct sigaction __user *, oact,
+ size_t, sigsetsize)
{
struct k_sigaction new_sa, old_sa;
int ret = -EINVAL;
@@ -2531,15 +2524,13 @@ out:
/*
* For backwards compatibility. Functionality superseded by sigprocmask.
*/
-asmlinkage long
-sys_sgetmask(void)
+SYSCALL_DEFINE0(sgetmask)
{
/* SMP safe */
return current->blocked.sig[0];
}
-asmlinkage long
-sys_ssetmask(int newmask)
+SYSCALL_DEFINE1(ssetmask, int, newmask)
{
int old;
@@ -2559,8 +2550,7 @@ sys_ssetmask(int newmask)
/*
* For backwards compatibility. Functionality superseded by sigaction.
*/
-asmlinkage unsigned long
-sys_signal(int sig, __sighandler_t handler)
+SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
struct k_sigaction new_sa, old_sa;
int ret;
@@ -2577,8 +2567,7 @@ sys_signal(int sig, __sighandler_t handler)
#ifdef __ARCH_WANT_SYS_PAUSE
-asmlinkage long
-sys_pause(void)
+SYSCALL_DEFINE0(pause)
{
current->state = TASK_INTERRUPTIBLE;
schedule();
@@ -2588,7 +2577,7 @@ sys_pause(void)
#endif
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
-asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
+SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
sigset_t newset;
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d9188c66278..85d5a245510 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -16,6 +16,7 @@
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
+#include <linux/sysctl.h>
#include <asm/irq_regs.h>
@@ -88,6 +89,14 @@ void touch_all_softlockup_watchdogs(void)
}
EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
+int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
+ struct file *filp, void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ touch_all_softlockup_watchdogs();
+ return proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+}
+
/*
* This callback runs from the timer interrupt, and checks
* whether the watchdog thread has hung or not:
diff --git a/kernel/sys.c b/kernel/sys.c
index 763c3c17ded..e7dc0e10a48 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -143,7 +143,7 @@ out:
return error;
}
-asmlinkage long sys_setpriority(int which, int who, int niceval)
+SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
struct task_struct *g, *p;
struct user_struct *user;
@@ -208,7 +208,7 @@ out:
* has been offset by 20 (ie it returns 40..1 instead of -20..19)
* to stay compatible.
*/
-asmlinkage long sys_getpriority(int which, int who)
+SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
struct task_struct *g, *p;
struct user_struct *user;
@@ -355,7 +355,8 @@ EXPORT_SYMBOL_GPL(kernel_power_off);
*
* reboot doesn't sync: do that yourself before calling this.
*/
-asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
+SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
+ void __user *, arg)
{
char buffer[256];
@@ -478,7 +479,7 @@ void ctrl_alt_del(void)
* SMP: There are not races, the GIDs are checked only by filesystem
* operations (as far as semantic preservation is concerned).
*/
-asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
+SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
const struct cred *old;
struct cred *new;
@@ -529,7 +530,7 @@ error:
*
* SMP: Same implicit races as above.
*/
-asmlinkage long sys_setgid(gid_t gid)
+SYSCALL_DEFINE1(setgid, gid_t, gid)
{
const struct cred *old;
struct cred *new;
@@ -597,7 +598,7 @@ static int set_user(struct cred *new)
* 100% compatible with BSD. A program which uses just setuid() will be
* 100% compatible with POSIX with saved IDs.
*/
-asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
+SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
const struct cred *old;
struct cred *new;
@@ -661,7 +662,7 @@ error:
* will allow a root program to temporarily drop privileges and be able to
* regain them by swapping the real and effective uid.
*/
-asmlinkage long sys_setuid(uid_t uid)
+SYSCALL_DEFINE1(setuid, uid_t, uid)
{
const struct cred *old;
struct cred *new;
@@ -705,7 +706,7 @@ error:
* This function implements a generic ability to update ruid, euid,
* and suid. This allows you to implement the 4.4 compatible seteuid().
*/
-asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
+SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
const struct cred *old;
struct cred *new;
@@ -756,7 +757,7 @@ error:
return retval;
}
-asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
+SYSCALL_DEFINE3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid)
{
const struct cred *cred = current_cred();
int retval;
@@ -771,7 +772,7 @@ asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __us
/*
* Same as above, but for rgid, egid, sgid.
*/
-asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
+SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
const struct cred *old;
struct cred *new;
@@ -814,7 +815,7 @@ error:
return retval;
}
-asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
+SYSCALL_DEFINE3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid)
{
const struct cred *cred = current_cred();
int retval;
@@ -833,7 +834,7 @@ asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __us
* whatever uid it wants to). It normally shadows "euid", except when
* explicitly set by setfsuid() or for access..
*/
-asmlinkage long sys_setfsuid(uid_t uid)
+SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
const struct cred *old;
struct cred *new;
@@ -870,7 +871,7 @@ change_okay:
/*
* Samma på svenska..
*/
-asmlinkage long sys_setfsgid(gid_t gid)
+SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
const struct cred *old;
struct cred *new;
@@ -919,7 +920,7 @@ void do_sys_times(struct tms *tms)
tms->tms_cstime = cputime_to_clock_t(cstime);
}
-asmlinkage long sys_times(struct tms __user * tbuf)
+SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
if (tbuf) {
struct tms tmp;
@@ -944,7 +945,7 @@ asmlinkage long sys_times(struct tms __user * tbuf)
* Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
* LBT 04.03.94
*/
-asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
+SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
struct task_struct *p;
struct task_struct *group_leader = current->group_leader;
@@ -1015,7 +1016,7 @@ out:
return err;
}
-asmlinkage long sys_getpgid(pid_t pid)
+SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
struct task_struct *p;
struct pid *grp;
@@ -1045,14 +1046,14 @@ out:
#ifdef __ARCH_WANT_SYS_GETPGRP
-asmlinkage long sys_getpgrp(void)
+SYSCALL_DEFINE0(getpgrp)
{
return sys_getpgid(0);
}
#endif
-asmlinkage long sys_getsid(pid_t pid)
+SYSCALL_DEFINE1(getsid, pid_t, pid)
{
struct task_struct *p;
struct pid *sid;
@@ -1080,7 +1081,7 @@ out:
return retval;
}
-asmlinkage long sys_setsid(void)
+SYSCALL_DEFINE0(setsid)
{
struct task_struct *group_leader = current->group_leader;
struct pid *sid = task_pid(group_leader);
@@ -1311,7 +1312,7 @@ int set_current_groups(struct group_info *group_info)
EXPORT_SYMBOL(set_current_groups);
-asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
+SYSCALL_DEFINE2(getgroups, int, gidsetsize, gid_t __user *, grouplist)
{
const struct cred *cred = current_cred();
int i;
@@ -1340,7 +1341,7 @@ out:
* without another task interfering.
*/
-asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
+SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
{
struct group_info *group_info;
int retval;
@@ -1394,7 +1395,7 @@ EXPORT_SYMBOL(in_egroup_p);
DECLARE_RWSEM(uts_sem);
-asmlinkage long sys_newuname(struct new_utsname __user * name)
+SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
int errno = 0;
@@ -1405,7 +1406,7 @@ asmlinkage long sys_newuname(struct new_utsname __user * name)
return errno;
}
-asmlinkage long sys_sethostname(char __user *name, int len)
+SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
int errno;
char tmp[__NEW_UTS_LEN];
@@ -1429,7 +1430,7 @@ asmlinkage long sys_sethostname(char __user *name, int len)
#ifdef __ARCH_WANT_SYS_GETHOSTNAME
-asmlinkage long sys_gethostname(char __user *name, int len)
+SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
int i, errno;
struct new_utsname *u;
@@ -1454,7 +1455,7 @@ asmlinkage long sys_gethostname(char __user *name, int len)
* Only setdomainname; getdomainname can be implemented by calling
* uname()
*/
-asmlinkage long sys_setdomainname(char __user *name, int len)
+SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
int errno;
char tmp[__NEW_UTS_LEN];
@@ -1477,7 +1478,7 @@ asmlinkage long sys_setdomainname(char __user *name, int len)
return errno;
}
-asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
+SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
if (resource >= RLIM_NLIMITS)
return -EINVAL;
@@ -1496,7 +1497,8 @@ asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
* Back compatibility for getrlimit. Needed for some apps.
*/
-asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
+SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
+ struct rlimit __user *, rlim)
{
struct rlimit x;
if (resource >= RLIM_NLIMITS)
@@ -1514,7 +1516,7 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r
#endif
-asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
+SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
struct rlimit new_rlim, *old_rlim;
int retval;
@@ -1687,7 +1689,7 @@ int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
-asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
+SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
who != RUSAGE_THREAD)
@@ -1695,14 +1697,14 @@ asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
return getrusage(current, who, ru);
}
-asmlinkage long sys_umask(int mask)
+SYSCALL_DEFINE1(umask, int, mask)
{
mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
return mask;
}
-asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5)
+SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
+ unsigned long, arg4, unsigned long, arg5)
{
struct task_struct *me = current;
unsigned char comm[sizeof(me->comm)];
@@ -1815,8 +1817,8 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
return error;
}
-asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
- struct getcpu_cache __user *unused)
+SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
+ struct getcpu_cache __user *, unused)
{
int err = 0;
int cpu = raw_smp_processor_id();
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index e14a2328170..27dad296738 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -131,6 +131,7 @@ cond_syscall(sys_io_destroy);
cond_syscall(sys_io_submit);
cond_syscall(sys_io_cancel);
cond_syscall(sys_io_getevents);
+cond_syscall(sys_syslog);
/* arch-specific weak syscall entries */
cond_syscall(sys_pciconfig_read);
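
This pairs with the printk.c hunk above: rather than compiling a private -ENOSYS stub when CONFIG_PRINTK is off, sys_syslog now gets the standard cond_syscall() treatment, a weak alias to sys_ni_syscall that any strong definition overrides. Roughly, on most architectures of this era:

    #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")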
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 89d74436318..790f9d78566 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -144,6 +144,7 @@ extern int acct_parm[];
#ifdef CONFIG_IA64
extern int no_unaligned_warning;
+extern int unaligned_dump_stack;
#endif
#ifdef CONFIG_RT_MUTEXES
@@ -781,6 +782,14 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "unaligned-dump-stack",
+ .data = &unaligned_dump_stack,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
#endif
#ifdef CONFIG_DETECT_SOFTLOCKUP
{
@@ -800,7 +809,7 @@ static struct ctl_table kern_table[] = {
.data = &softlockup_thresh,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
+ .proc_handler = &proc_dosoftlockup_thresh,
.strategy = &sysctl_intvec,
.extra1 = &neg_one,
.extra2 = &sixty,
@@ -1688,7 +1697,7 @@ int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *ol
return error;
}
-asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
+SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
{
struct __sysctl_args tmp;
int error;
@@ -2989,7 +2998,7 @@ int sysctl_ms_jiffies(struct ctl_table *table,
#else /* CONFIG_SYSCTL_SYSCALL */
-asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
+SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
{
struct __sysctl_args tmp;
int error;
diff --git a/kernel/time.c b/kernel/time.c
index 4886e3ce83a..29511943871 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(sys_tz);
* why not move it into the appropriate arch directory (for those
* architectures that need it).
*/
-asmlinkage long sys_time(time_t __user * tloc)
+SYSCALL_DEFINE1(time, time_t __user *, tloc)
{
time_t i = get_seconds();
@@ -79,7 +79,7 @@ asmlinkage long sys_time(time_t __user * tloc)
* architectures that need it).
*/
-asmlinkage long sys_stime(time_t __user *tptr)
+SYSCALL_DEFINE1(stime, time_t __user *, tptr)
{
struct timespec tv;
int err;
@@ -99,8 +99,8 @@ asmlinkage long sys_stime(time_t __user *tptr)
#endif /* __ARCH_WANT_SYS_TIME */
-asmlinkage long sys_gettimeofday(struct timeval __user *tv,
- struct timezone __user *tz)
+SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
+ struct timezone __user *, tz)
{
if (likely(tv != NULL)) {
struct timeval ktv;
@@ -184,8 +184,8 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
return 0;
}
-asmlinkage long sys_settimeofday(struct timeval __user *tv,
- struct timezone __user *tz)
+SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
+ struct timezone __user *, tz)
{
struct timeval user_tv;
struct timespec new_ts;
@@ -205,7 +205,7 @@ asmlinkage long sys_settimeofday(struct timeval __user *tv,
return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}
-asmlinkage long sys_adjtimex(struct timex __user *txc_p)
+SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
{
struct timex txc; /* Local copy of parameter */
int ret;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1b6c05bd0d0..d3f1ef4d5cb 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -134,7 +134,7 @@ __setup("nohz=", setup_tick_nohz);
* value. We do this unconditionally on any cpu, as we don't know whether the
* cpu, which has the update task assigned is in a long sleep.
*/
-void tick_nohz_update_jiffies(void)
+static void tick_nohz_update_jiffies(void)
{
int cpu = smp_processor_id();
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
diff --git a/kernel/timer.c b/kernel/timer.c
index dee3f641a7a..13dd64fe143 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1129,7 +1129,7 @@ void do_timer(unsigned long ticks)
* For backwards compatibility? This can be done in libc so Alpha
* and all newer ports shouldn't need it.
*/
-asmlinkage unsigned long sys_alarm(unsigned int seconds)
+SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
return alarm_setitimer(seconds);
}
@@ -1152,7 +1152,7 @@ asmlinkage unsigned long sys_alarm(unsigned int seconds)
*
* This is SMP safe as current->tgid does not change.
*/
-asmlinkage long sys_getpid(void)
+SYSCALL_DEFINE0(getpid)
{
return task_tgid_vnr(current);
}
@@ -1163,7 +1163,7 @@ asmlinkage long sys_getpid(void)
* value of ->real_parent under rcu_read_lock(), see
* release_task()->call_rcu(delayed_put_task_struct).
*/
-asmlinkage long sys_getppid(void)
+SYSCALL_DEFINE0(getppid)
{
int pid;
@@ -1174,25 +1174,25 @@ asmlinkage long sys_getppid(void)
return pid;
}
-asmlinkage long sys_getuid(void)
+SYSCALL_DEFINE0(getuid)
{
/* Only we change this so SMP safe */
return current_uid();
}
-asmlinkage long sys_geteuid(void)
+SYSCALL_DEFINE0(geteuid)
{
/* Only we change this so SMP safe */
return current_euid();
}
-asmlinkage long sys_getgid(void)
+SYSCALL_DEFINE0(getgid)
{
/* Only we change this so SMP safe */
return current_gid();
}
-asmlinkage long sys_getegid(void)
+SYSCALL_DEFINE0(getegid)
{
/* Only we change this so SMP safe */
return current_egid();
@@ -1308,7 +1308,7 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout)
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
/* Thread ID - the internal kernel "pid" */
-asmlinkage long sys_gettid(void)
+SYSCALL_DEFINE0(gettid)
{
return task_pid_vnr(current);
}
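
[Annotation] gettid returns the per-thread id, while getpid above returns the shared thread-group id; the two coincide only in the initial thread. A hypothetical userspace demonstration (glibc of this era has no gettid() wrapper, hence the raw syscall):

	#define _GNU_SOURCE
	#include <pthread.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static void *worker(void *arg)
	{
		/* second thread: same pid, different tid */
		printf("thread: pid=%d tid=%ld\n", getpid(), syscall(SYS_gettid));
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		printf("main:   pid=%d tid=%ld\n", getpid(), syscall(SYS_gettid));
		pthread_create(&t, NULL, worker, NULL);
		pthread_join(t, NULL);
		return 0;
	}

Build with: gcc demo.c -lpthread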
@@ -1400,7 +1400,7 @@ out:
return 0;
}
-asmlinkage long sys_sysinfo(struct sysinfo __user *info)
+SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
struct sysinfo val;
diff --git a/kernel/uid16.c b/kernel/uid16.c
index 2460c3199b5..0314501688b 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -17,7 +17,7 @@
#include <asm/uaccess.h>
-asmlinkage long sys_chown16(const char __user * filename, old_uid_t user, old_gid_t group)
+SYSCALL_DEFINE3(chown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)
{
long ret = sys_chown(filename, low2highuid(user), low2highgid(group));
/* avoid REGPARM breakage on x86: */
@@ -25,7 +25,7 @@ asmlinkage long sys_chown16(const char __user * filename, old_uid_t user, old_gi
return ret;
}
-asmlinkage long sys_lchown16(const char __user * filename, old_uid_t user, old_gid_t group)
+SYSCALL_DEFINE3(lchown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)
{
long ret = sys_lchown(filename, low2highuid(user), low2highgid(group));
/* avoid REGPARM breakage on x86: */
@@ -33,7 +33,7 @@ asmlinkage long sys_lchown16(const char __user * filename, old_uid_t user, old_g
return ret;
}
-asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group)
+SYSCALL_DEFINE3(fchown16, unsigned int, fd, old_uid_t, user, old_gid_t, group)
{
long ret = sys_fchown(fd, low2highuid(user), low2highgid(group));
/* avoid REGPARM breakage on x86: */
@@ -41,7 +41,7 @@ asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group)
return ret;
}
-asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid)
+SYSCALL_DEFINE2(setregid16, old_gid_t, rgid, old_gid_t, egid)
{
long ret = sys_setregid(low2highgid(rgid), low2highgid(egid));
/* avoid REGPARM breakage on x86: */
@@ -49,7 +49,7 @@ asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid)
return ret;
}
-asmlinkage long sys_setgid16(old_gid_t gid)
+SYSCALL_DEFINE1(setgid16, old_gid_t, gid)
{
long ret = sys_setgid(low2highgid(gid));
/* avoid REGPARM breakage on x86: */
@@ -57,7 +57,7 @@ asmlinkage long sys_setgid16(old_gid_t gid)
return ret;
}
-asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid)
+SYSCALL_DEFINE2(setreuid16, old_uid_t, ruid, old_uid_t, euid)
{
long ret = sys_setreuid(low2highuid(ruid), low2highuid(euid));
/* avoid REGPARM breakage on x86: */
@@ -65,7 +65,7 @@ asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid)
return ret;
}
-asmlinkage long sys_setuid16(old_uid_t uid)
+SYSCALL_DEFINE1(setuid16, old_uid_t, uid)
{
long ret = sys_setuid(low2highuid(uid));
/* avoid REGPARM breakage on x86: */
@@ -73,7 +73,7 @@ asmlinkage long sys_setuid16(old_uid_t uid)
return ret;
}
-asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid)
+SYSCALL_DEFINE3(setresuid16, old_uid_t, ruid, old_uid_t, euid, old_uid_t, suid)
{
long ret = sys_setresuid(low2highuid(ruid), low2highuid(euid),
low2highuid(suid));
@@ -82,7 +82,7 @@ asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid)
return ret;
}
-asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid)
+SYSCALL_DEFINE3(getresuid16, old_uid_t __user *, ruid, old_uid_t __user *, euid, old_uid_t __user *, suid)
{
const struct cred *cred = current_cred();
int retval;
@@ -94,7 +94,7 @@ asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid,
return retval;
}
-asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid)
+SYSCALL_DEFINE3(setresgid16, old_gid_t, rgid, old_gid_t, egid, old_gid_t, sgid)
{
long ret = sys_setresgid(low2highgid(rgid), low2highgid(egid),
low2highgid(sgid));
@@ -103,7 +103,8 @@ asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid)
return ret;
}
-asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid)
+
+SYSCALL_DEFINE3(getresgid16, old_gid_t __user *, rgid, old_gid_t __user *, egid, old_gid_t __user *, sgid)
{
const struct cred *cred = current_cred();
int retval;
@@ -115,7 +116,7 @@ asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid,
return retval;
}
-asmlinkage long sys_setfsuid16(old_uid_t uid)
+SYSCALL_DEFINE1(setfsuid16, old_uid_t, uid)
{
long ret = sys_setfsuid(low2highuid(uid));
/* avoid REGPARM breakage on x86: */
@@ -123,7 +124,7 @@ asmlinkage long sys_setfsuid16(old_uid_t uid)
return ret;
}
-asmlinkage long sys_setfsgid16(old_gid_t gid)
+SYSCALL_DEFINE1(setfsgid16, old_gid_t, gid)
{
long ret = sys_setfsgid(low2highgid(gid));
/* avoid REGPARM breakage on x86: */
@@ -161,7 +162,7 @@ static int groups16_from_user(struct group_info *group_info,
return 0;
}
-asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist)
+SYSCALL_DEFINE2(getgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
{
const struct cred *cred = current_cred();
int i;
@@ -184,7 +185,7 @@ out:
return i;
}
-asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist)
+SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
{
struct group_info *group_info;
int retval;
@@ -209,22 +210,22 @@ asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist)
return retval;
}
-asmlinkage long sys_getuid16(void)
+SYSCALL_DEFINE0(getuid16)
{
return high2lowuid(current_uid());
}
-asmlinkage long sys_geteuid16(void)
+SYSCALL_DEFINE0(geteuid16)
{
return high2lowuid(current_euid());
}
-asmlinkage long sys_getgid16(void)
+SYSCALL_DEFINE0(getgid16)
{
return high2lowgid(current_gid());
}
-asmlinkage long sys_getegid16(void)
+SYSCALL_DEFINE0(getegid16)
{
return high2lowgid(current_egid());
}
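
[Annotation] All of the 16-bit calls above funnel through the low2high*/high2low* helpers from include/linux/highuid.h. Approximately (a sketch, not the verbatim macros): -1 is the "leave unchanged" sentinel for the set*id calls and must survive widening, and ids that do not fit in 16 bits are narrowed to the configurable overflow id:

	#define low2highuid(uid) \
		((uid) == (old_uid_t)-1 ? (uid_t)-1 : (uid_t)(uid))

	#define high2lowuid(uid) \
		((uid) & ~0xFFFF ? (old_uid_t)overflowuid : (old_uid_t)(uid))

The gid variants are analogous.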
diff --git a/kernel/up.c b/kernel/up.c
index c04b9dcfceb..1ff27a28bb7 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -2,6 +2,7 @@
* Uniprocessor-only support functions. The counterpart to kernel/smp.c
*/
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
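
[Annotation] For context, kernel/up.c provides the uniprocessor stub of smp_call_function_single(). Roughly (a sketch of the UP implementation, which simply runs the function with interrupts disabled on the only cpu):

	int smp_call_function_single(int cpu, void (*func)(void *info),
				     void *info, int wait)
	{
		WARN_ON(cpu != 0);

		local_irq_disable();
		(func)(info);
		local_irq_enable();

		return 0;
	}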
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2f445833ae3..1f0c509b40d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -971,6 +971,8 @@ undo:
}
#ifdef CONFIG_SMP
+static struct workqueue_struct *work_on_cpu_wq __read_mostly;
+
struct work_for_cpu {
struct work_struct work;
long (*fn)(void *);
@@ -991,8 +993,8 @@ static void do_work_for_cpu(struct work_struct *w)
* @fn: the function to run
* @arg: the function arg
*
- * This will return -EINVAL in the cpu is not online, or the return value
- * of @fn otherwise.
+ * This will return the value @fn returns.
+ * It is up to the caller to ensure that the cpu doesn't go offline.
*/
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
@@ -1001,14 +1003,8 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
INIT_WORK(&wfc.work, do_work_for_cpu);
wfc.fn = fn;
wfc.arg = arg;
- get_online_cpus();
- if (unlikely(!cpu_online(cpu)))
- wfc.ret = -EINVAL;
- else {
- schedule_work_on(cpu, &wfc.work);
- flush_work(&wfc.work);
- }
- put_online_cpus();
+ queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
+ flush_work(&wfc.work);
return wfc.ret;
}
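
[Annotation] With this change, callers that relied on work_on_cpu() rejecting offline cpus must pin the cpu themselves. A hypothetical caller (example() and read_cpu_id() are illustrative names, not from the patch):

	static long read_cpu_id(void *arg)
	{
		return smp_processor_id();   /* executes on the requested cpu */
	}

	static long example(unsigned int cpu)
	{
		long ret;

		get_online_cpus();
		if (cpu_online(cpu))
			ret = work_on_cpu(cpu, read_cpu_id, NULL);
		else
			ret = -EINVAL;
		put_online_cpus();
		return ret;
	}

Queuing on a dedicated "work_on_cpu" workqueue, rather than the shared keventd queue, plausibly avoids deadlocks when work_on_cpu() is called from a work item or from code holding locks that other queued work also takes.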
@@ -1025,4 +1021,8 @@ void __init init_workqueues(void)
hotcpu_notifier(workqueue_cpu_callback, 0);
keventd_wq = create_workqueue("events");
BUG_ON(!keventd_wq);
+#ifdef CONFIG_SMP
+ work_on_cpu_wq = create_workqueue("work_on_cpu");
+ BUG_ON(!work_on_cpu_wq);
+#endif
}