Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c            |  6
-rw-r--r--  kernel/fork.c            |  9
-rw-r--r--  kernel/hrtimer.c         |  5
-rw-r--r--  kernel/irq/Makefile      |  3
-rw-r--r--  kernel/irq/manage.c      |  6
-rw-r--r--  kernel/irq/migration.c   |  5
-rw-r--r--  kernel/kprobes.c         |  3
-rw-r--r--  kernel/panic.c           |  1
-rw-r--r--  kernel/power/Kconfig     |  2
-rw-r--r--  kernel/power/main.c      |  2
-rw-r--r--  kernel/power/pm.c        | 20
-rw-r--r--  kernel/power/snapshot.c  |  9
-rw-r--r--  kernel/printk.c          |  6
-rw-r--r--  kernel/profile.c         |  2
-rw-r--r--  kernel/ptrace.c          | 10
-rw-r--r--  kernel/rcupdate.c        |  4
-rw-r--r--  kernel/sched.c           | 64
-rw-r--r--  kernel/signal.c          | 11
-rw-r--r--  kernel/softirq.c         |  4
-rw-r--r--  kernel/softlockup.c      |  4
-rw-r--r--  kernel/sys_ni.c          | 12
-rw-r--r--  kernel/time.c            |  8
-rw-r--r--  kernel/timer.c           | 38
-rw-r--r--  kernel/uid16.c           | 59
-rw-r--r--  kernel/workqueue.c       |  2
25 files changed, 174 insertions(+), 121 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 6c2eeb8f639..f86434d7b3d 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -34,6 +34,7 @@
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>
+#include <linux/pipe_fs_i.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -55,7 +56,7 @@ static void __unhash_process(struct task_struct *p)
detach_pid(p, PIDTYPE_PGID);
detach_pid(p, PIDTYPE_SID);
- list_del_init(&p->tasks);
+ list_del_rcu(&p->tasks);
__get_cpu_var(process_counts)--;
}
list_del_rcu(&p->thread_group);
@@ -941,6 +942,9 @@ fastcall NORET_TYPE void do_exit(long code)
if (tsk->io_context)
exit_io_context();
+ if (tsk->splice_pipe)
+ __free_pipe_info(tsk->splice_pipe);
+
/* PF_DEAD causes final put_task_struct after we schedule. */
preempt_disable();
BUG_ON(tsk->flags & PF_DEAD);
diff --git a/kernel/fork.c b/kernel/fork.c
index 3384eb89cb1..d2fa57d480d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -124,12 +124,6 @@ void __put_task_struct(struct task_struct *tsk)
free_task(tsk);
}
-void __put_task_struct_cb(struct rcu_head *rhp)
-{
- struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
- __put_task_struct(tsk);
-}
-
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
@@ -186,6 +180,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
atomic_set(&tsk->usage,2);
atomic_set(&tsk->fs_excl, 0);
tsk->btrace_seq = 0;
+ tsk->splice_pipe = NULL;
return tsk;
}
@@ -1210,7 +1205,7 @@ static task_t *copy_process(unsigned long clone_flags,
attach_pid(p, PIDTYPE_PGID, process_group(p));
attach_pid(p, PIDTYPE_SID, p->signal->session);
- list_add_tail(&p->tasks, &init_task.tasks);
+ list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
}
attach_pid(p, PIDTYPE_PID, p->pid);
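The exit.c and fork.c hunks above convert insertion and removal on the global init_task.tasks list to the RCU list primitives, which is what allows readers to walk the process list under rcu_read_lock() rather than tasklist_lock. A minimal sketch of such a reader (for_each_process() is the real iterator over this list; the locking shown is the usual RCU pairing this conversion enables, not code taken from this diff):

        /*
         * Sketch: RCU-side reader of the global task list, as enabled by
         * the list_add_tail_rcu()/list_del_rcu() conversion above.
         */
        #include <linux/kernel.h>
        #include <linux/sched.h>
        #include <linux/rcupdate.h>

        static void count_tasks(void)
        {
                struct task_struct *p;
                int n = 0;

                rcu_read_lock();        /* keeps removed entries from being freed */
                for_each_process(p)     /* walks the init_task.tasks list */
                        n++;
                rcu_read_unlock();

                printk(KERN_INFO "%d processes\n", n);
        }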
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f181ff4dd32..b7f0388bd71 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -501,6 +501,7 @@ int hrtimer_cancel(struct hrtimer *timer)
if (ret >= 0)
return ret;
+ cpu_relax();
}
}
@@ -835,7 +836,7 @@ static void migrate_hrtimers(int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
+static int hrtimer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -859,7 +860,7 @@ static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __devinitdata hrtimers_nb = {
+static struct notifier_block hrtimers_nb = {
.notifier_call = hrtimer_cpu_notify,
};
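For context, the cpu_relax() added above sits inside hrtimer_cancel()'s retry loop. Read in full (reconstructed here, so treat the exact body as an approximation), the function spins until the timer's callback is no longer executing on another CPU:

        int hrtimer_cancel(struct hrtimer *timer)
        {
                for (;;) {
                        int ret = hrtimer_try_to_cancel(timer);

                        if (ret >= 0)
                                return ret;
                        cpu_relax();    /* let the CPU running the callback finish */
                }
        }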
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 2b33f852be3..9f77f50d814 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,4 +1,5 @@
-obj-y := handle.o manage.o spurious.o migration.o
+obj-y := handle.o manage.o spurious.o
obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ac766ad573e..1279e349953 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -246,8 +246,10 @@ int setup_irq(unsigned int irq, struct irqaction * new)
mismatch:
spin_unlock_irqrestore(&desc->lock, flags);
- printk(KERN_ERR "%s: irq handler mismatch\n", __FUNCTION__);
- dump_stack();
+ if (!(new->flags & SA_PROBEIRQ)) {
+ printk(KERN_ERR "%s: irq handler mismatch\n", __FUNCTION__);
+ dump_stack();
+ }
return -EBUSY;
}
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 52a8655fa08..134f9f2e0e3 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,6 +1,5 @@
-#include <linux/irq.h>
-#if defined(CONFIG_GENERIC_PENDING_IRQ)
+#include <linux/irq.h>
void set_pending_irq(unsigned int irq, cpumask_t mask)
{
@@ -61,5 +60,3 @@ void move_native_irq(int irq)
}
cpus_clear(pending_irq_cpumask[irq]);
}
-
-#endif
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1156eb0977d..1fbf466a29a 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -585,6 +585,9 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
int i;
rp->kp.pre_handler = pre_handler_kretprobe;
+ rp->kp.post_handler = NULL;
+ rp->kp.fault_handler = NULL;
+ rp->kp.break_handler = NULL;
/* Pre-allocate memory for max kretprobe instances */
if (rp->maxactive <= 0) {
diff --git a/kernel/panic.c b/kernel/panic.c
index f895c7c01d5..cc2a4c9c36a 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -27,7 +27,6 @@ static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
int panic_timeout;
-EXPORT_SYMBOL(panic_timeout);
ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 9fd8d4f0359..ce0dfb8f4a4 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -41,7 +41,7 @@ config SOFTWARE_SUSPEND
depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)
---help---
Enable the possibility of suspending the machine.
- It doesn't need APM.
+ It doesn't need ACPI or APM.
You may suspend your machine by 'swsusp' or 'shutdown -z <time>'
(patch for sysvinit needed).
diff --git a/kernel/power/main.c b/kernel/power/main.c
index ee371f50cca..a6d9ef46009 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -272,7 +272,7 @@ static ssize_t state_store(struct subsystem * subsys, const char * buf, size_t n
if (*s && !strncmp(buf, *s, len))
break;
}
- if (*s)
+ if (state < PM_SUSPEND_MAX && *s)
error = enter_state(state);
else
error = -EINVAL;
diff --git a/kernel/power/pm.c b/kernel/power/pm.c
index 0f6908cce1d..84063ac8fcf 100644
--- a/kernel/power/pm.c
+++ b/kernel/power/pm.c
@@ -75,25 +75,6 @@ struct pm_dev *pm_register(pm_dev_t type,
return dev;
}
-/**
- * pm_unregister - unregister a device with power management
- * @dev: device to unregister
- *
- * Remove a device from the power management notification lists. The
- * dev passed must be a handle previously returned by pm_register.
- */
-
-void pm_unregister(struct pm_dev *dev)
-{
- if (dev) {
- mutex_lock(&pm_devs_lock);
- list_del(&dev->entry);
- mutex_unlock(&pm_devs_lock);
-
- kfree(dev);
- }
-}
-
static void __pm_unregister(struct pm_dev *dev)
{
if (dev) {
@@ -258,7 +239,6 @@ int pm_send_all(pm_request_t rqst, void *data)
}
EXPORT_SYMBOL(pm_register);
-EXPORT_SYMBOL(pm_unregister);
EXPORT_SYMBOL(pm_unregister_all);
EXPORT_SYMBOL(pm_send_all);
EXPORT_SYMBOL(pm_active);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index c5863d02c89..3eeedbb13b7 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -240,14 +240,15 @@ static void copy_data_pages(struct pbe *pblist)
* free_pagedir - free pages allocated with alloc_pagedir()
*/
-static void free_pagedir(struct pbe *pblist)
+static void free_pagedir(struct pbe *pblist, int clear_nosave_free)
{
struct pbe *pbe;
while (pblist) {
pbe = (pblist + PB_PAGE_SKIP)->next;
ClearPageNosave(virt_to_page(pblist));
- ClearPageNosaveFree(virt_to_page(pblist));
+ if (clear_nosave_free)
+ ClearPageNosaveFree(virt_to_page(pblist));
free_page((unsigned long)pblist);
pblist = pbe;
}
@@ -389,7 +390,7 @@ struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed
pbe->next = alloc_image_page(gfp_mask, safe_needed);
}
if (!pbe) { /* get_zeroed_page() failed */
- free_pagedir(pblist);
+ free_pagedir(pblist, 1);
pblist = NULL;
} else
create_pbe_list(pblist, nr_pages);
@@ -736,7 +737,7 @@ static int create_image(struct snapshot_handle *handle)
pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1);
if (pblist)
copy_page_backup_list(pblist, p);
- free_pagedir(p);
+ free_pagedir(p, 0);
if (!pblist)
error = -ENOMEM;
}
diff --git a/kernel/printk.c b/kernel/printk.c
index 8cc19431e74..c056f332443 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -360,8 +360,7 @@ static void call_console_drivers(unsigned long start, unsigned long end)
unsigned long cur_index, start_print;
static int msg_level = -1;
- if (((long)(start - end)) > 0)
- BUG();
+ BUG_ON(((long)(start - end)) > 0);
cur_index = start;
start_print = start;
@@ -708,8 +707,7 @@ int __init add_preferred_console(char *name, int idx, char *options)
*/
void acquire_console_sem(void)
{
- if (in_interrupt())
- BUG();
+ BUG_ON(in_interrupt());
down(&console_sem);
console_locked = 1;
console_may_schedule = 1;
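Several files in this series make the same open-coded "if (cond) BUG();" to BUG_ON(cond) conversion. BUG_ON() is the canonical form; its generic definition (roughly as in include/asm-generic/bug.h of this era, so treat the exact spelling as an approximation) also hands the compiler an unlikely() branch hint:

        #define BUG_ON(condition) do { \
                if (unlikely((condition) != 0)) \
                        BUG(); \
        } while (0)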
diff --git a/kernel/profile.c b/kernel/profile.c
index 5a730fdb1a2..68afe121e50 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -299,7 +299,7 @@ out:
}
#ifdef CONFIG_HOTPLUG_CPU
-static int __devinit profile_cpu_callback(struct notifier_block *info,
+static int profile_cpu_callback(struct notifier_block *info,
unsigned long action, void *__cpu)
{
int node, cpu = (unsigned long)__cpu;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 86a7f6c60cb..4e0f0ec003f 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -30,8 +30,7 @@
*/
void __ptrace_link(task_t *child, task_t *new_parent)
{
- if (!list_empty(&child->ptrace_list))
- BUG();
+ BUG_ON(!list_empty(&child->ptrace_list));
if (child->parent == new_parent)
return;
list_add(&child->ptrace_list, &child->parent->ptrace_children);
@@ -57,10 +56,6 @@ void ptrace_untrace(task_t *child)
signal_wake_up(child, 1);
}
}
- if (child->signal->flags & SIGNAL_GROUP_EXIT) {
- sigaddset(&child->pending.signal, SIGKILL);
- signal_wake_up(child, 1);
- }
spin_unlock(&child->sighand->siglock);
}
@@ -82,7 +77,8 @@ void __ptrace_unlink(task_t *child)
add_parent(child);
}
- ptrace_untrace(child);
+ if (child->state == TASK_TRACED)
+ ptrace_untrace(child);
}
/*
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 13458bbaa1b..6d32ff26f94 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -520,7 +520,7 @@ static void __devinit rcu_online_cpu(int cpu)
tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
}
-static int __devinit rcu_cpu_notify(struct notifier_block *self,
+static int rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -537,7 +537,7 @@ static int __devinit rcu_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __devinitdata rcu_nb = {
+static struct notifier_block rcu_nb = {
.notifier_call = rcu_cpu_notify,
};
diff --git a/kernel/sched.c b/kernel/sched.c
index dd153d6f8a0..4c64f85698a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -665,13 +665,55 @@ static int effective_prio(task_t *p)
}
/*
+ * We place interactive tasks back into the active array, if possible.
+ *
+ * To guarantee that this does not starve expired tasks we ignore the
+ * interactivity of a task if the first expired task had to wait more
+ * than a 'reasonable' amount of time. This deadline timeout is
+ * load-dependent, as the frequency of array switched decreases with
+ * increasing number of running tasks. We also ignore the interactivity
+ * if a better static_prio task has expired, and switch periodically
+ * regardless, to ensure that highly interactive tasks do not starve
+ * the less fortunate for unreasonably long periods.
+ */
+static inline int expired_starving(runqueue_t *rq)
+{
+ int limit;
+
+ /*
+ * Arrays were recently switched, all is well
+ */
+ if (!rq->expired_timestamp)
+ return 0;
+
+ limit = STARVATION_LIMIT * rq->nr_running;
+
+ /*
+ * It's time to switch arrays
+ */
+ if (jiffies - rq->expired_timestamp >= limit)
+ return 1;
+
+ /*
+ * There's a better selection in the expired array
+ */
+ if (rq->curr->static_prio > rq->best_expired_prio)
+ return 1;
+
+ /*
+ * All is well
+ */
+ return 0;
+}
+
+/*
* __activate_task - move a task to the runqueue.
*/
static void __activate_task(task_t *p, runqueue_t *rq)
{
prio_array_t *target = rq->active;
- if (batch_task(p))
+ if (unlikely(batch_task(p) || (expired_starving(rq) && !rt_task(p))))
target = rq->expired;
enqueue_task(p, target);
rq->nr_running++;
@@ -2490,22 +2532,6 @@ unsigned long long current_sched_time(const task_t *tsk)
}
/*
- * We place interactive tasks back into the active array, if possible.
- *
- * To guarantee that this does not starve expired tasks we ignore the
- * interactivity of a task if the first expired task had to wait more
- * than a 'reasonable' amount of time. This deadline timeout is
- * load-dependent, as the frequency of array switched decreases with
- * increasing number of running tasks. We also ignore the interactivity
- * if a better static_prio task has expired:
- */
-#define EXPIRED_STARVING(rq) \
- ((STARVATION_LIMIT && ((rq)->expired_timestamp && \
- (jiffies - (rq)->expired_timestamp >= \
- STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
- ((rq)->curr->static_prio > (rq)->best_expired_prio))
-
-/*
* Account user cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @hardirq_offset: the offset to subtract from hardirq_count()
@@ -2640,7 +2666,7 @@ void scheduler_tick(void)
if (!rq->expired_timestamp)
rq->expired_timestamp = jiffies;
- if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
+ if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
enqueue_task(p, rq->expired);
if (p->static_prio < rq->best_expired_prio)
rq->best_expired_prio = p->static_prio;
@@ -4788,7 +4814,7 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
/* Register at highest priority so that task migration (migrate_all_tasks)
* happens before everything else.
*/
-static struct notifier_block __devinitdata migration_notifier = {
+static struct notifier_block migration_notifier = {
.notifier_call = migration_call,
.priority = 10
};
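The deadline computed in expired_starving() scales linearly with the number of runnable tasks, so array switches become less frequent as load rises. A standalone userspace illustration of that scaling (the STARVATION_LIMIT value below is hypothetical; the kernel's actual constant is defined in sched.c):

        #include <stdio.h>

        #define STARVATION_LIMIT 1000   /* hypothetical: one second at HZ=1000 */

        int main(void)
        {
                unsigned long nr_running;

                /* limit = STARVATION_LIMIT * rq->nr_running, as in the hunk above */
                for (nr_running = 1; nr_running <= 8; nr_running *= 2)
                        printf("nr_running=%lu -> array-switch deadline=%lu jiffies\n",
                               nr_running, STARVATION_LIMIT * nr_running);
                return 0;
        }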
diff --git a/kernel/signal.c b/kernel/signal.c
index 92025b10879..e5f8aea78ff 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -769,8 +769,7 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
int ret = 0;
- if (!irqs_disabled())
- BUG();
+ BUG_ON(!irqs_disabled());
assert_spin_locked(&t->sighand->siglock);
/* Short-circuit ignored signals. */
@@ -869,7 +868,6 @@ __group_complete_signal(int sig, struct task_struct *p)
if (t == NULL)
/* restart balancing at this thread */
t = p->signal->curr_target = p;
- BUG_ON(t->tgid != p->tgid);
while (!wants_signal(sig, t)) {
t = next_thread(t);
@@ -1384,8 +1382,7 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
* the overrun count. Other uses should not try to
* send the signal multiple times.
*/
- if (q->info.si_code != SI_TIMER)
- BUG();
+ BUG_ON(q->info.si_code != SI_TIMER);
q->info.si_overrun++;
goto out;
}
@@ -1757,9 +1754,9 @@ relock:
/* Let the debugger run. */
ptrace_stop(signr, signr, info);
- /* We're back. Did the debugger cancel the sig or group_exit? */
+ /* We're back. Did the debugger cancel the sig? */
signr = current->exit_code;
- if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
+ if (signr == 0)
continue;
current->exit_code = 0;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ec8fed42a86..336f92d64e2 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -446,7 +446,7 @@ static void takeover_tasklets(unsigned int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int __devinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -484,7 +484,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block __devinitdata cpu_nfb = {
+static struct notifier_block cpu_nfb = {
.notifier_call = cpu_callback
};
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index ced91e1ff56..14c7faf0290 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -104,7 +104,7 @@ static int watchdog(void * __bind_cpu)
/*
* Create/destroy watchdog threads as CPUs come and go:
*/
-static int __devinit
+static int
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
int hotcpu = (unsigned long)hcpu;
@@ -140,7 +140,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
-static struct notifier_block __devinitdata cpu_nfb = {
+static struct notifier_block cpu_nfb = {
.notifier_call = cpu_callback
};
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index d82864c4a61..5433195040f 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -120,3 +120,15 @@ cond_syscall(sys32_sysctl);
cond_syscall(ppc_rtas);
cond_syscall(sys_spu_run);
cond_syscall(sys_spu_create);
+
+/* mmu depending weak syscall entries */
+cond_syscall(sys_mprotect);
+cond_syscall(sys_msync);
+cond_syscall(sys_mlock);
+cond_syscall(sys_munlock);
+cond_syscall(sys_mlockall);
+cond_syscall(sys_munlockall);
+cond_syscall(sys_mincore);
+cond_syscall(sys_madvise);
+cond_syscall(sys_mremap);
+cond_syscall(sys_remap_file_pages);
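cond_syscall() makes each of these entry points a weak alias for sys_ni_syscall(), so a !MMU kernel that never compiles the real implementation still links, and the syscall returns -ENOSYS at runtime. Its definition in include/linux/linkage.h of this era is approximately:

        #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")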
diff --git a/kernel/time.c b/kernel/time.c
index ff8e7019c4c..b00ddc71ced 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -410,7 +410,7 @@ EXPORT_SYMBOL(current_kernel_time);
* current_fs_time - Return FS time
* @sb: Superblock.
*
- * Return the current time truncated to the time granuality supported by
+ * Return the current time truncated to the time granularity supported by
* the fs.
*/
struct timespec current_fs_time(struct super_block *sb)
@@ -421,11 +421,11 @@ struct timespec current_fs_time(struct super_block *sb)
EXPORT_SYMBOL(current_fs_time);
/**
- * timespec_trunc - Truncate timespec to a granuality
+ * timespec_trunc - Truncate timespec to a granularity
* @t: Timespec
- * @gran: Granuality in ns.
+ * @gran: Granularity in ns.
*
- * Truncate a timespec to a granuality. gran must be smaller than a second.
+ * Truncate a timespec to a granularity. gran must be smaller than a second.
* Always rounds down.
*
* This function should be only used for timestamps returned by
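The rounding the comment above describes amounts to chopping tv_nsec down to a multiple of gran. A simplified sketch (the real timespec_trunc() additionally special-cases granularities at or below jiffy resolution, where no work is needed):

        struct timespec timespec_trunc(struct timespec t, unsigned gran)
        {
                /* round tv_nsec down to a multiple of gran; gran < 1 second */
                if (gran > 1)
                        t.tv_nsec -= t.tv_nsec % gran;
                return t;
        }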
diff --git a/kernel/timer.c b/kernel/timer.c
index 6b812c04737..67eaf0f5409 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -81,9 +81,10 @@ struct tvec_t_base_s {
} ____cacheline_aligned_in_smp;
typedef struct tvec_t_base_s tvec_base_t;
-static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
+
tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
+static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases };
static inline void set_running_timer(tvec_base_t *base,
struct timer_list *timer)
@@ -1224,28 +1225,36 @@ static int __devinit init_timers_cpu(int cpu)
{
int j;
tvec_base_t *base;
+ static char __devinitdata tvec_base_done[NR_CPUS];
- base = per_cpu(tvec_bases, cpu);
- if (!base) {
+ if (!tvec_base_done[cpu]) {
static char boot_done;
- /*
- * Cannot do allocation in init_timers as that runs before the
- * allocator initializes (and would waste memory if there are
- * more possible CPUs than will ever be installed/brought up).
- */
if (boot_done) {
+ /*
+ * The APs use this path later in boot
+ */
base = kmalloc_node(sizeof(*base), GFP_KERNEL,
cpu_to_node(cpu));
if (!base)
return -ENOMEM;
memset(base, 0, sizeof(*base));
+ per_cpu(tvec_bases, cpu) = base;
} else {
- base = &boot_tvec_bases;
+ /*
+ * This is for the boot CPU - we use compile-time
+ * static initialisation because per-cpu memory isn't
+ * ready yet and because the memory allocators are not
+ * initialised either.
+ */
boot_done = 1;
+ base = &boot_tvec_bases;
}
- per_cpu(tvec_bases, cpu) = base;
+ tvec_base_done[cpu] = 1;
+ } else {
+ base = per_cpu(tvec_bases, cpu);
}
+
spin_lock_init(&base->lock);
for (j = 0; j < TVN_SIZE; j++) {
INIT_LIST_HEAD(base->tv5.vec + j);
@@ -1305,7 +1314,7 @@ static void __devinit migrate_timers(int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int __devinit timer_cpu_notify(struct notifier_block *self,
+static int timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -1325,7 +1334,7 @@ static int __devinit timer_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __devinitdata timers_nb = {
+static struct notifier_block timers_nb = {
.notifier_call = timer_cpu_notify,
};
@@ -1455,7 +1464,7 @@ static void time_interpolator_update(long delta_nsec)
*/
if (jiffies % INTERPOLATOR_ADJUST == 0)
{
- if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC)
+ if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
time_interpolator->nsec_per_cyc--;
if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
time_interpolator->nsec_per_cyc++;
@@ -1479,8 +1488,7 @@ register_time_interpolator(struct time_interpolator *ti)
unsigned long flags;
/* Sanity check */
- if (ti->frequency == 0 || ti->mask == 0)
- BUG();
+ BUG_ON(ti->frequency == 0 || ti->mask == 0);
ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
spin_lock(&time_interpolator_lock);
diff --git a/kernel/uid16.c b/kernel/uid16.c
index aa25605027c..187e2a42387 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -20,43 +20,67 @@
asmlinkage long sys_chown16(const char __user * filename, old_uid_t user, old_gid_t group)
{
- return sys_chown(filename, low2highuid(user), low2highgid(group));
+ long ret = sys_chown(filename, low2highuid(user), low2highgid(group));
+ /* avoid REGPARM breakage on x86: */
+ prevent_tail_call(ret);
+ return ret;
}
asmlinkage long sys_lchown16(const char __user * filename, old_uid_t user, old_gid_t group)
{
- return sys_lchown(filename, low2highuid(user), low2highgid(group));
+ long ret = sys_lchown(filename, low2highuid(user), low2highgid(group));
+ /* avoid REGPARM breakage on x86: */
+ prevent_tail_call(ret);
+ return ret;
}
asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group)
{
- return sys_fchown(fd, low2highuid(user), low2highgid(group));
+ long ret = sys_fchown(fd, low2highuid(user), low2highgid(group));
+ /* avoid REGPARM breakage on x86: */
+ prevent_tail_call(ret);
+ return ret;
}
asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid)
{
- return sys_setregid(low2highgid(rgid), low2highgid(egid));
+ long ret = sys_setregid(low2highgid(rgid), low2highgid(egid));
+ /* avoid REGPARM breakage on x86: */
+ prevent_tail_call(ret);
+ return ret;
}
asmlinkage long sys_setgid16(old_gid_t gid)
{
- return sys_setgid(low2highgid(gid));
+ long ret = sys_setgid(low2highgid(gid));
+ /* avoid REGPARM breakage on x86: */
+ prevent_tail_call(ret);
+ return ret;
}
asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid)
{
- return sys_setreuid(low2highuid(ruid), low2highuid(euid));
+ long ret = sys_setreuid(low2highuid(ruid), low2highuid(euid));
+ /* avoid REGPARM breakage on x86: */
+ prevent_tail_call(ret);
+ return ret;
}
asmlinkage long sys_setuid16(old_uid_t uid)
{
- return sys_setuid(low2highuid(uid));
+ long ret = sys_setuid(low2highuid(uid));
+ /* avoid REGPARM breakage on x86: */
+ prevent_tail_call(ret);
+ return ret;
}
asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid)
{
- return sys_setresuid(low2highuid(ruid), low2highuid(euid),
- low2highuid(suid));
+ long ret = sys_setresuid(low2highuid(ruid), low2highuid(euid),
+ low2highuid(suid));
+ /* avoid REGPARM breakage on x86: */
+ prevent_tail_call(ret);
+ return ret;
}
asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid)
@@ -72,8 +96,11 @@ asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid,
asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid)
{
- return sys_setresgid(low2highgid(rgid), low2highgid(egid),
- low2highgid(sgid));
+ long ret = sys_setresgid(low2highgid(rgid), low2highgid(egid),
+ low2highgid(sgid));
+ /* avoid REGPARM breakage on x86: */
+ prevent_tail_call(ret);
+ return ret;
}
asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid)
@@ -89,12 +116,18 @@ asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid,
asmlinkage long sys_setfsuid16(old_uid_t uid)
{
- return sys_setfsuid(low2highuid(uid));
+ long ret = sys_setfsuid(low2highuid(uid));
+ /* avoid REGPARM breakage on x86: */
+ prevent_tail_call(ret);
+ return ret;
}
asmlinkage long sys_setfsgid16(old_gid_t gid)
{
- return sys_setfsgid(low2highgid(gid));
+ long ret = sys_setfsgid(low2highgid(gid));
+ /* avoid REGPARM breakage on x86: */
+ prevent_tail_call(ret);
+ return ret;
}
static int groups16_to_user(old_gid_t __user *grouplist,
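The prevent_tail_call() calls added above keep gcc from compiling these wrappers as tail calls. Because asmlinkage arguments live in the caller's stack slots, which for a syscall are the registers saved at kernel entry, a tail-called sys_*() would be free to reuse and modify those slots, corrupting the register state restored to userspace. On i386 of this era the macro is roughly an empty asm that forces the return value through a register (treat the exact form as an assumption):

        /* approximate i386 definition, include/asm-i386/linkage.h */
        #define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret))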
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e9e464a9037..880fb415a8f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -547,7 +547,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
}
/* We're holding the cpucontrol mutex here */
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+static int workqueue_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{