Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/cpuset.c        |  5
 -rw-r--r--  kernel/exit.c          |  2
 -rw-r--r--  kernel/fork.c          |  8
 -rw-r--r--  kernel/posix-timers.c  | 10
 -rw-r--r--  kernel/power/Kconfig   |  9
 -rw-r--r--  kernel/power/Makefile  |  3
 -rw-r--r--  kernel/power/pm.c      |  1
 -rw-r--r--  kernel/printk.c        | 16
 -rw-r--r--  kernel/ptrace.c        |  2
 -rw-r--r--  kernel/rcutorture.c    | 34
 -rw-r--r--  kernel/sched.c         | 10
 -rw-r--r--  kernel/signal.c        | 11
 -rw-r--r--  kernel/stop_machine.c  |  6
 13 files changed, 74 insertions(+), 43 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 5a737ed9dac..7430640f981 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1809,11 +1809,12 @@ int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
return 0;
+ if (current->flags & PF_EXITING) /* Let dying task have memory */
+ return 1;
+
/* Not hardwall and node outside mems_allowed: scan up cpusets */
down(&callback_sem);
- if (current->flags & PF_EXITING) /* Let dying task have memory */
- return 1;
task_lock(current);
cs = nearest_exclusive_ancestor(current->cpuset);
task_unlock(current);
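
The cpuset hunk moves the PF_EXITING test ahead of down(&callback_sem), so a task that is already exiting never takes (or blocks on) the cpuset semaphore. A minimal sketch of the resulting control flow, with the hierarchy scan elided:

int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	/* hardwall requests are answered without any locking */
	if (gfp_mask & __GFP_HARDWALL)
		return 0;

	/* dying tasks get memory unconditionally, before the semaphore */
	if (current->flags & PF_EXITING)
		return 1;

	down(&callback_sem);	/* reached only by live tasks */
	/* ... scan up cpusets via nearest_exclusive_ancestor() ... */
	up(&callback_sem);
	return 1;
}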
diff --git a/kernel/exit.c b/kernel/exit.c
index 452a1d11617..ee515683b92 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -859,7 +859,7 @@ fastcall NORET_TYPE void do_exit(long code)
if (group_dead && tsk->signal->leader)
disassociate_ctty(1);
- module_put(tsk->thread_info->exec_domain->module);
+ module_put(task_thread_info(tsk)->exec_domain->module);
if (tsk->binfmt)
module_put(tsk->binfmt->module);
diff --git a/kernel/fork.c b/kernel/fork.c
index 158710d2256..e0d0b77343f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -171,10 +171,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
return NULL;
}
- *ti = *orig->thread_info;
*tsk = *orig;
tsk->thread_info = ti;
- ti->task = tsk;
+ setup_thread_stack(tsk, orig);
/* One for us, one for whoever does the "release_task()" (usually parent) */
atomic_set(&tsk->usage,2);
@@ -324,7 +323,6 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
spin_lock_init(&mm->page_table_lock);
rwlock_init(&mm->ioctx_list_lock);
mm->ioctx_list = NULL;
- mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
mm->free_area_cache = TASK_UNMAPPED_BASE;
mm->cached_hole_size = ~0UL;
@@ -919,7 +917,7 @@ static task_t *copy_process(unsigned long clone_flags,
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
- if (!try_module_get(p->thread_info->exec_domain->module))
+ if (!try_module_get(task_thread_info(p)->exec_domain->module))
goto bad_fork_cleanup_count;
if (p->binfmt && !try_module_get(p->binfmt->module))
@@ -1180,7 +1178,7 @@ bad_fork_cleanup:
if (p->binfmt)
module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
- module_put(p->thread_info->exec_domain->module);
+ module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
put_group_info(p->group_info);
atomic_dec(&p->user->processes);
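
Several hunks in this patch (kernel/exit.c above, this file, and kernel/sched.c below) replace direct p->thread_info dereferences with accessors, so architectures can redefine how thread_info and the kernel stack are laid out. Roughly, the generic helpers these call sites rely on look like the following; the exact definitions live in include/linux/sched.h and can be overridden per architecture:

/* generic case: thread_info is reached through the task_struct */
#define task_thread_info(task)	((task)->thread_info)

static inline void setup_thread_stack(struct task_struct *p,
				      struct task_struct *org)
{
	/* copy the parent's thread_info, then point it back at the child */
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
	/* lowest stack address, just past thread_info */
	return (unsigned long *)(task_thread_info(p) + 1);
}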
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index ea55c7a1cd7..5870efb3e20 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -270,7 +270,7 @@ static void tstojiffie(struct timespec *tp, int res, u64 *jiff)
long sec = tp->tv_sec;
long nsec = tp->tv_nsec + res - 1;
- if (nsec > NSEC_PER_SEC) {
+ if (nsec >= NSEC_PER_SEC) {
sec++;
nsec -= NSEC_PER_SEC;
}
@@ -1209,13 +1209,9 @@ static int do_posix_clock_monotonic_get(clockid_t clock, struct timespec *tp)
do_posix_clock_monotonic_gettime_parts(tp, &wall_to_mono);
- tp->tv_sec += wall_to_mono.tv_sec;
- tp->tv_nsec += wall_to_mono.tv_nsec;
+ set_normalized_timespec(tp, tp->tv_sec + wall_to_mono.tv_sec,
+ tp->tv_nsec + wall_to_mono.tv_nsec);
- if ((tp->tv_nsec - NSEC_PER_SEC) > 0) {
- tp->tv_nsec -= NSEC_PER_SEC;
- tp->tv_sec++;
- }
return 0;
}
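
Two fixes above: tstojiffie() now treats a carry of exactly NSEC_PER_SEC as overflow (with res = 2 and tv_nsec = 999999999, nsec lands on exactly NSEC_PER_SEC, which the old '>' test left unnormalized), and the open-coded monotonic-clock normalization is replaced by set_normalized_timespec(), which handles carries of any size. The helper in kernel/time.c has roughly this shape:

void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
{
	while (nsec >= NSEC_PER_SEC) {	/* carry whole seconds up */
		nsec -= NSEC_PER_SEC;
		++sec;
	}
	while (nsec < 0) {		/* borrow for negative nsec */
		nsec += NSEC_PER_SEC;
		--sec;
	}
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}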
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 46a5e5acff9..5ec248cb7f4 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -19,6 +19,15 @@ config PM
will issue the hlt instruction if nothing is to be done, thereby
sending the processor to sleep and saving power.
+config PM_LEGACY
+ bool "Legacy Power Management API"
+ depends on PM
+ default y
+ ---help---
+ Support for pm_register() and friends.
+
+ If unsure, say Y.
+
config PM_DEBUG
bool "Power Management Debug Support"
depends on PM
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index c71eb4579c0..04be7d0d96a 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -3,7 +3,8 @@ ifeq ($(CONFIG_PM_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
endif
-obj-y := main.o process.o console.o pm.o
+obj-y := main.o process.o console.o
+obj-$(CONFIG_PM_LEGACY) += pm.o
obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o disk.o snapshot.o
obj-$(CONFIG_SUSPEND_SMP) += smp.o
diff --git a/kernel/power/pm.c b/kernel/power/pm.c
index 159149321b3..33c508e857d 100644
--- a/kernel/power/pm.c
+++ b/kernel/power/pm.c
@@ -23,6 +23,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pm.h>
+#include <linux/pm_legacy.h>
#include <linux/interrupt.h>
int pm_active;
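
The three power-management hunks gate the old pm_register()-style API behind a new CONFIG_PM_LEGACY option: the Kconfig entry adds the switch, the Makefile builds pm.o only when it is set, and pm.c picks up its declarations from the new <linux/pm_legacy.h> header. A hedged sketch of what a legacy user looks like (the mydrv_* names are hypothetical):

#include <linux/pm.h>
#include <linux/pm_legacy.h>	/* pm_register() & friends now live here */

static struct pm_dev *mydrv_pm;

static int mydrv_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
{
	/* rqst is PM_SUSPEND or PM_RESUME in this callback style */
	return 0;
}

static int __init mydrv_init(void)
{
	/* only links when CONFIG_PM_LEGACY=y builds kernel/power/pm.o */
	mydrv_pm = pm_register(PM_SYS_DEV, 0, mydrv_pm_callback);
	return 0;
}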
diff --git a/kernel/printk.c b/kernel/printk.c
index e9be027bc93..ac8a08f3620 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -491,7 +491,10 @@ __attribute__((weak)) unsigned long long printk_clock(void)
return sched_clock();
}
-/*
+/**
+ * printk - print a kernel message
+ * @fmt: format string
+ *
* This is printk. It can be called from any context. We want it to work.
*
* We try to grab the console_sem. If we succeed, it's easy - we log the output and
@@ -503,6 +506,9 @@ __attribute__((weak)) unsigned long long printk_clock(void)
* One effect of this deferred printing is that code which calls printk() and
* then changes console_loglevel may break. This is because console_loglevel
* is inspected when the actual printing occurs.
+ *
+ * See also:
+ * printf(3)
*/
asmlinkage int printk(const char *fmt, ...)
@@ -655,6 +661,9 @@ static void call_console_drivers(unsigned long start, unsigned long end)
/**
* add_preferred_console - add a device to the list of preferred consoles.
+ * @name: device name
+ * @idx: device index
+ * @options: options for this console
*
* The last preferred console added will be used for kernel messages
* and stdin/out/err for init. Normally this is used by console_setup
@@ -764,7 +773,8 @@ void release_console_sem(void)
}
EXPORT_SYMBOL(release_console_sem);
-/** console_conditional_schedule - yield the CPU if required
+/**
+ * console_conditional_schedule - yield the CPU if required
*
* If the console code is currently allowed to sleep, and
* if this CPU should yield the CPU to another task, do
@@ -976,6 +986,8 @@ EXPORT_SYMBOL(unregister_console);
/**
* tty_write_message - write a message to a certain tty, not just the console.
+ * @tty: the destination tty_struct
+ * @msg: the message to write
*
* This is used for messages that need to be redirected to a specific tty.
* We don't put it into the syslog queue right now maybe in the future if
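
All of the printk.c hunks are documentation fixes bringing comments into kernel-doc form: the opener must be '/**', every parameter needs an '@name:' line, and the one-line summary sits on the line with the function name. For reference, the layout scripts/kernel-doc expects (example_func is a made-up name):

/**
 * example_func - one-line summary of what the function does
 * @a: description of the first parameter
 * @b: description of the second parameter
 *
 * The long description follows a blank comment line. Comments
 * opened with a single '*' are ignored by scripts/kernel-doc.
 */
static int example_func(int a, const char *b);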
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index b88d4186cd7..17ee7e5a345 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -470,7 +470,7 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
if (request == PTRACE_ATTACH) {
ret = ptrace_attach(child);
- goto out;
+ goto out_put_task_struct;
}
ret = ptrace_check_attach(child, request == PTRACE_KILL);
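
The ptrace fix plugs a task_struct reference leak: sys_ptrace() pins the child with get_task_struct() after looking it up, and the PTRACE_ATTACH path jumped past the matching put_task_struct(). The relevant shape of the function, sketched:

asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	long ret;

	/* ... lookup under tasklist_lock ... */
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);		/* pin the child */
	/* ... */

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_put_task_struct;	/* was "goto out": leaked */
	}
	/* ... other requests ... */

out_put_task_struct:
	put_task_struct(child);		/* drop the pinned reference */
out:
	return ret;
}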
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 9b58f1eff3c..88c28d47655 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -80,6 +80,7 @@ struct rcu_torture {
struct rcu_head rtort_rcu;
int rtort_pipe_count;
struct list_head rtort_free;
+ int rtort_mbtest;
};
static int fullstop = 0; /* stop generating callbacks at test end. */
@@ -96,6 +97,8 @@ static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
atomic_t n_rcu_torture_alloc;
atomic_t n_rcu_torture_alloc_fail;
atomic_t n_rcu_torture_free;
+atomic_t n_rcu_torture_mberror;
+atomic_t n_rcu_torture_error;
/*
* Allocate an element from the rcu_tortures pool.
@@ -145,9 +148,10 @@ rcu_torture_cb(struct rcu_head *p)
if (i > RCU_TORTURE_PIPE_LEN)
i = RCU_TORTURE_PIPE_LEN;
atomic_inc(&rcu_torture_wcount[i]);
- if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN)
+ if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+ rp->rtort_mbtest = 0;
rcu_torture_free(rp);
- else
+ } else
call_rcu(p, rcu_torture_cb);
}
@@ -195,6 +199,8 @@ rcu_torture_writer(void *arg)
static DEFINE_RCU_RANDOM(rand);
VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
+ set_user_nice(current, 19);
+
do {
schedule_timeout_uninterruptible(1);
if (rcu_batches_completed() == oldbatch)
@@ -204,6 +210,7 @@ rcu_torture_writer(void *arg)
rp->rtort_pipe_count = 0;
udelay(rcu_random(&rand) & 0x3ff);
old_rp = rcu_torture_current;
+ rp->rtort_mbtest = 1;
rcu_assign_pointer(rcu_torture_current, rp);
smp_wmb();
if (old_rp != NULL) {
@@ -238,6 +245,8 @@ rcu_torture_reader(void *arg)
int pipe_count;
VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
+ set_user_nice(current, 19);
+
do {
rcu_read_lock();
completed = rcu_batches_completed();
@@ -248,6 +257,8 @@ rcu_torture_reader(void *arg)
schedule_timeout_interruptible(HZ);
continue;
}
+ if (p->rtort_mbtest == 0)
+ atomic_inc(&n_rcu_torture_mberror);
udelay(rcu_random(&rand) & 0x7f);
preempt_disable();
pipe_count = p->rtort_pipe_count;
@@ -296,16 +307,22 @@ rcu_torture_printk(char *page)
}
cnt += sprintf(&page[cnt], "rcutorture: ");
cnt += sprintf(&page[cnt],
- "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d",
+ "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
+ "rtmbe: %d",
rcu_torture_current,
rcu_torture_current_version,
list_empty(&rcu_torture_freelist),
atomic_read(&n_rcu_torture_alloc),
atomic_read(&n_rcu_torture_alloc_fail),
- atomic_read(&n_rcu_torture_free));
+ atomic_read(&n_rcu_torture_free),
+ atomic_read(&n_rcu_torture_mberror));
+ if (atomic_read(&n_rcu_torture_mberror) != 0)
+ cnt += sprintf(&page[cnt], " !!!");
cnt += sprintf(&page[cnt], "\nrcutorture: ");
- if (i > 1)
+ if (i > 1) {
cnt += sprintf(&page[cnt], "!!! ");
+ atomic_inc(&n_rcu_torture_error);
+ }
cnt += sprintf(&page[cnt], "Reader Pipe: ");
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
@@ -396,7 +413,9 @@ rcu_torture_cleanup(void)
for (i = 0; i < RCU_TORTURE_PIPE_LEN; i++)
synchronize_rcu();
rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
- PRINTK_STRING("--- End of test");
+ printk(KERN_ALERT TORTURE_FLAG
+ "--- End of test: %s\n",
+ atomic_read(&n_rcu_torture_error) == 0 ? "SUCCESS" : "FAILURE");
}
static int
@@ -421,6 +440,7 @@ rcu_torture_init(void)
INIT_LIST_HEAD(&rcu_torture_freelist);
for (i = 0; i < sizeof(rcu_tortures) / sizeof(rcu_tortures[0]); i++) {
+ rcu_tortures[i].rtort_mbtest = 0;
list_add_tail(&rcu_tortures[i].rtort_free,
&rcu_torture_freelist);
}
@@ -432,6 +452,8 @@ rcu_torture_init(void)
atomic_set(&n_rcu_torture_alloc, 0);
atomic_set(&n_rcu_torture_alloc_fail, 0);
atomic_set(&n_rcu_torture_free, 0);
+ atomic_set(&n_rcu_torture_mberror, 0);
+ atomic_set(&n_rcu_torture_error, 0);
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
atomic_set(&rcu_torture_wcount[i], 0);
for_each_cpu(cpu) {
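
The new rtort_mbtest flag is a publish-order check: the writer sets it to 1 before rcu_assign_pointer() publishes the element, and the RCU callback clears it to 0 just before the element is freed, so a reader that dereferences the current pointer and sees 0 has observed either pre-initialization state or a freed element, i.e. a memory-ordering failure, counted in n_rcu_torture_mberror. In outline:

/* writer side */
rp->rtort_mbtest = 1;				/* initialize ... */
rcu_assign_pointer(rcu_torture_current, rp);	/* ... then publish */

/* reader side */
p = rcu_dereference(rcu_torture_current);
if (p->rtort_mbtest == 0)		/* saw unpublished/freed state */
	atomic_inc(&n_rcu_torture_mberror);

/* reclaim side, in the RCU callback, just before freeing */
rp->rtort_mbtest = 0;
rcu_torture_free(rp);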
diff --git a/kernel/sched.c b/kernel/sched.c
index b6506671b2b..6f46c94cc29 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1437,7 +1437,7 @@ void fastcall sched_fork(task_t *p, int clone_flags)
#endif
#ifdef CONFIG_PREEMPT
/* Want to start with kernel preemption disabled. */
- p->thread_info->preempt_count = 1;
+ task_thread_info(p)->preempt_count = 1;
#endif
/*
* Share the timeslice between parent and child, thus the
@@ -4327,10 +4327,10 @@ static void show_task(task_t *p)
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
{
- unsigned long *n = (unsigned long *) (p->thread_info+1);
+ unsigned long *n = end_of_stack(p);
while (!*n)
n++;
- free = (unsigned long) n - (unsigned long)(p->thread_info+1);
+ free = (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif
printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
@@ -4410,9 +4410,9 @@ void __devinit init_idle(task_t *idle, int cpu)
/* Set the preempt count _outside_ the spinlocks! */
#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
- idle->thread_info->preempt_count = (idle->lock_depth >= 0);
+ task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
#else
- idle->thread_info->preempt_count = 0;
+ task_thread_info(idle)->preempt_count = 0;
#endif
}
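
The CONFIG_DEBUG_STACK_USAGE hunk is the same accessor conversion; the scan itself works because, when this option is set, the stack is zero-filled at allocation (on i386, for instance, the thread_info allocator adds __GFP_ZERO), so the first non-zero word above end_of_stack() is the stack's high-water mark. The same code, annotated:

#ifdef CONFIG_DEBUG_STACK_USAGE
	{
		/* the stack grows down toward thread_info; everything
		 * between end_of_stack() and the deepest write is still
		 * the zero fill from allocation */
		unsigned long *n = end_of_stack(p);
		while (!*n)
			n++;
		free = (unsigned long)n - (unsigned long)end_of_stack(p);
	}
#endif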
diff --git a/kernel/signal.c b/kernel/signal.c
index 80789a59b4d..d7611f189ef 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -513,16 +513,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
{
int sig = 0;
- /* SIGKILL must have priority, otherwise it is quite easy
- * to create an unkillable process, sending sig < SIGKILL
- * to self */
- if (unlikely(sigismember(&pending->signal, SIGKILL))) {
- if (!sigismember(mask, SIGKILL))
- sig = SIGKILL;
- }
-
- if (likely(!sig))
- sig = next_signal(pending, mask);
+ sig = next_signal(pending, mask);
if (sig) {
if (current->notifier) {
if (sigismember(current->notifier_mask, sig)) {
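
For context on what the simplified __dequeue_signal() now relies on: next_signal() returns the lowest-numbered signal that is pending and not blocked. Its shape in kernel/signal.c of this era, reduced to the general loop (the real function special-cases small _NSIG_WORDS values):

static int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;	/* pending bits */
	m = mask->sig;			/* blocked bits */
	for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m) {
		x = *s & ~*m;		/* pending and deliverable */
		if (x != 0) {
			sig = ffz(~x) + i * _NSIG_BPW + 1;
			break;
		}
	}
	return sig;
}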
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 84a9d18aa8d..b3d4dc858e3 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -119,13 +119,12 @@ static int stop_machine(void)
return ret;
}
- /* Don't schedule us away at this point, please. */
- local_irq_disable();
-
/* Now they are all started, make them hold the CPUs, ready. */
+ preempt_disable();
stopmachine_set_state(STOPMACHINE_PREPARE);
/* Make them disable irqs. */
+ local_irq_disable();
stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
return 0;
@@ -135,6 +134,7 @@ static void restart_machine(void)
{
stopmachine_set_state(STOPMACHINE_EXIT);
local_irq_enable();
+ preempt_enable_no_resched();
}
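
The stop_machine() reordering narrows the interrupts-off window: preempt_disable() is enough to keep the thread pinned while the per-CPU stop threads are herded into STOPMACHINE_PREPARE, and interrupts go off only immediately before the other CPUs are asked to disable theirs. restart_machine() then undoes the two in the opposite order, with the no-resched variant deferring any reschedule. The resulting pairing:

static int stop_machine(void)
{
	/* ... start one stopmachine thread per CPU ... */

	preempt_disable();	/* pinned, but interrupts still on */
	stopmachine_set_state(STOPMACHINE_PREPARE);

	local_irq_disable();	/* off only for the final step */
	stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
	return 0;
}

static void restart_machine(void)
{
	stopmachine_set_state(STOPMACHINE_EXIT);
	local_irq_enable();		/* reverse order on the way out */
	preempt_enable_no_resched();
}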
struct stop_machine_data