author     Ingo Molnar <mingo@elte.hu>  2009-02-20 17:40:43 +0100
committer  Ingo Molnar <mingo@elte.hu>  2009-02-20 17:40:43 +0100
commit     3b6f7b9beb553ea71a6d4d76ba82cf32eb6dadd2 (patch)
tree       2cd8a8d61f350aa3b00e6b0afc998544923f183e /kernel
parent     9be1b56a3e718aa998772019c57c398dbb19e258 (diff)
parent     07a66d7c53a538e1a9759954a82bb6c07365eff9 (diff)
Merge branch 'x86/urgent' into x86/core
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                  1
-rw-r--r--  kernel/cgroup.c                  2
-rw-r--r--  kernel/futex.c                  53
-rw-r--r--  kernel/posix-cpu-timers.c       60
-rw-r--r--  kernel/power/Makefile            2
-rw-r--r--  kernel/power/swap.c              5
-rw-r--r--  kernel/sched.c                  15
-rw-r--r--  kernel/trace/Kconfig            25
-rw-r--r--  kernel/trace/ftrace.c            6
-rw-r--r--  kernel/trace/trace_mmiotrace.c  14
-rw-r--r--  kernel/trace/trace_selftest.c   19
11 files changed, 135 insertions(+), 67 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 170a9213c1b..e4791b3ba55 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_UID16) += uid16.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_KALLSYMS) += kallsyms.o
obj-$(CONFIG_PM) += power/
+obj-$(CONFIG_FREEZER) += power/
obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
obj-$(CONFIG_KEXEC) += kexec.o
obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e14db9c089b..9edb5c4b79b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1122,8 +1122,8 @@ static void cgroup_kill_sb(struct super_block *sb) {
mutex_unlock(&cgroup_mutex);
- kfree(root);
kill_litter_super(sb);
+ kfree(root);
}
static struct file_system_type cgroup_fs_type = {
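The cgroup.c change above is an ordering fix: kill_litter_super() can still reach the root while it tears the superblock down, so the root must only be freed afterwards. A minimal user-space sketch of the same free-last ordering, with hypothetical names (not part of this commit):

#include <stdio.h>
#include <stdlib.h>

struct resource { int id; };
struct handle   { struct resource *res; };

/* teardown_handle() may still dereference h->res, so the resource
 * must stay alive until this call has returned. */
static void teardown_handle(struct handle *h)
{
    printf("tearing down handle for resource %d\n", h->res->id);
}

int main(void)
{
    struct resource *res = malloc(sizeof(*res));
    struct handle h = { .res = res };

    if (!res)
        return 1;
    res->id = 42;
    teardown_handle(&h);  /* tear down the user of the resource first */
    free(res);            /* ...and free the resource last, as cgroup_kill_sb() now does */
    return 0;
}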
diff --git a/kernel/futex.c b/kernel/futex.c
index f89d373a9c6..438701adce2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1165,6 +1165,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
struct task_struct *curr = current;
+ struct restart_block *restart;
DECLARE_WAITQUEUE(wait, curr);
struct futex_hash_bucket *hb;
struct futex_q q;
@@ -1216,11 +1217,13 @@ retry:
if (!ret)
goto retry;
- return ret;
+ goto out;
}
ret = -EWOULDBLOCK;
- if (uval != val)
- goto out_unlock_put_key;
+ if (unlikely(uval != val)) {
+ queue_unlock(&q, hb);
+ goto out_put_key;
+ }
/* Only actually queue if *uaddr contained val. */
queue_me(&q, hb);
@@ -1284,38 +1287,38 @@ retry:
*/
/* If we were woken (and unqueued), we succeeded, whatever. */
+ ret = 0;
if (!unqueue_me(&q))
- return 0;
+ goto out_put_key;
+ ret = -ETIMEDOUT;
if (rem)
- return -ETIMEDOUT;
+ goto out_put_key;
/*
* We expect signal_pending(current), but another thread may
* have handled it for us already.
*/
+ ret = -ERESTARTSYS;
if (!abs_time)
- return -ERESTARTSYS;
- else {
- struct restart_block *restart;
- restart = &current_thread_info()->restart_block;
- restart->fn = futex_wait_restart;
- restart->futex.uaddr = (u32 *)uaddr;
- restart->futex.val = val;
- restart->futex.time = abs_time->tv64;
- restart->futex.bitset = bitset;
- restart->futex.flags = 0;
-
- if (fshared)
- restart->futex.flags |= FLAGS_SHARED;
- if (clockrt)
- restart->futex.flags |= FLAGS_CLOCKRT;
- return -ERESTART_RESTARTBLOCK;
- }
+ goto out_put_key;
-out_unlock_put_key:
- queue_unlock(&q, hb);
- put_futex_key(fshared, &q.key);
+ restart = &current_thread_info()->restart_block;
+ restart->fn = futex_wait_restart;
+ restart->futex.uaddr = (u32 *)uaddr;
+ restart->futex.val = val;
+ restart->futex.time = abs_time->tv64;
+ restart->futex.bitset = bitset;
+ restart->futex.flags = 0;
+
+ if (fshared)
+ restart->futex.flags |= FLAGS_SHARED;
+ if (clockrt)
+ restart->futex.flags |= FLAGS_CLOCKRT;
+ ret = -ERESTART_RESTARTBLOCK;
+
+out_put_key:
+ put_futex_key(fshared, &q.key);
out:
return ret;
}
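The futex_wait() rework above replaces scattered early returns with goto labels, so put_futex_key() now runs on every exit path, and the restart_block setup is folded into that common path. A minimal user-space sketch of the single-exit goto cleanup style, with hypothetical names (not this function):

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for the futex key / hash bucket handling. */
static int  acquire_key(void) { return 0; }
static void release_key(void) { puts("key released"); }

static int do_wait(int woken, int timed_out)
{
    int ret;

    ret = acquire_key();
    if (ret)
        goto out;               /* nothing to release yet */

    ret = 0;
    if (woken)
        goto out_put_key;       /* even the success path drops the key reference */

    ret = -ETIMEDOUT;
    if (timed_out)
        goto out_put_key;

    ret = -EINTR;               /* interrupted; fall through to the cleanup */

out_put_key:
    release_key();
out:
    return ret;
}

int main(void)
{
    printf("do_wait(1, 0) = %d\n", do_wait(1, 0));
    printf("do_wait(0, 1) = %d\n", do_wait(0, 1));
    printf("do_wait(0, 0) = %d\n", do_wait(0, 0));
    return 0;
}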
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 2313a4cc14e..e976e505648 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -681,6 +681,33 @@ static void cpu_timer_fire(struct k_itimer *timer)
}
/*
+ * Sample a process (thread group) timer for the given group_leader task.
+ * Must be called with tasklist_lock held for reading.
+ */
+static int cpu_timer_sample_group(const clockid_t which_clock,
+ struct task_struct *p,
+ union cpu_time_count *cpu)
+{
+ struct task_cputime cputime;
+
+ thread_group_cputimer(p, &cputime);
+ switch (CPUCLOCK_WHICH(which_clock)) {
+ default:
+ return -EINVAL;
+ case CPUCLOCK_PROF:
+ cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+ break;
+ case CPUCLOCK_VIRT:
+ cpu->cpu = cputime.utime;
+ break;
+ case CPUCLOCK_SCHED:
+ cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+ break;
+ }
+ return 0;
+}
+
+/*
* Guts of sys_timer_settime for CPU timers.
* This is called with the timer locked and interrupts disabled.
* If we return TIMER_RETRY, it's necessary to release the timer's lock
@@ -741,7 +768,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
cpu_clock_sample(timer->it_clock, p, &val);
} else {
- cpu_clock_sample_group(timer->it_clock, p, &val);
+ cpu_timer_sample_group(timer->it_clock, p, &val);
}
if (old) {
@@ -889,7 +916,7 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
read_unlock(&tasklist_lock);
goto dead;
} else {
- cpu_clock_sample_group(timer->it_clock, p, &now);
+ cpu_timer_sample_group(timer->it_clock, p, &now);
clear_dead = (unlikely(p->exit_state) &&
thread_group_empty(p));
}
@@ -1244,7 +1271,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
clear_dead_task(timer, now);
goto out_unlock;
}
- cpu_clock_sample_group(timer->it_clock, p, &now);
+ cpu_timer_sample_group(timer->it_clock, p, &now);
bump_cpu_timer(timer, now);
/* Leave the tasklist_lock locked for the call below. */
}
@@ -1409,33 +1436,6 @@ void run_posix_cpu_timers(struct task_struct *tsk)
}
/*
- * Sample a process (thread group) timer for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
- */
-static int cpu_timer_sample_group(const clockid_t which_clock,
- struct task_struct *p,
- union cpu_time_count *cpu)
-{
- struct task_cputime cputime;
-
- thread_group_cputimer(p, &cputime);
- switch (CPUCLOCK_WHICH(which_clock)) {
- default:
- return -EINVAL;
- case CPUCLOCK_PROF:
- cpu->cpu = cputime_add(cputime.utime, cputime.stime);
- break;
- case CPUCLOCK_VIRT:
- cpu->cpu = cputime.utime;
- break;
- case CPUCLOCK_SCHED:
- cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
- break;
- }
- return 0;
-}
-
-/*
* Set one of the process-wide special case CPU timers.
* The tsk->sighand->siglock must be held by the caller.
* The *newval argument is relative and we update it to be absolute, *oldval
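cpu_timer_sample_group() is moved above its callers here because posix_cpu_timer_set(), posix_cpu_timer_get() and posix_cpu_timer_schedule() now sample group timers through it instead of cpu_clock_sample_group(). The function itself is a dispatch on the clock type; a minimal user-space sketch of that dispatch, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's CPUCLOCK_* selectors. */
enum sample_clock { SAMPLE_PROF, SAMPLE_VIRT, SAMPLE_SCHED };

struct group_cputime {
    uint64_t utime;             /* user time */
    uint64_t stime;             /* system time */
    uint64_t sum_exec_runtime;  /* scheduler clock */
};

/* Pick the value a group timer should be compared against, mirroring
 * the switch in cpu_timer_sample_group() above. */
static int sample_group(enum sample_clock which, const struct group_cputime *t,
                        uint64_t *out)
{
    switch (which) {
    case SAMPLE_PROF:
        *out = t->utime + t->stime;   /* profiling: user + system time */
        return 0;
    case SAMPLE_VIRT:
        *out = t->utime;              /* virtual: user time only */
        return 0;
    case SAMPLE_SCHED:
        *out = t->sum_exec_runtime;   /* raw scheduler runtime */
        return 0;
    default:
        return -1;                    /* unknown clock type */
    }
}

int main(void)
{
    struct group_cputime t = { .utime = 100, .stime = 40, .sum_exec_runtime = 150 };
    uint64_t v;

    if (!sample_group(SAMPLE_PROF, &t, &v))
        printf("PROF sample = %llu\n", (unsigned long long)v);
    return 0;
}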
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index d7a10167a25..720ea4f781b 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -3,7 +3,7 @@ ifeq ($(CONFIG_PM_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
endif
-obj-y := main.o
+obj-$(CONFIG_PM) += main.o
obj-$(CONFIG_PM_SLEEP) += console.o
obj-$(CONFIG_FREEZER) += process.o
obj-$(CONFIG_HIBERNATION) += swsusp.o disk.o snapshot.o swap.o user.o
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 6da14358537..505f319e489 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -60,6 +60,7 @@ static struct block_device *resume_bdev;
static int submit(int rw, pgoff_t page_off, struct page *page,
struct bio **bio_chain)
{
+ const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
struct bio *bio;
bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
@@ -80,7 +81,7 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
bio_get(bio);
if (bio_chain == NULL) {
- submit_bio(rw | (1 << BIO_RW_SYNC), bio);
+ submit_bio(bio_rw, bio);
wait_on_page_locked(page);
if (rw == READ)
bio_set_pages_dirty(bio);
@@ -90,7 +91,7 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
get_page(page); /* These pages are freed later */
bio->bi_private = *bio_chain;
*bio_chain = bio;
- submit_bio(rw | (1 << BIO_RW_SYNC), bio);
+ submit_bio(bio_rw, bio);
}
return 0;
}
diff --git a/kernel/sched.c b/kernel/sched.c
index 61245b8d0f1..7d97ff7c447 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6939,20 +6939,26 @@ static void free_rootdomain(struct root_domain *rd)
static void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
+ struct root_domain *old_rd = NULL;
unsigned long flags;
spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
- struct root_domain *old_rd = rq->rd;
+ old_rd = rq->rd;
if (cpumask_test_cpu(rq->cpu, old_rd->online))
set_rq_offline(rq);
cpumask_clear_cpu(rq->cpu, old_rd->span);
- if (atomic_dec_and_test(&old_rd->refcount))
- free_rootdomain(old_rd);
+ /*
+ * If we don't want to free the old_rd yet then
+ * set old_rd to NULL to skip the freeing later
+ * in this function:
+ */
+ if (!atomic_dec_and_test(&old_rd->refcount))
+ old_rd = NULL;
}
atomic_inc(&rd->refcount);
@@ -6963,6 +6969,9 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
set_rq_online(rq);
spin_unlock_irqrestore(&rq->lock, flags);
+
+ if (old_rd)
+ free_rootdomain(old_rd);
}
static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
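In rq_attach_root() the old root domain is now remembered in old_rd and freed only after rq->lock has been released, rather than calling free_rootdomain() while still holding the lock. A minimal user-space sketch of deferring the free until after the lock is dropped (pthread mutex, hypothetical names):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted object standing in for struct root_domain. */
struct domain {
    int refcount;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct domain *current_dom;

/* Swap in a new domain.  The old one is freed only after the lock is
 * dropped, mirroring how rq_attach_root() defers free_rootdomain(). */
static void attach_domain(struct domain *new_dom)
{
    struct domain *old_dom = NULL;

    pthread_mutex_lock(&lock);
    if (current_dom) {
        old_dom = current_dom;
        /* Keep old_dom only if we dropped the last reference;
         * otherwise clear it so nothing is freed below. */
        if (--old_dom->refcount != 0)
            old_dom = NULL;
    }
    new_dom->refcount++;
    current_dom = new_dom;
    pthread_mutex_unlock(&lock);

    if (old_dom)
        free(old_dom);   /* never free while holding the lock */
}

int main(void)
{
    struct domain *a = calloc(1, sizeof(*a));
    struct domain *b = calloc(1, sizeof(*b));

    if (!a || !b)
        return 1;
    attach_domain(a);
    attach_domain(b);    /* a is freed here, outside the lock */
    printf("attached domain with refcount %d\n", current_dom->refcount);
    free(b);
    return 0;
}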
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e2a4ff6fc3a..34e707e5ab8 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -52,6 +52,7 @@ config FUNCTION_TRACER
depends on HAVE_FUNCTION_TRACER
depends on DEBUG_KERNEL
select FRAME_POINTER
+ select KALLSYMS
select TRACING
select CONTEXT_SWITCH_TRACER
help
@@ -238,6 +239,7 @@ config STACK_TRACER
depends on DEBUG_KERNEL
select FUNCTION_TRACER
select STACKTRACE
+ select KALLSYMS
help
This special tracer records the maximum stack footprint of the
kernel and displays it in debugfs/tracing/stack_trace.
@@ -302,4 +304,27 @@ config FTRACE_STARTUP_TEST
functioning properly. It will do tests on all the configured
tracers of ftrace.
+config MMIOTRACE
+ bool "Memory mapped IO tracing"
+ depends on HAVE_MMIOTRACE_SUPPORT && DEBUG_KERNEL && PCI
+ select TRACING
+ help
+ Mmiotrace traces Memory Mapped I/O access and is meant for
+ debugging and reverse engineering. It is called from the ioremap
+ implementation and works via page faults. Tracing is disabled by
+ default and can be enabled at run-time.
+
+ See Documentation/tracers/mmiotrace.txt.
+ If you are not helping to develop drivers, say N.
+
+config MMIOTRACE_TEST
+ tristate "Test module for mmiotrace"
+ depends on MMIOTRACE && m
+ help
+ This is a dumb module for testing mmiotrace. It is very dangerous
+ as it will write garbage to IO memory starting at a given address.
+ However, it should be safe to use on e.g. an unused portion of VRAM.
+
+ Say N, unless you absolutely know what you are doing.
+
endmenu
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9a236ffe2aa..fdf913dfc7e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2033,7 +2033,7 @@ free:
static int start_graph_tracing(void)
{
struct ftrace_ret_stack **ret_stack_list;
- int ret;
+ int ret, cpu;
ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
sizeof(struct ftrace_ret_stack *),
@@ -2042,6 +2042,10 @@ static int start_graph_tracing(void)
if (!ret_stack_list)
return -ENOMEM;
+ /* The cpu_boot init_task->ret_stack will never be freed */
+ for_each_online_cpu(cpu)
+ ftrace_graph_init_task(idle_task(cpu));
+
do {
ret = alloc_retstack_tasklist(ret_stack_list);
} while (ret == -EAGAIN);
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index fffcb069f1d..80e503ef613 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/mmiotrace.h>
#include <linux/pci.h>
+#include <asm/atomic.h>
#include "trace.h"
@@ -19,6 +20,7 @@ struct header_iter {
static struct trace_array *mmio_trace_array;
static bool overrun_detected;
static unsigned long prev_overruns;
+static atomic_t dropped_count;
static void mmio_reset_data(struct trace_array *tr)
{
@@ -121,11 +123,11 @@ static void mmio_close(struct trace_iterator *iter)
static unsigned long count_overruns(struct trace_iterator *iter)
{
- unsigned long cnt = 0;
+ unsigned long cnt = atomic_xchg(&dropped_count, 0);
unsigned long over = ring_buffer_overruns(iter->tr->buffer);
if (over > prev_overruns)
- cnt = over - prev_overruns;
+ cnt += over - prev_overruns;
prev_overruns = over;
return cnt;
}
@@ -310,8 +312,10 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
&irq_flags);
- if (!event)
+ if (!event) {
+ atomic_inc(&dropped_count);
return;
+ }
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, preempt_count());
entry->ent.type = TRACE_MMIO_RW;
@@ -338,8 +342,10 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
&irq_flags);
- if (!event)
+ if (!event) {
+ atomic_inc(&dropped_count);
return;
+ }
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, preempt_count());
entry->ent.type = TRACE_MMIO_MAP;
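The mmiotrace change above adds a dropped_count that is bumped whenever a ring-buffer event cannot be reserved and is drained atomically in count_overruns() with atomic_xchg(), so lost events are reported alongside buffer overruns. A minimal user-space sketch of that report-and-reset counter, using C11 atomics and hypothetical names:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical counter standing in for mmiotrace's dropped_count. */
static atomic_ulong dropped_count;

/* Producer side: an event could not be reserved, remember that we lost it. */
static void record_drop(void)
{
    atomic_fetch_add(&dropped_count, 1);
}

/* Reader side: report and reset in one step, as count_overruns() does
 * with atomic_xchg(&dropped_count, 0). */
static unsigned long drain_drops(void)
{
    return atomic_exchange(&dropped_count, 0);
}

int main(void)
{
    record_drop();
    record_drop();
    printf("dropped: %lu\n", drain_drops());
    printf("dropped after drain: %lu\n", drain_drops());
    return 0;
}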
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 88c8eb70f54..bc8e80a86bc 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -23,10 +23,20 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
struct ring_buffer_event *event;
struct trace_entry *entry;
+ unsigned int loops = 0;
while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
entry = ring_buffer_event_data(event);
+ /*
+ * The ring buffer is of size trace_buf_size; if
+ * we loop more times than that, something is wrong
+ * with the ring buffer.
+ */
+ if (loops++ > trace_buf_size) {
+ printk(KERN_CONT ".. bad ring buffer ");
+ goto failed;
+ }
if (!trace_valid_entry(entry)) {
printk(KERN_CONT ".. invalid entry %d ",
entry->type);
@@ -57,11 +67,20 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
cnt = ring_buffer_entries(tr->buffer);
+ /*
+ * The trace_test_buffer_cpu runs a while loop to consume all data.
+ * If the calling tracer is broken, and is constantly filling
+ * the buffer, this will run forever, and hard lock the box.
+ * We disable the ring buffer while we do this test to prevent
+ * a hard lock up.
+ */
+ tracing_off();
for_each_possible_cpu(cpu) {
ret = trace_test_buffer_cpu(tr, cpu);
if (ret)
break;
}
+ tracing_on();
__raw_spin_unlock(&ftrace_max_lock);
local_irq_restore(flags);
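trace_test_buffer_cpu() now bails out once it has consumed more entries than trace_buf_size allows, and trace_test_buffer() switches the ring buffer off for the duration of the test so a broken tracer cannot keep refilling it and lock the machine up. A minimal user-space sketch of the bounded-consume guard, with hypothetical names:

#include <stdio.h>

#define BUF_SIZE 8            /* hypothetical stand-in for trace_buf_size */

static int entries = 1000;    /* simulate a producer that never stops */

/* Pretend consumer: returns nonzero while "events" remain. */
static int consume_one(void)
{
    return entries-- > 0;
}

int main(void)
{
    unsigned int loops = 0;

    /* The buffer can hold at most BUF_SIZE entries, so looping more
     * often than that means something keeps refilling it; bail out
     * instead of spinning forever, as trace_test_buffer_cpu() does. */
    while (consume_one()) {
        if (loops++ > BUF_SIZE) {
            printf(".. bad ring buffer\n");
            return 1;
        }
    }
    printf("buffer drained after %u entries\n", loops);
    return 0;
}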