Diffstat (limited to 'kernel')
-rw-r--r--  kernel/acct.c        |   2
-rw-r--r--  kernel/futex.c       |  51
-rw-r--r--  kernel/hrtimer.c     |   2
-rw-r--r--  kernel/kexec.c       |   1
-rw-r--r--  kernel/kmod.c        |  13
-rw-r--r--  kernel/lockdep.c     |  12
-rw-r--r--  kernel/module.c      |  29
-rw-r--r--  kernel/params.c      |  10
-rw-r--r--  kernel/power/main.c  |   3
-rw-r--r--  kernel/power/pm.c    |   4
-rw-r--r--  kernel/printk.c      |   2
-rw-r--r--  kernel/ptrace.c      |   6
-rw-r--r--  kernel/rcupdate.c    |   2
-rw-r--r--  kernel/sched.c       |  10
-rw-r--r--  kernel/sched_debug.c |   8
-rw-r--r--  kernel/timer.c       |   4
-rw-r--r--  kernel/workqueue.c   |   5
17 files changed, 112 insertions(+), 52 deletions(-)
diff --git a/kernel/acct.c b/kernel/acct.c
index cf19547cc9e..521dfa53cb9 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -482,7 +482,7 @@ static void do_acct_process(struct file *file)
#endif
#if ACCT_VERSION==3
ac.ac_pid = current->tgid;
- ac.ac_ppid = current->parent->tgid;
+ ac.ac_ppid = current->real_parent->tgid;
#endif
spin_lock_irq(&current->sighand->siglock);
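Note on the acct.c hunk: while a task is ptraced, ->parent is re-linked to the tracer so the debugger can wait() on it, whereas ->real_parent keeps pointing at the fork-time parent. Version-3 accounting should record the latter, or attaching gdb would change the ac_ppid written to the accounting file. An illustrative kernel-style fragment (not from the patch, names only for exposition):

	#include <linux/sched.h>

	static pid_t acct_ppid_of(struct task_struct *task)
	{
		/* task->parent may currently be a ptrace tracer;
		 * task->real_parent never is */
		return task->real_parent->tgid;
	}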
diff --git a/kernel/futex.c b/kernel/futex.c
index 172a1aeeafd..db9824de8bf 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1097,15 +1097,15 @@ static void unqueue_me_pi(struct futex_q *q)
}
/*
- * Fixup the pi_state owner with current.
+ * Fixup the pi_state owner with the new owner.
*
* Must be called with hash bucket lock held and mm->sem held for non
* private futexes.
*/
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
- struct task_struct *curr)
+ struct task_struct *newowner)
{
- u32 newtid = task_pid_vnr(curr) | FUTEX_WAITERS;
+ u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
struct futex_pi_state *pi_state = q->pi_state;
u32 uval, curval, newval;
int ret;
@@ -1119,12 +1119,12 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
} else
newtid |= FUTEX_OWNER_DIED;
- pi_state->owner = curr;
+ pi_state->owner = newowner;
- spin_lock_irq(&curr->pi_lock);
+ spin_lock_irq(&newowner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
- list_add(&pi_state->list, &curr->pi_state_list);
- spin_unlock_irq(&curr->pi_lock);
+ list_add(&pi_state->list, &newowner->pi_state_list);
+ spin_unlock_irq(&newowner->pi_lock);
/*
* We own it, so we have to replace the pending owner
@@ -1508,9 +1508,40 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
* when we were on the way back before we locked the
* hash bucket.
*/
- if (q.pi_state->owner == curr &&
- rt_mutex_trylock(&q.pi_state->pi_mutex)) {
- ret = 0;
+ if (q.pi_state->owner == curr) {
+ /*
+ * Try to get the rt_mutex now. This might
+ * fail as some other task acquired the
+ * rt_mutex after we removed ourself from the
+ * rt_mutex waiters list.
+ */
+ if (rt_mutex_trylock(&q.pi_state->pi_mutex))
+ ret = 0;
+ else {
+ /*
+ * pi_state is incorrect, some other
+ * task did a lock steal and we
+ * returned due to timeout or signal
+ * without taking the rt_mutex. Too
+ * late. We can access the
+ * rt_mutex_owner without locking, as
+ * the other task is now blocked on
+ * the hash bucket lock. Fix the state
+ * up.
+ */
+ struct task_struct *owner;
+ int res;
+
+ owner = rt_mutex_owner(&q.pi_state->pi_mutex);
+ res = fixup_pi_state_owner(uaddr, &q, owner);
+
+ WARN_ON(rt_mutex_owner(&q.pi_state->pi_mutex) !=
+ owner);
+
+ /* propagate -EFAULT, if the fixup failed */
+ if (res)
+ ret = res;
+ }
} else {
/*
* Paranoia check. If we did not take the lock
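The fixup_pi_state_owner() rework above lets a waiter repair the user-space futex word after another task stole the lock while the waiter was returning on a timeout or signal. The kernel rewrites the TID bits of the futex value in a compare-and-swap retry loop; a user-space analogue of that loop, a sketch with illustrative names using GCC atomics, looks roughly like this:

	#include <stdint.h>

	#define FUTEX_WAITERS     0x80000000u
	#define FUTEX_OWNER_DIED  0x40000000u

	static void fixup_owner_tid(uint32_t *uaddr, uint32_t newpid)
	{
		uint32_t newtid = newpid | FUTEX_WAITERS;	/* as in the patch */
		uint32_t uval = *uaddr;
		uint32_t newval;

		do {
			/* keep the OWNER_DIED state bit, install the new TID */
			newval = (uval & FUTEX_OWNER_DIED) | newtid;
			/* on failure, uval is reloaded with the current value */
		} while (!__atomic_compare_exchange_n(uaddr, &uval, newval, 0,
						      __ATOMIC_SEQ_CST,
						      __ATOMIC_SEQ_CST));
	}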
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index e65dd0b47cd..f994bb8065e 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1378,7 +1378,7 @@ sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
/*
* Functions related to boot-time initialization:
*/
-static void __devinit init_hrtimers_cpu(int cpu)
+static void __cpuinit init_hrtimers_cpu(int cpu)
{
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
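This hunk, like the rcupdate.c and timer.c ones below, swaps __devinit for __cpuinit on CPU-hotplug callbacks. The two markers gate on different config options: __devinit is tied to device hotplug (CONFIG_HOTPLUG), while CPU bring-up code must survive exactly when CONFIG_HOTPLUG_CPU is set, so the wrong marker gives the wrong lifetime and trips section-mismatch checks. Roughly, simplified from the <linux/init.h> of this era:

	#ifdef CONFIG_HOTPLUG_CPU
	#define __cpuinit			/* kept: CPUs may come online later */
	#else
	#define __cpuinit __init		/* discarded after boot */
	#endif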
diff --git a/kernel/kexec.c b/kernel/kexec.c
index aa74a1ef2da..9a26eec9eb0 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1404,6 +1404,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_OFFSET(list_head, next);
VMCOREINFO_OFFSET(list_head, prev);
VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
+ VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
VMCOREINFO_NUMBER(NR_FREE_PAGES);
arch_crash_save_vmcoreinfo();
diff --git a/kernel/kmod.c b/kernel/kmod.c
index c6a4f8aebeb..bb7df2a28bd 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -451,13 +451,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
enum umh_wait wait)
{
DECLARE_COMPLETION_ONSTACK(done);
- int retval;
+ int retval = 0;
helper_lock();
- if (sub_info->path[0] == '\0') {
- retval = 0;
+ if (sub_info->path[0] == '\0')
goto out;
- }
if (!khelper_wq || usermodehelper_disabled) {
retval = -EBUSY;
@@ -468,13 +466,14 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
sub_info->wait = wait;
queue_work(khelper_wq, &sub_info->work);
- if (wait == UMH_NO_WAIT) /* task has freed sub_info */
- return 0;
+ if (wait == UMH_NO_WAIT) /* task has freed sub_info */
+ goto unlock;
wait_for_completion(&done);
retval = sub_info->retval;
- out:
+out:
call_usermodehelper_freeinfo(sub_info);
+unlock:
helper_unlock();
return retval;
}
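The call_usermodehelper_exec() rework fixes a lock leak: the old UMH_NO_WAIT path returned directly, skipping helper_unlock() and leaving the count taken by helper_lock() unbalanced. That path must also skip call_usermodehelper_freeinfo(), because for UMH_NO_WAIT the worker frees sub_info itself. The result is the usual staged-goto cleanup idiom; a control-flow sketch with every identifier illustrative:

	int do_request(struct request *req)
	{
		int ret = 0;

		helper_lock();
		if (nothing_to_do(req))
			goto out;		/* free, then unlock */
		if (!workqueue_up()) {
			ret = -EBUSY;
			goto out;
		}
		submit(req);
		if (async(req))
			goto unlock;		/* worker owns req: don't free */
		ret = wait_and_collect(req);
	out:
		free_request(req);
	unlock:
		helper_unlock();		/* every path drops the lock */
		return ret;
	}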
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 723bd9f9255..4335f12a27c 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2943,9 +2943,10 @@ void lockdep_free_key_range(void *start, unsigned long size)
struct list_head *head;
unsigned long flags;
int i;
+ int locked;
raw_local_irq_save(flags);
- graph_lock();
+ locked = graph_lock();
/*
* Unhash all classes that were created by this module:
@@ -2959,7 +2960,8 @@ void lockdep_free_key_range(void *start, unsigned long size)
zap_class(class);
}
- graph_unlock();
+ if (locked)
+ graph_unlock();
raw_local_irq_restore(flags);
}
@@ -2969,6 +2971,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
struct list_head *head;
unsigned long flags;
int i, j;
+ int locked;
raw_local_irq_save(flags);
@@ -2987,7 +2990,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
* Debug check: in the end all mapped classes should
* be gone.
*/
- graph_lock();
+ locked = graph_lock();
for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
if (list_empty(head))
@@ -3000,7 +3003,8 @@ void lockdep_reset_lock(struct lockdep_map *lock)
}
}
}
- graph_unlock();
+ if (locked)
+ graph_unlock();
out_restore:
raw_local_irq_restore(flags);
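The lockdep hunks fix an unbalanced unlock: graph_lock() can return 0 without holding the lock (it backs out when debug_locks has been turned off), so an unconditional graph_unlock() could release a lock that was never taken. Recording the return value and unlocking conditionally is the fix. A self-contained analogue of the pattern, with pthreads standing in for the raw spinlock and all names illustrative:

	#include <stdbool.h>
	#include <pthread.h>

	static pthread_mutex_t graph_mutex = PTHREAD_MUTEX_INITIALIZER;
	static bool debug_enabled = true;

	/* analogue of graph_lock(): may decline when debugging is off */
	static int graph_lock_sketch(void)
	{
		pthread_mutex_lock(&graph_mutex);
		if (!debug_enabled) {
			pthread_mutex_unlock(&graph_mutex);
			return 0;		/* caller must NOT unlock */
		}
		return 1;
	}

	static void zap_classes_sketch(void)
	{
		int locked = graph_lock_sketch();

		/* ... unhash classes ... */

		if (locked)			/* the fix: unlock only if held */
			pthread_mutex_unlock(&graph_mutex);
	}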
diff --git a/kernel/module.c b/kernel/module.c
index 91fe6958b6e..c2e3e2e9880 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2214,29 +2214,34 @@ static const char *get_ksymbol(struct module *mod,
/* For kallsyms to ask for address resolution. NULL means not found.
We don't lock, as this is used for oops resolution and races are a
lesser concern. */
+/* FIXME: Risky: returns a pointer into a module w/o lock */
const char *module_address_lookup(unsigned long addr,
unsigned long *size,
unsigned long *offset,
char **modname)
{
struct module *mod;
+ const char *ret = NULL;
+ preempt_disable();
list_for_each_entry(mod, &modules, list) {
if (within(addr, mod->module_init, mod->init_size)
|| within(addr, mod->module_core, mod->core_size)) {
if (modname)
*modname = mod->name;
- return get_ksymbol(mod, addr, size, offset);
+ ret = get_ksymbol(mod, addr, size, offset);
+ break;
}
}
- return NULL;
+ preempt_enable();
+ return ret;
}
int lookup_module_symbol_name(unsigned long addr, char *symname)
{
struct module *mod;
- mutex_lock(&module_mutex);
+ preempt_disable();
list_for_each_entry(mod, &modules, list) {
if (within(addr, mod->module_init, mod->init_size) ||
within(addr, mod->module_core, mod->core_size)) {
@@ -2246,12 +2251,12 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
if (!sym)
goto out;
strlcpy(symname, sym, KSYM_NAME_LEN);
- mutex_unlock(&module_mutex);
+ preempt_enable();
return 0;
}
}
out:
- mutex_unlock(&module_mutex);
+ preempt_enable();
return -ERANGE;
}
@@ -2260,7 +2265,7 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
{
struct module *mod;
- mutex_lock(&module_mutex);
+ preempt_disable();
list_for_each_entry(mod, &modules, list) {
if (within(addr, mod->module_init, mod->init_size) ||
within(addr, mod->module_core, mod->core_size)) {
@@ -2273,12 +2278,12 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
strlcpy(modname, mod->name, MODULE_NAME_LEN);
if (name)
strlcpy(name, sym, KSYM_NAME_LEN);
- mutex_unlock(&module_mutex);
+ preempt_enable();
return 0;
}
}
out:
- mutex_unlock(&module_mutex);
+ preempt_enable();
return -ERANGE;
}
@@ -2287,7 +2292,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
{
struct module *mod;
- mutex_lock(&module_mutex);
+ preempt_disable();
list_for_each_entry(mod, &modules, list) {
if (symnum < mod->num_symtab) {
*value = mod->symtab[symnum].st_value;
@@ -2296,12 +2301,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
KSYM_NAME_LEN);
strlcpy(module_name, mod->name, MODULE_NAME_LEN);
*exported = is_exported(name, mod);
- mutex_unlock(&module_mutex);
+ preempt_enable();
return 0;
}
symnum -= mod->num_symtab;
}
- mutex_unlock(&module_mutex);
+ preempt_enable();
return -ERANGE;
}
@@ -2324,6 +2329,7 @@ unsigned long module_kallsyms_lookup_name(const char *name)
unsigned long ret = 0;
/* Don't lock: we're in enough trouble already. */
+ preempt_disable();
if ((colon = strchr(name, ':')) != NULL) {
*colon = '\0';
if ((mod = find_module(name)) != NULL)
@@ -2334,6 +2340,7 @@ unsigned long module_kallsyms_lookup_name(const char *name)
if ((ret = mod_find_symname(mod, name)) != 0)
break;
}
+ preempt_enable();
return ret;
}
#endif /* CONFIG_KALLSYMS */
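The module.c changes replace module_mutex with preempt_disable() in the kallsyms lookup helpers. These run in oops and backtrace paths, where taking a sleeping mutex is unsafe; and since module unload removes list entries via stop_machine(), which cannot proceed while any CPU sits in a preempt-disabled region, disabling preemption pins the module list for the walk. A kernel-style sketch of the read side (reuses the file's own modules list and within() helper, not standalone):

	#include <linux/module.h>
	#include <linux/preempt.h>

	static bool addr_in_some_module(unsigned long addr)
	{
		struct module *mod;
		bool found = false;

		preempt_disable();	/* holds off stop_machine(),
					 * and therefore module unload */
		list_for_each_entry(mod, &modules, list) {
			if (within(addr, mod->module_core, mod->core_size)) {
				found = true;
				break;
			}
		}
		preempt_enable();
		return found;
	}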
diff --git a/kernel/params.c b/kernel/params.c
index 2a4c51487e7..7686417ee00 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -697,8 +697,18 @@ static struct kset_uevent_ops module_uevent_ops = {
decl_subsys(module, &module_ktype, &module_uevent_ops);
int module_sysfs_initialized;
+static void module_release(struct kobject *kobj)
+{
+ /*
+ * Stupid empty release function to allow the memory for the kobject to
+ * be properly cleaned up. This will not need to be present for 2.6.25
+ * with the upcoming kobject core rework.
+ */
+}
+
static struct kobj_type module_ktype = {
.sysfs_ops = &module_sysfs_ops,
+ .release = module_release,
};
/*
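The empty module_release() exists to satisfy the kobject core, which warns when a ktype has no release() method. Module kobjects are embedded in struct module and their memory is managed elsewhere, so there is nothing for release to free here. For comparison, the usual shape when the kobject does own its allocation looks like this (generic sketch, hypothetical type):

	#include <linux/kobject.h>
	#include <linux/slab.h>

	struct my_obj {
		struct kobject kobj;
	};

	static void my_obj_release(struct kobject *kobj)
	{
		/* called when the last reference is dropped */
		kfree(container_of(kobj, struct my_obj, kobj));
	}

	static struct kobj_type my_ktype = {
		.release = my_obj_release,
	};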
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 3cdf95b1dc9..f71c9504a5c 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -28,6 +28,9 @@ BLOCKING_NOTIFIER_HEAD(pm_chain_head);
DEFINE_MUTEX(pm_mutex);
+unsigned int pm_flags;
+EXPORT_SYMBOL(pm_flags);
+
#ifdef CONFIG_SUSPEND
/* This is just an arbitrary number */
diff --git a/kernel/power/pm.c b/kernel/power/pm.c
index c50d15266c1..60c73fa670d 100644
--- a/kernel/power/pm.c
+++ b/kernel/power/pm.c
@@ -27,8 +27,6 @@
#include <linux/interrupt.h>
#include <linux/mutex.h>
-int pm_active;
-
/*
* Locking notes:
* pm_devs_lock can be a semaphore providing pm ops are not called
@@ -204,6 +202,4 @@ int pm_send_all(pm_request_t rqst, void *data)
EXPORT_SYMBOL(pm_register);
EXPORT_SYMBOL(pm_send_all);
-EXPORT_SYMBOL(pm_active);
-
diff --git a/kernel/printk.c b/kernel/printk.c
index a30fe33de39..89011bf8c10 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -817,7 +817,7 @@ __setup("console=", console_setup);
* commonly to provide a default console (ie from PROM variables) when
* the user has not supplied one.
*/
-int __init add_preferred_console(char *name, int idx, char *options)
+int add_preferred_console(char *name, int idx, char *options)
{
struct console_cmdline *c;
int i;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 7c76f2ffaea..c25db863081 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -120,7 +120,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
return ret;
}
-static int may_attach(struct task_struct *task)
+int __ptrace_may_attach(struct task_struct *task)
{
/* May we inspect the given task?
* This check is used both for attaching with ptrace
@@ -154,7 +154,7 @@ int ptrace_may_attach(struct task_struct *task)
{
int err;
task_lock(task);
- err = may_attach(task);
+ err = __ptrace_may_attach(task);
task_unlock(task);
return !err;
}
@@ -196,7 +196,7 @@ repeat:
/* the same process cannot be attached many times */
if (task->ptrace & PT_PTRACED)
goto bad;
- retval = may_attach(task);
+ retval = __ptrace_may_attach(task);
if (retval)
goto bad;
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a66d4d1615f..f2c1a04e9b1 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -549,7 +549,7 @@ static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
rdp->blimit = blimit;
}
-static void __devinit rcu_online_cpu(int cpu)
+static void __cpuinit rcu_online_cpu(int cpu)
{
struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
diff --git a/kernel/sched.c b/kernel/sched.c
index 3df84ea6aba..e76b11ca6df 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4918,7 +4918,7 @@ static void show_task(struct task_struct *p)
}
#endif
printk(KERN_CONT "%5lu %5d %6d\n", free,
- task_pid_nr(p), task_pid_nr(p->parent));
+ task_pid_nr(p), task_pid_nr(p->real_parent));
if (state != TASK_RUNNING)
show_stack(p, NULL);
@@ -7153,6 +7153,14 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
int i;
+ /*
+ * A weight of 0 or 1 can cause arithmetics problems.
+ * (The default weight is 1024 - so there's no practical
+ * limitation from this.)
+ */
+ if (shares < 2)
+ shares = 2;
+
spin_lock(&tg->lock);
if (tg->shares == shares)
goto done;
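The clamp in sched_group_set_shares() guards CFS's fixed-point arithmetic: weights are consumed via a precomputed inverse, roughly inv_weight = 2^32 / weight, so a weight of 0 divides by zero and a weight of 1 yields an inverse that no longer fits in 32 bits. A small user-space illustration of the boundary (the formula is a simplification of the kernel's):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		for (uint64_t w = 1; w <= 3; w++) {
			uint64_t inv = (1ULL << 32) / w;  /* simplified inv_weight */
			printf("weight %llu -> inv_weight %llu%s\n",
			       (unsigned long long)w, (unsigned long long)inv,
			       inv > 0xffffffffULL ? "  (overflows u32)" : "");
		}
		/* weight == 0 would divide by zero; hence the clamp to >= 2 */
		return 0;
	}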
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index d30467b47dd..80fbbfc0429 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -31,9 +31,9 @@
/*
* Ease the printing of nsec fields:
*/
-static long long nsec_high(long long nsec)
+static long long nsec_high(unsigned long long nsec)
{
- if (nsec < 0) {
+ if ((long long)nsec < 0) {
nsec = -nsec;
do_div(nsec, 1000000);
return -nsec;
@@ -43,9 +43,9 @@ static long long nsec_high(long long nsec)
return nsec;
}
-static unsigned long nsec_low(long long nsec)
+static unsigned long nsec_low(unsigned long long nsec)
{
- if (nsec < 0)
+ if ((long long)nsec < 0)
nsec = -nsec;
return do_div(nsec, 1000000);
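The sched_debug.c change is a do_div() fix: do_div() requires an unsigned 64-bit dividend, and feeding it a negative signed value makes it divide the huge two's-complement bit pattern instead. Taking the argument as unsigned and handling the sign explicitly (cast, negate, divide, re-negate) keeps negative nanosecond values printing correctly. A user-space rendition with a sample value, do_div replaced by a portable stand-in:

	#include <stdio.h>

	/* stand-in for the kernel's do_div(): *n becomes the quotient,
	 * the remainder is returned */
	static unsigned long do_div_sketch(unsigned long long *n, unsigned long base)
	{
		unsigned long rem = *n % base;
		*n /= base;
		return rem;
	}

	static long long nsec_high(unsigned long long nsec)
	{
		if ((long long)nsec < 0) {
			nsec = -nsec;
			do_div_sketch(&nsec, 1000000);
			return -(long long)nsec;
		}
		do_div_sketch(&nsec, 1000000);
		return nsec;
	}

	static unsigned long nsec_low(unsigned long long nsec)
	{
		if ((long long)nsec < 0)
			nsec = -nsec;
		return do_div_sketch(&nsec, 1000000);
	}

	int main(void)
	{
		long long t = -1234567890LL;	/* about -1.23 s */
		printf("%lld.%06lu ms\n", nsec_high(t), nsec_low(t));
		/* prints "-1234.567890 ms", sign handled correctly */
		return 0;
	}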
diff --git a/kernel/timer.c b/kernel/timer.c
index d4527dcef1a..2a00c22203f 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -978,7 +978,7 @@ asmlinkage long sys_getppid(void)
int pid;
rcu_read_lock();
- pid = task_ppid_nr_ns(current, current->nsproxy->pid_ns);
+ pid = task_tgid_nr_ns(current->real_parent, current->nsproxy->pid_ns);
rcu_read_unlock();
return pid;
@@ -1289,7 +1289,7 @@ static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
}
}
-static void __devinit migrate_timers(int cpu)
+static void __cpuinit migrate_timers(int cpu)
{
tvec_base_t *old_base;
tvec_base_t *new_base;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 52d5e7c9a8e..8db0b597509 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -722,7 +722,8 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
struct workqueue_struct *__create_workqueue_key(const char *name,
int singlethread,
int freezeable,
- struct lock_class_key *key)
+ struct lock_class_key *key,
+ const char *lock_name)
{
struct workqueue_struct *wq;
struct cpu_workqueue_struct *cwq;
@@ -739,7 +740,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
}
wq->name = name;
- lockdep_init_map(&wq->lockdep_map, name, key, 0);
+ lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
wq->singlethread = singlethread;
wq->freezeable = freezeable;
INIT_LIST_HEAD(&wq->list);
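The extra lock_name parameter to __create_workqueue_key() gives lockdep a stable, human-readable class name even when the workqueue name is not a compile-time constant. In the same patch series, the caller-side macro picks the string literal when it can and otherwise falls back to the stringified expression; reconstructed, it looks roughly like this (details may differ):

	#define __create_workqueue(name, singlethread, freezeable)	\
	({								\
		static struct lock_class_key __key;			\
		const char *__lock_name;				\
									\
		if (__builtin_constant_p(name))				\
			__lock_name = (name);				\
		else							\
			__lock_name = #name;				\
									\
		__create_workqueue_key((name), (singlethread),		\
				       (freezeable), &__key,		\
				       __lock_name);			\
	})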