Diffstat (limited to 'kernel')
-rw-r--r--    kernel/cpuset.c       24
-rw-r--r--    kernel/exit.c          1
-rw-r--r--    kernel/fork.c          1
-rw-r--r--    kernel/kprobes.c       2
-rw-r--r--    kernel/ptrace.c       15
-rw-r--r--    kernel/rcupreempt.c   20
-rw-r--r--    kernel/sched.c         7
7 files changed, 41 insertions(+), 29 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9fceb97e989..798b3ab054e 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1882,7 +1882,7 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
  * in order to minimize text size.
  */
-static void common_cpu_mem_hotplug_unplug(void)
+static void common_cpu_mem_hotplug_unplug(int rebuild_sd)
 {
         cgroup_lock();
@@ -1894,7 +1894,8 @@ static void common_cpu_mem_hotplug_unplug(void)
          * Scheduler destroys domains on hotplug events.
          * Rebuild them based on the current settings.
          */
-        rebuild_sched_domains();
+        if (rebuild_sd)
+                rebuild_sched_domains();
 
         cgroup_unlock();
 }
@@ -1912,11 +1913,22 @@ static void common_cpu_mem_hotplug_unplug(void)
 static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
                                 unsigned long phase, void *unused_cpu)
 {
-        if (phase == CPU_DYING || phase == CPU_DYING_FROZEN)
+        switch (phase) {
+        case CPU_UP_CANCELED:
+        case CPU_UP_CANCELED_FROZEN:
+        case CPU_DOWN_FAILED:
+        case CPU_DOWN_FAILED_FROZEN:
+        case CPU_ONLINE:
+        case CPU_ONLINE_FROZEN:
+        case CPU_DEAD:
+        case CPU_DEAD_FROZEN:
+                common_cpu_mem_hotplug_unplug(1);
+                break;
+        default:
                 return NOTIFY_DONE;
+        }
 
-        common_cpu_mem_hotplug_unplug();
-        return 0;
+        return NOTIFY_OK;
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
@@ -1929,7 +1941,7 @@ static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
 
 void cpuset_track_online_nodes(void)
 {
-        common_cpu_mem_hotplug_unplug();
+        common_cpu_mem_hotplug_unplug(0);
 }
 #endif
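
[Editor's note: cpuset_handle_cpuhp() above is a CPU hotplug notifier callback;
cpuset.c registers it with hotcpu_notifier(). A minimal sketch of the callback
pattern the patch switches to; every example_* name is hypothetical, not part
of the patch:]

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>

/*
 * Called once per phase of a CPU hotplug transition. NOTIFY_DONE
 * means "this phase is of no interest"; NOTIFY_OK means it was handled.
 */
static int example_cpu_callback(struct notifier_block *nb,
                                unsigned long phase, void *hcpu)
{
        switch (phase) {
        case CPU_ONLINE:
        case CPU_DEAD:
                /* react to a CPU having come up or gone down */
                break;
        default:
                return NOTIFY_DONE;
        }
        return NOTIFY_OK;
}

static int __init example_init(void)
{
        /* register the callback for CPU hotplug events, priority 0 */
        hotcpu_notifier(example_cpu_callback, 0);
        return 0;
}
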
diff --git a/kernel/exit.c b/kernel/exit.c
index 8f6185e69b6..ceb25878283 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -13,6 +13,7 @@
 #include <linux/personality.h>
 #include <linux/tty.h>
 #include <linux/mnt_namespace.h>
+#include <linux/iocontext.h>
 #include <linux/key.h>
 #include <linux/security.h>
 #include <linux/cpu.h>
diff --git a/kernel/fork.c b/kernel/fork.c
index 19908b26cf8..b71ccd09fc8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -23,6 +23,7 @@
 #include <linux/sem.h>
 #include <linux/file.h>
 #include <linux/fdtable.h>
+#include <linux/iocontext.h>
 #include <linux/key.h>
 #include <linux/binfmts.h>
 #include <linux/mman.h>
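
[Editor's note: exit.c and fork.c gain the same include, presumably because the
process-exit and process-copy paths use io_context helpers declared in
<linux/iocontext.h> (put_io_context() and friends) and previously saw those
declarations only through indirect includes. The rule the change follows is to
include the header that owns a declaration rather than relying on nested
includes. A generic sketch, all names hypothetical:]

/* helper.h -- owns the declaration */
struct my_ctx;
void my_ctx_put(struct my_ctx *ctx);

/* caller.c -- includes helper.h explicitly instead of hoping some
 * other header drags it in */
#include "helper.h"

static void teardown(struct my_ctx *ctx)
{
        my_ctx_put(ctx);  /* with no visible prototype this would be an
                             implicit declaration, an error in modern C */
}
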
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d4998f81e22..1485ca8d0e0 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -79,7 +79,7 @@ static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
  *
  * For such cases, we now have a blacklist
  */
-struct kprobe_blackpoint kprobe_blacklist[] = {
+static struct kprobe_blackpoint kprobe_blacklist[] = {
         {"preempt_schedule",},
         {NULL}    /* Terminator */
 };
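
[Editor's note: kprobe_blacklist has no users outside kprobes.c, so the patch
gives it internal linkage, keeping the symbol out of the kernel-wide namespace.
A generic illustration of the difference, names hypothetical:]

/* tables.c */
static const char *local_names[] = { "a", "b", NULL };  /* internal linkage:
                                                           visible only here */
const char *shared_names[] = { "x", "y", NULL };        /* external linkage */

/* other.c */
extern const char *shared_names[];  /* resolves at link time */
extern const char *local_names[];   /* would fail to link: no such
                                       external symbol is emitted */
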
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 6c19e94fd0a..e337390fce0 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -121,7 +121,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
         return ret;
 }
 
-int __ptrace_may_attach(struct task_struct *task)
+int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 {
         /* May we inspect the given task?
          * This check is used both for attaching with ptrace
@@ -148,16 +148,16 @@ int __ptrace_may_attach(struct task_struct *task)
         if (!dumpable && !capable(CAP_SYS_PTRACE))
                 return -EPERM;
 
-        return security_ptrace(current, task);
+        return security_ptrace(current, task, mode);
 }
 
-int ptrace_may_attach(struct task_struct *task)
+bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 {
         int err;
         task_lock(task);
-        err = __ptrace_may_attach(task);
+        err = __ptrace_may_access(task, mode);
         task_unlock(task);
-        return !err;
+        return (!err ? true : false);
 }
 
 int ptrace_attach(struct task_struct *task)
@@ -195,7 +195,7 @@ repeat:
         /* the same process cannot be attached many times */
         if (task->ptrace & PT_PTRACED)
                 goto bad;
-        retval = __ptrace_may_attach(task);
+        retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
         if (retval)
                 goto bad;
 
@@ -494,7 +494,8 @@ int ptrace_traceme(void)
          */
         task_lock(current);
         if (!(current->ptrace & PT_PTRACED)) {
-                ret = security_ptrace(current->parent, current);
+                ret = security_ptrace(current->parent, current,
+                                      PTRACE_MODE_ATTACH);
                 /*
                  * Set the ptrace bit in the process ptrace flags.
                  */
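
[Editor's note: the rename from *_may_attach to *_may_access reflects that
callers now state what they intend: PTRACE_MODE_ATTACH for taking full control
of a task, or the weaker PTRACE_MODE_READ for merely inspecting it, e.g. from
/proc. A sketch of a hypothetical caller of the new interface:]

#include <linux/ptrace.h>
#include <linux/sched.h>

/*
 * May current read another task's state, say to show it in a procfs
 * file? This is a weaker ask than attaching a debugger to the task.
 */
static bool can_inspect(struct task_struct *target)
{
        return ptrace_may_access(target, PTRACE_MODE_READ);
}
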
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 5e02b774070..41d275a81df 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -925,26 +925,22 @@ void rcu_offline_cpu(int cpu)
         spin_unlock_irqrestore(&rdp->lock, flags);
 }
 
-void __devinit rcu_online_cpu(int cpu)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
-        cpu_set(cpu, rcu_cpu_online_map);
-        spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
-}
-
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
 
 void rcu_offline_cpu(int cpu)
 {
 }
 
-void __devinit rcu_online_cpu(int cpu)
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
+
+void __cpuinit rcu_online_cpu(int cpu)
 {
-}
+        unsigned long flags;
 
-#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
+        spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
+        cpu_set(cpu, rcu_cpu_online_map);
+        spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
+}
 
 static void rcu_process_callbacks(struct softirq_action *unused)
 {
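
[Editor's note: besides moving rcu_online_cpu() out of the CONFIG_HOTPLUG_CPU
conditional so that a single definition serves both configurations, the patch
corrects the section annotation: __devinit is tied to device probing, while
CPU bring-up code belongs in __cpuinit. A sketch of the annotation, with a
hypothetical function:]

#include <linux/init.h>

/*
 * __cpuinit text is needed whenever a CPU can be brought up: always at
 * boot, and at runtime only with CONFIG_HOTPLUG_CPU. Kernels without
 * CPU hotplug may discard it once boot-time initialization is done.
 */
static void __cpuinit example_online_cpu(int cpu)
{
        /* per-CPU bring-up work would go here */
}
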
diff --git a/kernel/sched.c b/kernel/sched.c
index 94ead43eda6..4e2f6033565 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5622,10 +5622,10 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
         double_rq_lock(rq_src, rq_dest);
         /* Already moved. */
         if (task_cpu(p) != src_cpu)
-                goto out;
+                goto done;
         /* Affinity changed (again). */
         if (!cpu_isset(dest_cpu, p->cpus_allowed))
-                goto out;
+                goto fail;
 
         on_rq = p->se.on_rq;
         if (on_rq)
@@ -5636,8 +5636,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
                 activate_task(rq_dest, p, 0);
                 check_preempt_curr(rq_dest, p);
         }
+done:
         ret = 1;
-out:
+fail:
         double_rq_unlock(rq_src, rq_dest);
         return ret;
 }
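
[Editor's note: the bug this hunk appears to fix is that both early exits
jumped past "ret = 1", reporting failure even when the task was already on
dest_cpu. Splitting the single "out" label into done (success) and fail (keep
ret == 0) distinguishes the two cases while still sharing the unlock path.
The shape in miniature, all names hypothetical:]

static int do_move(int already_there, int allowed)
{
        int ret = 0;

        if (already_there)
                goto done;      /* nothing to do, but that counts as success */
        if (!allowed)
                goto fail;      /* genuine failure: leave ret at 0 */

        /* ... perform the actual move ... */
done:
        ret = 1;
fail:
        /* shared cleanup/unlock path */
        return ret;
}
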