author  David Woodhouse <dwmw2@infradead.org>  2008-02-03 18:29:41 +1100
committer  David Woodhouse <dwmw2@infradead.org>  2008-02-03 18:30:32 +1100
commit  c1f3ee120bb61045b1c0a3ead620d1d65af47130 (patch)
tree  908430bf2b47fe8e96ac623ae7ab6dd5698d0938 /kernel
parent  e619a75ff6201b567a539e787aa9af9bc63a3187 (diff)
parent  9135f1901ee6449dfe338adf6e40e9c2025b8150 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.hz  2
-rw-r--r--  kernel/Kconfig.instrumentation  6
-rw-r--r--  kernel/Kconfig.preempt  13
-rw-r--r--  kernel/Makefile  9
-rw-r--r--  kernel/acct.c  4
-rw-r--r--  kernel/audit.c  444
-rw-r--r--  kernel/auditfilter.c  54
-rw-r--r--  kernel/auditsc.c  349
-rw-r--r--  kernel/backtracetest.c  48
-rw-r--r--  kernel/cgroup.c  9
-rw-r--r--  kernel/cpu.c  164
-rw-r--r--  kernel/cpu_acct.c  186
-rw-r--r--  kernel/cpuset.c  14
-rw-r--r--  kernel/exit.c  102
-rw-r--r--  kernel/extable.c  3
-rw-r--r--  kernel/fork.c  78
-rw-r--r--  kernel/futex.c  123
-rw-r--r--  kernel/futex_compat.c  29
-rw-r--r--  kernel/hrtimer.c  270
-rw-r--r--  kernel/irq/chip.c  9
-rw-r--r--  kernel/irq/handle.c  8
-rw-r--r--  kernel/irq/manage.c  3
-rw-r--r--  kernel/irq/proc.c  21
-rw-r--r--  kernel/irq/spurious.c  5
-rw-r--r--  kernel/kallsyms.c  18
-rw-r--r--  kernel/kexec.c  1
-rw-r--r--  kernel/kmod.c  13
-rw-r--r--  kernel/kprobes.c  2
-rw-r--r--  kernel/ksysfs.c  82
-rw-r--r--  kernel/kthread.c  12
-rw-r--r--  kernel/latencytop.c  239
-rw-r--r--  kernel/lockdep.c  75
-rw-r--r--  kernel/marker.c  41
-rw-r--r--  kernel/module.c  294
-rw-r--r--  kernel/mutex.c  36
-rw-r--r--  kernel/panic.c  41
-rw-r--r--  kernel/params.c  55
-rw-r--r--  kernel/pid.c  2
-rw-r--r--  kernel/posix-cpu-timers.c  30
-rw-r--r--  kernel/power/Kconfig  65
-rw-r--r--  kernel/power/disk.c  236
-rw-r--r--  kernel/power/main.c  197
-rw-r--r--  kernel/power/pm.c  4
-rw-r--r--  kernel/power/power.h  94
-rw-r--r--  kernel/power/process.c  6
-rw-r--r--  kernel/power/snapshot.c  31
-rw-r--r--  kernel/power/swap.c  33
-rw-r--r--  kernel/power/swsusp.c  48
-rw-r--r--  kernel/power/user.c  109
-rw-r--r--  kernel/printk.c  68
-rw-r--r--  kernel/profile.c  99
-rw-r--r--  kernel/ptrace.c  181
-rw-r--r--  kernel/rcuclassic.c  575
-rw-r--r--  kernel/rcupdate.c  576
-rw-r--r--  kernel/rcupreempt.c  953
-rw-r--r--  kernel/rcupreempt_trace.c  330
-rw-r--r--  kernel/rcutorture.c  6
-rw-r--r--  kernel/resource.c  2
-rw-r--r--  kernel/rtmutex-tester.c  2
-rw-r--r--  kernel/rwsem.c  5
-rw-r--r--  kernel/sched.c  1876
-rw-r--r--  kernel/sched_debug.c  30
-rw-r--r--  kernel/sched_fair.c  509
-rw-r--r--  kernel/sched_idletask.c  42
-rw-r--r--  kernel/sched_rt.c  1115
-rw-r--r--  kernel/sched_stats.h  14
-rw-r--r--  kernel/signal.c  30
-rw-r--r--  kernel/softirq.c  11
-rw-r--r--  kernel/softlockup.c  114
-rw-r--r--  kernel/spinlock.c  3
-rw-r--r--  kernel/stop_machine.c  4
-rw-r--r--  kernel/sys.c  20
-rw-r--r--  kernel/sys_ni.c  5
-rw-r--r--  kernel/sysctl.c  319
-rw-r--r--  kernel/sysctl_check.c  83
-rw-r--r--  kernel/taskstats.c  36
-rw-r--r--  kernel/test_kprobes.c  216
-rw-r--r--  kernel/time.c  1
-rw-r--r--  kernel/time/clockevents.c  18
-rw-r--r--  kernel/time/clocksource.c  33
-rw-r--r--  kernel/time/ntp.c  11
-rw-r--r--  kernel/time/tick-broadcast.c  65
-rw-r--r--  kernel/time/tick-internal.h  2
-rw-r--r--  kernel/time/tick-sched.c  97
-rw-r--r--  kernel/time/timekeeping.c  36
-rw-r--r--  kernel/time/timer_list.c  8
-rw-r--r--  kernel/time/timer_stats.c  2
-rw-r--r--  kernel/timer.c  123
-rw-r--r--  kernel/user.c  147
-rw-r--r--  kernel/utsname_sysctl.c  4
-rw-r--r--  kernel/wait.c  2
-rw-r--r--  kernel/workqueue.c  40
92 files changed, 8290 insertions, 3240 deletions
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 4af15802ccd..526128a2e62 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -54,3 +54,5 @@ config HZ
default 300 if HZ_300
default 1000 if HZ_1000
+config SCHED_HRTICK
+ def_bool HIGH_RES_TIMERS && X86
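The new SCHED_HRTICK symbol carries no prompt; the scheduler consumes it purely as a preprocessor guard around its high-resolution tick code. A minimal sketch of that pattern, assuming a hypothetical helper name (my_hrtick_start is illustration only, not code from this merge):

#include <linux/hrtimer.h>

#ifdef CONFIG_SCHED_HRTICK
/* High-resolution timers available: arm a real hrtimer for the next tick. */
static void my_hrtick_start(struct hrtimer *timer, ktime_t delay)
{
	hrtimer_start(timer, delay, HRTIMER_MODE_REL);
}
#else
/* Without the symbol the helper compiles away to nothing. */
static inline void my_hrtick_start(struct hrtimer *timer, ktime_t delay)
{
}
#endif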
diff --git a/kernel/Kconfig.instrumentation b/kernel/Kconfig.instrumentation
index f5f2c769d95..468f47ad750 100644
--- a/kernel/Kconfig.instrumentation
+++ b/kernel/Kconfig.instrumentation
@@ -20,8 +20,8 @@ config PROFILING
config OPROFILE
tristate "OProfile system profiling (EXPERIMENTAL)"
- depends on PROFILING
- depends on ALPHA || ARM || BLACKFIN || X86_32 || IA64 || M32R || MIPS || PARISC || PPC || S390 || SUPERH || SPARC || X86_64
+ depends on PROFILING && !UML
+ depends on ARCH_SUPPORTS_OPROFILE || ALPHA || ARM || BLACKFIN || IA64 || M32R || PARISC || PPC || S390 || SUPERH || SPARC
help
OProfile is a profiling system capable of profiling the
whole system, include the kernel, kernel modules, libraries,
@@ -31,7 +31,7 @@ config OPROFILE
config KPROBES
bool "Kprobes"
- depends on KALLSYMS && MODULES
+ depends on KALLSYMS && MODULES && !UML
depends on X86_32 || IA64 || PPC || S390 || SPARC64 || X86_64 || AVR32
help
Kprobes allows you to trap at almost any kernel address and
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index c64ce9c1420..0669b70fa6a 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -52,14 +52,13 @@ config PREEMPT
endchoice
-config PREEMPT_BKL
- bool "Preempt The Big Kernel Lock"
- depends on SMP || PREEMPT
+config RCU_TRACE
+ bool "Enable tracing for RCU - currently stats in debugfs"
+ select DEBUG_FS
default y
help
- This option reduces the latency of the kernel by making the
- big kernel lock preemptible.
+ This option provides tracing in RCU which presents stats
+ in debugfs for debugging RCU implementation.
- Say Y here if you are building a kernel for a desktop system.
+ Say Y here if you want to enable RCU tracing
Say N if you are unsure.
-
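RCU_TRACE selects DEBUG_FS because the preemptible-RCU statistics are exported through debugfs (rcupreempt_trace.o, added to the Makefile below). A hedged sketch of the usual debugfs/seq_file plumbing such a stats file relies on; the "rcu"/"rcustats" names and the my_* identifiers are assumptions for illustration, not taken from rcupreempt_trace.c:

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/init.h>

static struct dentry *my_rcu_dir;

static int my_rcustats_show(struct seq_file *m, void *unused)
{
	/* The real file prints per-CPU RCU counters; one line stands in here. */
	seq_printf(m, "example_counter=%lu\n", 0UL);
	return 0;
}

static int my_rcustats_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_rcustats_show, NULL);
}

static const struct file_operations my_rcustats_fops = {
	.open		= my_rcustats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init my_rcu_trace_init(void)
{
	my_rcu_dir = debugfs_create_dir("rcu", NULL);
	debugfs_create_file("rcustats", 0444, my_rcu_dir, NULL,
			    &my_rcustats_fops);
	return 0;
}
late_initcall(my_rcu_trace_init);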
diff --git a/kernel/Makefile b/kernel/Makefile
index f60afe74259..8885627ea02 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -36,14 +36,15 @@ obj-$(CONFIG_KALLSYMS) += kallsyms.o
obj-$(CONFIG_PM) += power/
obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
obj-$(CONFIG_KEXEC) += kexec.o
+obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o
obj-$(CONFIG_COMPAT) += compat.o
obj-$(CONFIG_CGROUPS) += cgroup.o
obj-$(CONFIG_CGROUP_DEBUG) += cgroup_debug.o
obj-$(CONFIG_CPUSETS) += cpuset.o
-obj-$(CONFIG_CGROUP_CPUACCT) += cpu_acct.o
obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o
obj-$(CONFIG_IKCONFIG) += configs.o
obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
+obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
@@ -53,11 +54,17 @@ obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_SECCOMP) += seccomp.o
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
+obj-$(CONFIG_CLASSIC_RCU) += rcuclassic.o
+obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o
+ifeq ($(CONFIG_PREEMPT_RCU),y)
+obj-$(CONFIG_RCU_TRACE) += rcupreempt_trace.o
+endif
obj-$(CONFIG_RELAY) += relay.o
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
obj-$(CONFIG_MARKERS) += marker.o
+obj-$(CONFIG_LATENCYTOP) += latencytop.o
ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/acct.c b/kernel/acct.c
index fce53d8df8a..521dfa53cb9 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -413,7 +413,7 @@ static u32 encode_float(u64 value)
* The acct_process() call is the workhorse of the process
* accounting system. The struct acct is built here and then written
* into the accounting file. This function should only be called from
- * do_exit().
+ * do_exit() or when switching to a different output file.
*/
/*
@@ -482,7 +482,7 @@ static void do_acct_process(struct file *file)
#endif
#if ACCT_VERSION==3
ac.ac_pid = current->tgid;
- ac.ac_ppid = current->parent->tgid;
+ ac.ac_ppid = current->real_parent->tgid;
#endif
spin_lock_irq(&current->sighand->siglock);
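The acct.c hunk switches ac_ppid from current->parent to current->real_parent: while a task is ptraced, ->parent points at the tracer, whereas ->real_parent always names the fork-time parent that accounting should report. A one-function sketch of the distinction (hypothetical helper, shown for illustration):

#include <linux/sched.h>

/* Which pid lands in the accounting record: the fork-time parent's tgid. */
static pid_t my_accounting_ppid(struct task_struct *tsk)
{
	/* ->parent may temporarily point at a ptrace tracer; ->real_parent never does. */
	return tsk->real_parent->tgid;
}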
diff --git a/kernel/audit.c b/kernel/audit.c
index f93c2713017..c8555b18021 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -66,10 +66,11 @@
* (Initialization happens after skb_init is called.) */
static int audit_initialized;
-/* 0 - no auditing
- * 1 - auditing enabled
- * 2 - auditing enabled and configuration is locked/unchangeable. */
+#define AUDIT_OFF 0
+#define AUDIT_ON 1
+#define AUDIT_LOCKED 2
int audit_enabled;
+int audit_ever_enabled;
/* Default state when kernel boots without any parameters. */
static int audit_default;
@@ -152,8 +153,10 @@ struct audit_buffer {
static void audit_set_pid(struct audit_buffer *ab, pid_t pid)
{
- struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
- nlh->nlmsg_pid = pid;
+ if (ab) {
+ struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
+ nlh->nlmsg_pid = pid;
+ }
}
void audit_panic(const char *message)
@@ -163,7 +166,8 @@ void audit_panic(const char *message)
case AUDIT_FAIL_SILENT:
break;
case AUDIT_FAIL_PRINTK:
- printk(KERN_ERR "audit: %s\n", message);
+ if (printk_ratelimit())
+ printk(KERN_ERR "audit: %s\n", message);
break;
case AUDIT_FAIL_PANIC:
panic("audit: %s\n", message);
@@ -231,161 +235,107 @@ void audit_log_lost(const char *message)
}
if (print) {
- printk(KERN_WARNING
- "audit: audit_lost=%d audit_rate_limit=%d audit_backlog_limit=%d\n",
- atomic_read(&audit_lost),
- audit_rate_limit,
- audit_backlog_limit);
+ if (printk_ratelimit())
+ printk(KERN_WARNING
+ "audit: audit_lost=%d audit_rate_limit=%d "
+ "audit_backlog_limit=%d\n",
+ atomic_read(&audit_lost),
+ audit_rate_limit,
+ audit_backlog_limit);
audit_panic(message);
}
}
-static int audit_set_rate_limit(int limit, uid_t loginuid, u32 sid)
+static int audit_log_config_change(char *function_name, int new, int old,
+ uid_t loginuid, u32 sid, int allow_changes)
{
- int res, rc = 0, old = audit_rate_limit;
-
- /* check if we are locked */
- if (audit_enabled == 2)
- res = 0;
- else
- res = 1;
+ struct audit_buffer *ab;
+ int rc = 0;
+ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+ audit_log_format(ab, "%s=%d old=%d by auid=%u", function_name, new,
+ old, loginuid);
if (sid) {
char *ctx = NULL;
u32 len;
- if ((rc = selinux_sid_to_string(sid, &ctx, &len)) == 0) {
- audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
- "audit_rate_limit=%d old=%d by auid=%u"
- " subj=%s res=%d",
- limit, old, loginuid, ctx, res);
+
+ rc = selinux_sid_to_string(sid, &ctx, &len);
+ if (rc) {
+ audit_log_format(ab, " sid=%u", sid);
+ allow_changes = 0; /* Something weird, deny request */
+ } else {
+ audit_log_format(ab, " subj=%s", ctx);
kfree(ctx);
- } else
- res = 0; /* Something weird, deny request */
+ }
}
- audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
- "audit_rate_limit=%d old=%d by auid=%u res=%d",
- limit, old, loginuid, res);
-
- /* If we are allowed, make the change */
- if (res == 1)
- audit_rate_limit = limit;
- /* Not allowed, update reason */
- else if (rc == 0)
- rc = -EPERM;
+ audit_log_format(ab, " res=%d", allow_changes);
+ audit_log_end(ab);
return rc;
}
-static int audit_set_backlog_limit(int limit, uid_t loginuid, u32 sid)
+static int audit_do_config_change(char *function_name, int *to_change,
+ int new, uid_t loginuid, u32 sid)
{
- int res, rc = 0, old = audit_backlog_limit;
+ int allow_changes, rc = 0, old = *to_change;
/* check if we are locked */
- if (audit_enabled == 2)
- res = 0;
+ if (audit_enabled == AUDIT_LOCKED)
+ allow_changes = 0;
else
- res = 1;
+ allow_changes = 1;
- if (sid) {
- char *ctx = NULL;
- u32 len;
- if ((rc = selinux_sid_to_string(sid, &ctx, &len)) == 0) {
- audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
- "audit_backlog_limit=%d old=%d by auid=%u"
- " subj=%s res=%d",
- limit, old, loginuid, ctx, res);
- kfree(ctx);
- } else
- res = 0; /* Something weird, deny request */
+ if (audit_enabled != AUDIT_OFF) {
+ rc = audit_log_config_change(function_name, new, old,
+ loginuid, sid, allow_changes);
+ if (rc)
+ allow_changes = 0;
}
- audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
- "audit_backlog_limit=%d old=%d by auid=%u res=%d",
- limit, old, loginuid, res);
/* If we are allowed, make the change */
- if (res == 1)
- audit_backlog_limit = limit;
+ if (allow_changes == 1)
+ *to_change = new;
/* Not allowed, update reason */
else if (rc == 0)
rc = -EPERM;
return rc;
}
-static int audit_set_enabled(int state, uid_t loginuid, u32 sid)
+static int audit_set_rate_limit(int limit, uid_t loginuid, u32 sid)
{
- int res, rc = 0, old = audit_enabled;
+ return audit_do_config_change("audit_rate_limit", &audit_rate_limit,
+ limit, loginuid, sid);
+}
+
+static int audit_set_backlog_limit(int limit, uid_t loginuid, u32 sid)
+{
+ return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit,
+ limit, loginuid, sid);
+}
- if (state < 0 || state > 2)
+static int audit_set_enabled(int state, uid_t loginuid, u32 sid)
+{
+ int rc;
+ if (state < AUDIT_OFF || state > AUDIT_LOCKED)
return -EINVAL;
- /* check if we are locked */
- if (audit_enabled == 2)
- res = 0;
- else
- res = 1;
+ rc = audit_do_config_change("audit_enabled", &audit_enabled, state,
+ loginuid, sid);
- if (sid) {
- char *ctx = NULL;
- u32 len;
- if ((rc = selinux_sid_to_string(sid, &ctx, &len)) == 0) {
- audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
- "audit_enabled=%d old=%d by auid=%u"
- " subj=%s res=%d",
- state, old, loginuid, ctx, res);
- kfree(ctx);
- } else
- res = 0; /* Something weird, deny request */
- }
- audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
- "audit_enabled=%d old=%d by auid=%u res=%d",
- state, old, loginuid, res);
+ if (!rc)
+ audit_ever_enabled |= !!state;
- /* If we are allowed, make the change */
- if (res == 1)
- audit_enabled = state;
- /* Not allowed, update reason */
- else if (rc == 0)
- rc = -EPERM;
return rc;
}
static int audit_set_failure(int state, uid_t loginuid, u32 sid)
{
- int res, rc = 0, old = audit_failure;
-
if (state != AUDIT_FAIL_SILENT
&& state != AUDIT_FAIL_PRINTK
&& state != AUDIT_FAIL_PANIC)
return -EINVAL;
- /* check if we are locked */
- if (audit_enabled == 2)
- res = 0;
- else
- res = 1;
-
- if (sid) {
- char *ctx = NULL;
- u32 len;
- if ((rc = selinux_sid_to_string(sid, &ctx, &len)) == 0) {
- audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
- "audit_failure=%d old=%d by auid=%u"
- " subj=%s res=%d",
- state, old, loginuid, ctx, res);
- kfree(ctx);
- } else
- res = 0; /* Something weird, deny request */
- }
- audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
- "audit_failure=%d old=%d by auid=%u res=%d",
- state, old, loginuid, res);
-
- /* If we are allowed, make the change */
- if (res == 1)
- audit_failure = state;
- /* Not allowed, update reason */
- else if (rc == 0)
- rc = -EPERM;
- return rc;
+ return audit_do_config_change("audit_failure", &audit_failure, state,
+ loginuid, sid);
}
static int kauditd_thread(void *dummy)
@@ -405,7 +355,11 @@ static int kauditd_thread(void *dummy)
audit_pid = 0;
}
} else {
- printk(KERN_NOTICE "%s\n", skb->data + NLMSG_SPACE(0));
+ if (printk_ratelimit())
+ printk(KERN_NOTICE "%s\n", skb->data +
+ NLMSG_SPACE(0));
+ else
+ audit_log_lost("printk limit exceeded\n");
kfree_skb(skb);
}
} else {
@@ -573,6 +527,33 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
return err;
}
+static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
+ u32 pid, u32 uid, uid_t auid, u32 sid)
+{
+ int rc = 0;
+ char *ctx = NULL;
+ u32 len;
+
+ if (!audit_enabled) {
+ *ab = NULL;
+ return rc;
+ }
+
+ *ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
+ audit_log_format(*ab, "user pid=%d uid=%u auid=%u",
+ pid, uid, auid);
+ if (sid) {
+ rc = selinux_sid_to_string(sid, &ctx, &len);
+ if (rc)
+ audit_log_format(*ab, " ssid=%u", sid);
+ else
+ audit_log_format(*ab, " subj=%s", ctx);
+ kfree(ctx);
+ }
+
+ return rc;
+}
+
static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
u32 uid, pid, seq, sid;
@@ -583,7 +564,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
u16 msg_type = nlh->nlmsg_type;
uid_t loginuid; /* loginuid of sender */
struct audit_sig_info *sig_data;
- char *ctx;
+ char *ctx = NULL;
u32 len;
err = audit_netlink_ok(skb, msg_type);
@@ -634,23 +615,14 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (err < 0) return err;
}
if (status_get->mask & AUDIT_STATUS_PID) {
- int old = audit_pid;
- if (sid) {
- if ((err = selinux_sid_to_string(
- sid, &ctx, &len)))
- return err;
- else
- audit_log(NULL, GFP_KERNEL,
- AUDIT_CONFIG_CHANGE,
- "audit_pid=%d old=%d by auid=%u subj=%s",
- status_get->pid, old,
- loginuid, ctx);
- kfree(ctx);
- } else
- audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
- "audit_pid=%d old=%d by auid=%u",
- status_get->pid, old, loginuid);
- audit_pid = status_get->pid;
+ int new_pid = status_get->pid;
+
+ if (audit_enabled != AUDIT_OFF)
+ audit_log_config_change("audit_pid", new_pid,
+ audit_pid, loginuid,
+ sid, 1);
+
+ audit_pid = new_pid;
}
if (status_get->mask & AUDIT_STATUS_RATE_LIMIT)
err = audit_set_rate_limit(status_get->rate_limit,
@@ -673,64 +645,35 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (err)
break;
}
- ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
- if (ab) {
- audit_log_format(ab,
- "user pid=%d uid=%u auid=%u",
- pid, uid, loginuid);
- if (sid) {
- if (selinux_sid_to_string(
- sid, &ctx, &len)) {
- audit_log_format(ab,
- " ssid=%u", sid);
- /* Maybe call audit_panic? */
- } else
- audit_log_format(ab,
- " subj=%s", ctx);
- kfree(ctx);
- }
- if (msg_type != AUDIT_USER_TTY)
- audit_log_format(ab, " msg='%.1024s'",
- (char *)data);
- else {
- int size;
-
- audit_log_format(ab, " msg=");
- size = nlmsg_len(nlh);
- audit_log_n_untrustedstring(ab, size,
- data);
- }
- audit_set_pid(ab, pid);
- audit_log_end(ab);
+ audit_log_common_recv_msg(&ab, msg_type, pid, uid,
+ loginuid, sid);
+
+ if (msg_type != AUDIT_USER_TTY)
+ audit_log_format(ab, " msg='%.1024s'",
+ (char *)data);
+ else {
+ int size;
+
+ audit_log_format(ab, " msg=");
+ size = nlmsg_len(nlh);
+ audit_log_n_untrustedstring(ab, size,
+ data);
}
+ audit_set_pid(ab, pid);
+ audit_log_end(ab);
}
break;
case AUDIT_ADD:
case AUDIT_DEL:
if (nlmsg_len(nlh) < sizeof(struct audit_rule))
return -EINVAL;
- if (audit_enabled == 2) {
- ab = audit_log_start(NULL, GFP_KERNEL,
- AUDIT_CONFIG_CHANGE);
- if (ab) {
- audit_log_format(ab,
- "pid=%d uid=%u auid=%u",
- pid, uid, loginuid);
- if (sid) {
- if (selinux_sid_to_string(
- sid, &ctx, &len)) {
- audit_log_format(ab,
- " ssid=%u", sid);
- /* Maybe call audit_panic? */
- } else
- audit_log_format(ab,
- " subj=%s", ctx);
- kfree(ctx);
- }
- audit_log_format(ab, " audit_enabled=%d res=0",
- audit_enabled);
- audit_log_end(ab);
- }
+ if (audit_enabled == AUDIT_LOCKED) {
+ audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid,
+ uid, loginuid, sid);
+
+ audit_log_format(ab, " audit_enabled=%d res=0",
+ audit_enabled);
+ audit_log_end(ab);
return -EPERM;
}
/* fallthrough */
@@ -743,28 +686,13 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
case AUDIT_DEL_RULE:
if (nlmsg_len(nlh) < sizeof(struct audit_rule_data))
return -EINVAL;
- if (audit_enabled == 2) {
- ab = audit_log_start(NULL, GFP_KERNEL,
- AUDIT_CONFIG_CHANGE);
- if (ab) {
- audit_log_format(ab,
- "pid=%d uid=%u auid=%u",
- pid, uid, loginuid);
- if (sid) {
- if (selinux_sid_to_string(
- sid, &ctx, &len)) {
- audit_log_format(ab,
- " ssid=%u", sid);
- /* Maybe call audit_panic? */
- } else
- audit_log_format(ab,
- " subj=%s", ctx);
- kfree(ctx);
- }
- audit_log_format(ab, " audit_enabled=%d res=0",
- audit_enabled);
- audit_log_end(ab);
- }
+ if (audit_enabled == AUDIT_LOCKED) {
+ audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid,
+ uid, loginuid, sid);
+
+ audit_log_format(ab, " audit_enabled=%d res=0",
+ audit_enabled);
+ audit_log_end(ab);
return -EPERM;
}
/* fallthrough */
@@ -775,19 +703,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
break;
case AUDIT_TRIM:
audit_trim_trees();
- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
- if (!ab)
- break;
- audit_log_format(ab, "auid=%u", loginuid);
- if (sid) {
- u32 len;
- ctx = NULL;
- if (selinux_sid_to_string(sid, &ctx, &len))
- audit_log_format(ab, " ssid=%u", sid);
- else
- audit_log_format(ab, " subj=%s", ctx);
- kfree(ctx);
- }
+
+ audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid,
+ uid, loginuid, sid);
+
audit_log_format(ab, " op=trim res=1");
audit_log_end(ab);
break;
@@ -817,22 +736,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
/* OK, here comes... */
err = audit_tag_tree(old, new);
- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
- if (!ab) {
- kfree(old);
- kfree(new);
- break;
- }
- audit_log_format(ab, "auid=%u", loginuid);
- if (sid) {
- u32 len;
- ctx = NULL;
- if (selinux_sid_to_string(sid, &ctx, &len))
- audit_log_format(ab, " ssid=%u", sid);
- else
- audit_log_format(ab, " subj=%s", ctx);
- kfree(ctx);
- }
+ audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid,
+ uid, loginuid, sid);
+
audit_log_format(ab, " op=make_equiv old=");
audit_log_untrustedstring(ab, old);
audit_log_format(ab, " new=");
@@ -965,6 +871,7 @@ static int __init audit_init(void)
skb_queue_head_init(&audit_skb_queue);
audit_initialized = 1;
audit_enabled = audit_default;
+ audit_ever_enabled |= !!audit_default;
/* Register the callback with selinux. This callback will be invoked
* when a new policy is loaded. */
@@ -992,8 +899,10 @@ static int __init audit_enable(char *str)
printk(KERN_INFO "audit: %s%s\n",
audit_default ? "enabled" : "disabled",
audit_initialized ? "" : " (after initialization)");
- if (audit_initialized)
+ if (audit_initialized) {
audit_enabled = audit_default;
+ audit_ever_enabled |= !!audit_default;
+ }
return 1;
}
@@ -1130,7 +1039,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
{
struct audit_buffer *ab = NULL;
struct timespec t;
- unsigned int serial;
+ unsigned int uninitialized_var(serial);
int reserve;
unsigned long timeout_start = jiffies;
@@ -1164,7 +1073,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
remove_wait_queue(&audit_backlog_wait, &wait);
continue;
}
- if (audit_rate_check())
+ if (audit_rate_check() && printk_ratelimit())
printk(KERN_WARNING
"audit: audit_backlog=%d > "
"audit_backlog_limit=%d\n",
@@ -1200,13 +1109,17 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
static inline int audit_expand(struct audit_buffer *ab, int extra)
{
struct sk_buff *skb = ab->skb;
- int ret = pskb_expand_head(skb, skb_headroom(skb), extra,
- ab->gfp_mask);
+ int oldtail = skb_tailroom(skb);
+ int ret = pskb_expand_head(skb, 0, extra, ab->gfp_mask);
+ int newtail = skb_tailroom(skb);
+
if (ret < 0) {
audit_log_lost("out of memory in audit_expand");
return 0;
}
- return skb_tailroom(skb);
+
+ skb->truesize += newtail - oldtail;
+ return newtail;
}
/*
@@ -1245,6 +1158,7 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
goto out;
len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
}
+ va_end(args2);
if (len > 0)
skb_put(skb, len);
out:
@@ -1346,6 +1260,21 @@ static void audit_log_n_string(struct audit_buffer *ab, size_t slen,
}
/**
+ * audit_string_contains_control - does a string need to be logged in hex
+ * @string - string to be checked
+ * @len - max length of the string to check
+ */
+int audit_string_contains_control(const char *string, size_t len)
+{
+ const unsigned char *p;
+ for (p = string; p < (const unsigned char *)string + len && *p; p++) {
+ if (*p == '"' || *p < 0x21 || *p > 0x7f)
+ return 1;
+ }
+ return 0;
+}
+
+/**
* audit_log_n_untrustedstring - log a string that may contain random characters
* @ab: audit_buffer
* @len: lenth of string (not including trailing null)
@@ -1359,19 +1288,13 @@ static void audit_log_n_string(struct audit_buffer *ab, size_t slen,
* The caller specifies the number of characters in the string to log, which may
* or may not be the entire string.
*/
-const char *audit_log_n_untrustedstring(struct audit_buffer *ab, size_t len,
- const char *string)
+void audit_log_n_untrustedstring(struct audit_buffer *ab, size_t len,
+ const char *string)
{
- const unsigned char *p;
-
- for (p = string; p < (const unsigned char *)string + len && *p; p++) {
- if (*p == '"' || *p < 0x21 || *p > 0x7f) {
- audit_log_hex(ab, string, len);
- return string + len + 1;
- }
- }
- audit_log_n_string(ab, len, string);
- return p + 1;
+ if (audit_string_contains_control(string, len))
+ audit_log_hex(ab, string, len);
+ else
+ audit_log_n_string(ab, len, string);
}
/**
@@ -1382,9 +1305,9 @@ const char *audit_log_n_untrustedstring(struct audit_buffer *ab, size_t len,
* Same as audit_log_n_untrustedstring(), except that strlen is used to
* determine string length.
*/
-const char *audit_log_untrustedstring(struct audit_buffer *ab, const char *string)
+void audit_log_untrustedstring(struct audit_buffer *ab, const char *string)
{
- return audit_log_n_untrustedstring(ab, strlen(string), string);
+ audit_log_n_untrustedstring(ab, strlen(string), string);
}
/* This is a helper-function to print the escaped d_path */
@@ -1433,8 +1356,11 @@ void audit_log_end(struct audit_buffer *ab)
skb_queue_tail(&audit_skb_queue, ab->skb);
ab->skb = NULL;
wake_up_interruptible(&kauditd_wait);
+ } else if (printk_ratelimit()) {
+ struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
+ printk(KERN_NOTICE "type=%d %s\n", nlh->nlmsg_type, ab->skb->data + NLMSG_SPACE(0));
} else {
- printk(KERN_NOTICE "%s\n", ab->skb->data + NLMSG_SPACE(0));
+ audit_log_lost("printk limit exceeded\n");
}
}
audit_buffer_free(ab);
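Most of the audit.c churn collapses four nearly identical setters into audit_do_config_change() plus audit_log_config_change(). As a usage sketch, this is how a further integer tunable would plug into the same helper if it sat next to the existing setters in audit.c; audit_foo_limit is hypothetical and not part of this merge:

/* Hypothetical tunable, shown only to illustrate the new helper's shape. */
static int audit_foo_limit;

static int audit_set_foo_limit(int limit, uid_t loginuid, u32 sid)
{
	/* Logs old/new values, honours AUDIT_LOCKED, returns -EPERM when denied. */
	return audit_do_config_change("audit_foo_limit", &audit_foo_limit,
				      limit, loginuid, sid);
}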
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 5d96f2cc7be..6f19fd477aa 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -95,6 +95,8 @@ extern struct inotify_handle *audit_ih;
/* Inotify events we care about. */
#define AUDIT_IN_WATCH IN_MOVE|IN_CREATE|IN_DELETE|IN_DELETE_SELF|IN_MOVE_SELF
+extern int audit_enabled;
+
void audit_free_parent(struct inotify_watch *i_watch)
{
struct audit_parent *parent;
@@ -974,7 +976,6 @@ static void audit_update_watch(struct audit_parent *parent,
struct audit_watch *owatch, *nwatch, *nextw;
struct audit_krule *r, *nextr;
struct audit_entry *oentry, *nentry;
- struct audit_buffer *ab;
mutex_lock(&audit_filter_mutex);
list_for_each_entry_safe(owatch, nextw, &parent->watches, wlist) {
@@ -1014,13 +1015,18 @@ static void audit_update_watch(struct audit_parent *parent,
call_rcu(&oentry->rcu, audit_free_rule_rcu);
}
- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
- audit_log_format(ab, "op=updated rules specifying path=");
- audit_log_untrustedstring(ab, owatch->path);
- audit_log_format(ab, " with dev=%u ino=%lu\n", dev, ino);
- audit_log_format(ab, " list=%d res=1", r->listnr);
- audit_log_end(ab);
-
+ if (audit_enabled) {
+ struct audit_buffer *ab;
+ ab = audit_log_start(NULL, GFP_KERNEL,
+ AUDIT_CONFIG_CHANGE);
+ audit_log_format(ab,
+ "op=updated rules specifying path=");
+ audit_log_untrustedstring(ab, owatch->path);
+ audit_log_format(ab, " with dev=%u ino=%lu\n",
+ dev, ino);
+ audit_log_format(ab, " list=%d res=1", r->listnr);
+ audit_log_end(ab);
+ }
audit_remove_watch(owatch);
goto add_watch_to_parent; /* event applies to a single watch */
}
@@ -1039,25 +1045,28 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
struct audit_watch *w, *nextw;
struct audit_krule *r, *nextr;
struct audit_entry *e;
- struct audit_buffer *ab;
mutex_lock(&audit_filter_mutex);
parent->flags |= AUDIT_PARENT_INVALID;
list_for_each_entry_safe(w, nextw, &parent->watches, wlist) {
list_for_each_entry_safe(r, nextr, &w->rules, rlist) {
e = container_of(r, struct audit_entry, rule);
-
- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
- audit_log_format(ab, "op=remove rule path=");
- audit_log_untrustedstring(ab, w->path);
- if (r->filterkey) {
- audit_log_format(ab, " key=");
- audit_log_untrustedstring(ab, r->filterkey);
- } else
- audit_log_format(ab, " key=(null)");
- audit_log_format(ab, " list=%d res=1", r->listnr);
- audit_log_end(ab);
-
+ if (audit_enabled) {
+ struct audit_buffer *ab;
+ ab = audit_log_start(NULL, GFP_KERNEL,
+ AUDIT_CONFIG_CHANGE);
+ audit_log_format(ab, "op=remove rule path=");
+ audit_log_untrustedstring(ab, w->path);
+ if (r->filterkey) {
+ audit_log_format(ab, " key=");
+ audit_log_untrustedstring(ab,
+ r->filterkey);
+ } else
+ audit_log_format(ab, " key=(null)");
+ audit_log_format(ab, " list=%d res=1",
+ r->listnr);
+ audit_log_end(ab);
+ }
list_del(&r->rlist);
list_del_rcu(&e->list);
call_rcu(&e->rcu, audit_free_rule_rcu);
@@ -1495,6 +1504,9 @@ static void audit_log_rule_change(uid_t loginuid, u32 sid, char *action,
{
struct audit_buffer *ab;
+ if (!audit_enabled)
+ return;
+
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
if (!ab)
return;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index bce9ecdb771..1c06ecf38d7 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -70,6 +70,7 @@
#include "audit.h"
extern struct list_head audit_filter_list[];
+extern int audit_ever_enabled;
/* AUDIT_NAMES is the number of slots we reserve in the audit_context
* for saving names from getname(). */
@@ -78,6 +79,9 @@ extern struct list_head audit_filter_list[];
/* Indicates that audit should log the full pathname. */
#define AUDIT_NAME_FULL -1
+/* no execve audit message should be longer than this (userspace limits) */
+#define MAX_EXECVE_AUDIT_LEN 7500
+
/* number of audit rules */
int audit_n_rules;
@@ -176,7 +180,11 @@ struct audit_aux_data_fd_pair {
struct audit_aux_data_pids {
struct audit_aux_data d;
pid_t target_pid[AUDIT_AUX_PIDS];
+ uid_t target_auid[AUDIT_AUX_PIDS];
+ uid_t target_uid[AUDIT_AUX_PIDS];
+ unsigned int target_sessionid[AUDIT_AUX_PIDS];
u32 target_sid[AUDIT_AUX_PIDS];
+ char target_comm[AUDIT_AUX_PIDS][TASK_COMM_LEN];
int pid_count;
};
@@ -192,7 +200,6 @@ struct audit_context {
enum audit_state state;
unsigned int serial; /* serial number for record */
struct timespec ctime; /* time of syscall entry */
- uid_t loginuid; /* login uid (identity) */
int major; /* syscall number */
unsigned long argv[4]; /* syscall arguments */
int return_valid; /* return code is valid */
@@ -215,7 +222,11 @@ struct audit_context {
int arch;
pid_t target_pid;
+ uid_t target_auid;
+ uid_t target_uid;
+ unsigned int target_sessionid;
u32 target_sid;
+ char target_comm[TASK_COMM_LEN];
struct audit_tree_refs *trees, *first_trees;
int tree_count;
@@ -506,7 +517,7 @@ static int audit_filter_rules(struct task_struct *tsk,
case AUDIT_LOGINUID:
result = 0;
if (ctx)
- result = audit_comparator(ctx->loginuid, f->op, f->val);
+ result = audit_comparator(tsk->loginuid, f->op, f->val);
break;
case AUDIT_SUBJ_USER:
case AUDIT_SUBJ_ROLE:
@@ -702,7 +713,24 @@ static inline struct audit_context *audit_get_context(struct task_struct *tsk,
if (likely(!context))
return NULL;
context->return_valid = return_valid;
- context->return_code = return_code;
+
+ /*
+ * we need to fix up the return code in the audit logs if the actual
+ * return codes are later going to be fixed up by the arch specific
+ * signal handlers
+ *
+ * This is actually a test for:
+ * (rc == ERESTARTSYS ) || (rc == ERESTARTNOINTR) ||
+ * (rc == ERESTARTNOHAND) || (rc == ERESTART_RESTARTBLOCK)
+ *
+ * but is faster than a bunch of ||
+ */
+ if (unlikely(return_code <= -ERESTARTSYS) &&
+ (return_code >= -ERESTART_RESTARTBLOCK) &&
+ (return_code != -ENOIOCTLCMD))
+ context->return_code = -EINTR;
+ else
+ context->return_code = return_code;
if (context->in_syscall && !context->dummy && !context->auditable) {
enum audit_state state;
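The comment in the hunk above asserts that the range test equals checking the four restart codes individually. With the kernel-internal errno values (ERESTARTSYS=512 through ERESTART_RESTARTBLOCK=516 in include/linux/errno.h at the time), the range [-516, -512] minus the -ENOIOCTLCMD (-515) hole is exactly {-512, -513, -514, -516}. A small sketch of the two equivalent forms (helper names invented here):

#include <linux/errno.h>	/* ERESTARTSYS..ERESTART_RESTARTBLOCK are 512..516 */

/* Spelled-out form of the test described in the comment above. */
static inline int my_is_restart_error(long rc)
{
	return rc == -ERESTARTSYS || rc == -ERESTARTNOINTR ||
	       rc == -ERESTARTNOHAND || rc == -ERESTART_RESTARTBLOCK;
}

/* Compact form: one contiguous range with -ENOIOCTLCMD (-515) carved out. */
static inline int my_is_restart_error_fast(long rc)
{
	return rc <= -ERESTARTSYS && rc >= -ERESTART_RESTARTBLOCK &&
	       rc != -ENOIOCTLCMD;
}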
@@ -783,11 +811,8 @@ static inline void audit_free_aux(struct audit_context *context)
static inline void audit_zero_context(struct audit_context *context,
enum audit_state state)
{
- uid_t loginuid = context->loginuid;
-
memset(context, 0, sizeof(*context));
context->state = state;
- context->loginuid = loginuid;
}
static inline struct audit_context *audit_alloc_context(enum audit_state state)
@@ -814,7 +839,7 @@ int audit_alloc(struct task_struct *tsk)
struct audit_context *context;
enum audit_state state;
- if (likely(!audit_enabled))
+ if (likely(!audit_ever_enabled))
return 0; /* Return if not auditing. */
state = audit_filter_task(tsk);
@@ -826,11 +851,6 @@ int audit_alloc(struct task_struct *tsk)
return -ENOMEM;
}
- /* Preserve login uid */
- context->loginuid = -1;
- if (current->audit_context)
- context->loginuid = current->audit_context->loginuid;
-
tsk->audit_context = context;
set_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT);
return 0;
@@ -922,7 +942,8 @@ static void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk
}
static int audit_log_pid_context(struct audit_context *context, pid_t pid,
- u32 sid)
+ uid_t auid, uid_t uid, unsigned int sessionid,
+ u32 sid, char *comm)
{
struct audit_buffer *ab;
char *s = NULL;
@@ -931,68 +952,204 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
ab = audit_log_start(context, GFP_KERNEL, AUDIT_OBJ_PID);
if (!ab)
- return 1;
+ return rc;
+ audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid, auid,
+ uid, sessionid);
if (selinux_sid_to_string(sid, &s, &len)) {
- audit_log_format(ab, "opid=%d obj=(none)", pid);
+ audit_log_format(ab, " obj=(none)");
rc = 1;
} else
- audit_log_format(ab, "opid=%d obj=%s", pid, s);
+ audit_log_format(ab, " obj=%s", s);
+ audit_log_format(ab, " ocomm=");
+ audit_log_untrustedstring(ab, comm);
audit_log_end(ab);
kfree(s);
return rc;
}
-static void audit_log_execve_info(struct audit_buffer *ab,
- struct audit_aux_data_execve *axi)
+/*
+ * to_send and len_sent accounting are very loose estimates. We aren't
+ * really worried about a hard cap to MAX_EXECVE_AUDIT_LEN so much as being
+ * within about 500 bytes (next page boundry)
+ *
+ * why snprintf? an int is up to 12 digits long. if we just assumed when
+ * logging that a[%d]= was going to be 16 characters long we would be wasting
+ * space in every audit message. In one 7500 byte message we can log up to
+ * about 1000 min size arguments. That comes down to about 50% waste of space
+ * if we didn't do the snprintf to find out how long arg_num_len was.
+ */
+static int audit_log_single_execve_arg(struct audit_context *context,
+ struct audit_buffer **ab,
+ int arg_num,
+ size_t *len_sent,
+ const char __user *p,
+ char *buf)
{
- int i;
- long len, ret;
- const char __user *p;
- char *buf;
+ char arg_num_len_buf[12];
+ const char __user *tmp_p = p;
+ /* how many digits are in arg_num? 3 is the length of a=\n */
+ size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 3;
+ size_t len, len_left, to_send;
+ size_t max_execve_audit_len = MAX_EXECVE_AUDIT_LEN;
+ unsigned int i, has_cntl = 0, too_long = 0;
+ int ret;
+
+ /* strnlen_user includes the null we don't want to send */
+ len_left = len = strnlen_user(p, MAX_ARG_STRLEN) - 1;
- if (axi->mm != current->mm)
- return; /* execve failed, no additional info */
-
- p = (const char __user *)axi->mm->arg_start;
+ /*
+ * We just created this mm, if we can't find the strings
+ * we just copied into it something is _very_ wrong. Similar
+ * for strings that are too long, we should not have created
+ * any.
+ */
+ if (unlikely(len == -1 || len > MAX_ARG_STRLEN - 1)) {
+ WARN_ON(1);
+ send_sig(SIGKILL, current, 0);
+ }
- for (i = 0; i < axi->argc; i++, p += len) {
- len = strnlen_user(p, MAX_ARG_STRLEN);
+ /* walk the whole argument looking for non-ascii chars */
+ do {
+ if (len_left > MAX_EXECVE_AUDIT_LEN)
+ to_send = MAX_EXECVE_AUDIT_LEN;
+ else
+ to_send = len_left;
+ ret = copy_from_user(buf, tmp_p, to_send);
/*
- * We just created this mm, if we can't find the strings
- * we just copied into it something is _very_ wrong. Similar
- * for strings that are too long, we should not have created
- * any.
+ * There is no reason for this copy to be short. We just
+ * copied them here, and the mm hasn't been exposed to user-
+ * space yet.
*/
- if (!len || len > MAX_ARG_STRLEN) {
+ if (ret) {
WARN_ON(1);
send_sig(SIGKILL, current, 0);
}
-
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf) {
- audit_panic("out of memory for argv string\n");
+ buf[to_send] = '\0';
+ has_cntl = audit_string_contains_control(buf, to_send);
+ if (has_cntl) {
+ /*
+ * hex messages get logged as 2 bytes, so we can only
+ * send half as much in each message
+ */
+ max_execve_audit_len = MAX_EXECVE_AUDIT_LEN / 2;
break;
}
+ len_left -= to_send;
+ tmp_p += to_send;
+ } while (len_left > 0);
+
+ len_left = len;
+
+ if (len > max_execve_audit_len)
+ too_long = 1;
+
+ /* rewalk the argument actually logging the message */
+ for (i = 0; len_left > 0; i++) {
+ int room_left;
+
+ if (len_left > max_execve_audit_len)
+ to_send = max_execve_audit_len;
+ else
+ to_send = len_left;
+
+ /* do we have space left to send this argument in this ab? */
+ room_left = MAX_EXECVE_AUDIT_LEN - arg_num_len - *len_sent;
+ if (has_cntl)
+ room_left -= (to_send * 2);
+ else
+ room_left -= to_send;
+ if (room_left < 0) {
+ *len_sent = 0;
+ audit_log_end(*ab);
+ *ab = audit_log_start(context, GFP_KERNEL, AUDIT_EXECVE);
+ if (!*ab)
+ return 0;
+ }
- ret = copy_from_user(buf, p, len);
/*
- * There is no reason for this copy to be short. We just
- * copied them here, and the mm hasn't been exposed to user-
- * space yet.
+ * first record needs to say how long the original string was
+ * so we can be sure nothing was lost.
+ */
+ if ((i == 0) && (too_long))
+ audit_log_format(*ab, "a%d_len=%ld ", arg_num,
+ has_cntl ? 2*len : len);
+
+ /*
+ * normally arguments are small enough to fit and we already
+ * filled buf above when we checked for control characters
+ * so don't bother with another copy_from_user
*/
+ if (len >= max_execve_audit_len)
+ ret = copy_from_user(buf, p, to_send);
+ else
+ ret = 0;
if (ret) {
WARN_ON(1);
send_sig(SIGKILL, current, 0);
}
+ buf[to_send] = '\0';
+
+ /* actually log it */
+ audit_log_format(*ab, "a%d", arg_num);
+ if (too_long)
+ audit_log_format(*ab, "[%d]", i);
+ audit_log_format(*ab, "=");
+ if (has_cntl)
+ audit_log_hex(*ab, buf, to_send);
+ else
+ audit_log_format(*ab, "\"%s\"", buf);
+ audit_log_format(*ab, "\n");
+
+ p += to_send;
+ len_left -= to_send;
+ *len_sent += arg_num_len;
+ if (has_cntl)
+ *len_sent += to_send * 2;
+ else
+ *len_sent += to_send;
+ }
+ /* include the null we didn't log */
+ return len + 1;
+}
- audit_log_format(ab, "a%d=", i);
- audit_log_untrustedstring(ab, buf);
- audit_log_format(ab, "\n");
+static void audit_log_execve_info(struct audit_context *context,
+ struct audit_buffer **ab,
+ struct audit_aux_data_execve *axi)
+{
+ int i;
+ size_t len, len_sent = 0;
+ const char __user *p;
+ char *buf;
+
+ if (axi->mm != current->mm)
+ return; /* execve failed, no additional info */
+
+ p = (const char __user *)axi->mm->arg_start;
+
+ audit_log_format(*ab, "argc=%d ", axi->argc);
+
+ /*
+ * we need some kernel buffer to hold the userspace args. Just
+ * allocate one big one rather than allocating one of the right size
+ * for every single argument inside audit_log_single_execve_arg()
+ * should be <8k allocation so should be pretty safe.
+ */
+ buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
+ if (!buf) {
+ audit_panic("out of memory for argv string\n");
+ return;
+ }
- kfree(buf);
+ for (i = 0; i < axi->argc; i++) {
+ len = audit_log_single_execve_arg(context, ab, i,
+ &len_sent, p, buf);
+ if (len <= 0)
+ break;
+ p += len;
}
+ kfree(buf);
}
static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
@@ -1039,7 +1196,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
" a0=%lx a1=%lx a2=%lx a3=%lx items=%d"
" ppid=%d pid=%d auid=%u uid=%u gid=%u"
" euid=%u suid=%u fsuid=%u"
- " egid=%u sgid=%u fsgid=%u tty=%s",
+ " egid=%u sgid=%u fsgid=%u tty=%s ses=%u",
context->argv[0],
context->argv[1],
context->argv[2],
@@ -1047,11 +1204,12 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
context->name_count,
context->ppid,
context->pid,
- context->loginuid,
+ tsk->loginuid,
context->uid,
context->gid,
context->euid, context->suid, context->fsuid,
- context->egid, context->sgid, context->fsgid, tty);
+ context->egid, context->sgid, context->fsgid, tty,
+ tsk->sessionid);
mutex_unlock(&tty_mutex);
@@ -1135,7 +1293,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
case AUDIT_EXECVE: {
struct audit_aux_data_execve *axi = (void *)aux;
- audit_log_execve_info(ab, axi);
+ audit_log_execve_info(context, &ab, axi);
break; }
case AUDIT_SOCKETCALL: {
@@ -1168,13 +1326,19 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
for (i = 0; i < axs->pid_count; i++)
if (audit_log_pid_context(context, axs->target_pid[i],
- axs->target_sid[i]))
+ axs->target_auid[i],
+ axs->target_uid[i],
+ axs->target_sessionid[i],
+ axs->target_sid[i],
+ axs->target_comm[i]))
call_panic = 1;
}
if (context->target_pid &&
audit_log_pid_context(context, context->target_pid,
- context->target_sid))
+ context->target_auid, context->target_uid,
+ context->target_sessionid,
+ context->target_sid, context->target_comm))
call_panic = 1;
if (context->pwd && context->pwdmnt) {
@@ -1242,6 +1406,11 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
audit_log_end(ab);
}
+
+ /* Send end of event record to help user space know we are finished */
+ ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
+ if (ab)
+ audit_log_end(ab);
if (call_panic)
audit_panic("error converting sid to string");
}
@@ -1766,6 +1935,9 @@ void auditsc_get_stamp(struct audit_context *ctx,
ctx->auditable = 1;
}
+/* global counter which is incremented every time something logs in */
+static atomic_t session_id = ATOMIC_INIT(0);
+
/**
* audit_set_loginuid - set a task's audit_context loginuid
* @task: task whose audit context is being modified
@@ -1777,41 +1949,29 @@ void auditsc_get_stamp(struct audit_context *ctx,
*/
int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
{
+ unsigned int sessionid = atomic_inc_return(&session_id);
struct audit_context *context = task->audit_context;
- if (context) {
- /* Only log if audit is enabled */
- if (context->in_syscall) {
- struct audit_buffer *ab;
-
- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
- if (ab) {
- audit_log_format(ab, "login pid=%d uid=%u "
- "old auid=%u new auid=%u",
- task->pid, task->uid,
- context->loginuid, loginuid);
- audit_log_end(ab);
- }
+ if (context && context->in_syscall) {
+ struct audit_buffer *ab;
+
+ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
+ if (ab) {
+ audit_log_format(ab, "login pid=%d uid=%u "
+ "old auid=%u new auid=%u"
+ " old ses=%u new ses=%u",
+ task->pid, task->uid,
+ task->loginuid, loginuid,
+ task->sessionid, sessionid);
+ audit_log_end(ab);
}
- context->loginuid = loginuid;
}
+ task->sessionid = sessionid;
+ task->loginuid = loginuid;
return 0;
}
/**
- * audit_get_loginuid - get the loginuid for an audit_context
- * @ctx: the audit_context
- *
- * Returns the context's loginuid or -1 if @ctx is NULL.
- */
-uid_t audit_get_loginuid(struct audit_context *ctx)
-{
- return ctx ? ctx->loginuid : -1;
-}
-
-EXPORT_SYMBOL(audit_get_loginuid);
-
-/**
* __audit_mq_open - record audit data for a POSIX MQ open
* @oflag: open flag
* @mode: mode bits
@@ -2070,8 +2230,6 @@ int __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode
return 0;
}
-int audit_argv_kb = 32;
-
int audit_bprm(struct linux_binprm *bprm)
{
struct audit_aux_data_execve *ax;
@@ -2080,14 +2238,6 @@ int audit_bprm(struct linux_binprm *bprm)
if (likely(!audit_enabled || !context || context->dummy))
return 0;
- /*
- * Even though the stack code doesn't limit the arg+env size any more,
- * the audit code requires that _all_ arguments be logged in a single
- * netlink skb. Hence cap it :-(
- */
- if (bprm->argv_len > (audit_argv_kb << 10))
- return -E2BIG;
-
ax = kmalloc(sizeof(*ax), GFP_KERNEL);
if (!ax)
return -ENOMEM;
@@ -2193,7 +2343,11 @@ void __audit_ptrace(struct task_struct *t)
struct audit_context *context = current->audit_context;
context->target_pid = t->pid;
+ context->target_auid = audit_get_loginuid(t);
+ context->target_uid = t->uid;
+ context->target_sessionid = audit_get_sessionid(t);
selinux_get_task_sid(t, &context->target_sid);
+ memcpy(context->target_comm, t->comm, TASK_COMM_LEN);
}
/**
@@ -2216,8 +2370,8 @@ int __audit_signal_info(int sig, struct task_struct *t)
if (audit_pid && t->tgid == audit_pid) {
if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1) {
audit_sig_pid = tsk->pid;
- if (ctx)
- audit_sig_uid = ctx->loginuid;
+ if (tsk->loginuid != -1)
+ audit_sig_uid = tsk->loginuid;
else
audit_sig_uid = tsk->uid;
selinux_get_task_sid(tsk, &audit_sig_sid);
@@ -2230,7 +2384,11 @@ int __audit_signal_info(int sig, struct task_struct *t)
* in audit_context */
if (!ctx->target_pid) {
ctx->target_pid = t->tgid;
+ ctx->target_auid = audit_get_loginuid(t);
+ ctx->target_uid = t->uid;
+ ctx->target_sessionid = audit_get_sessionid(t);
selinux_get_task_sid(t, &ctx->target_sid);
+ memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN);
return 0;
}
@@ -2247,7 +2405,11 @@ int __audit_signal_info(int sig, struct task_struct *t)
BUG_ON(axp->pid_count >= AUDIT_AUX_PIDS);
axp->target_pid[axp->pid_count] = t->tgid;
+ axp->target_auid[axp->pid_count] = audit_get_loginuid(t);
+ axp->target_uid[axp->pid_count] = t->uid;
+ axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t);
selinux_get_task_sid(t, &axp->target_sid[axp->pid_count]);
+ memcpy(axp->target_comm[axp->pid_count], t->comm, TASK_COMM_LEN);
axp->pid_count++;
return 0;
@@ -2264,6 +2426,8 @@ void audit_core_dumps(long signr)
{
struct audit_buffer *ab;
u32 sid;
+ uid_t auid = audit_get_loginuid(current);
+ unsigned int sessionid = audit_get_sessionid(current);
if (!audit_enabled)
return;
@@ -2272,9 +2436,8 @@ void audit_core_dumps(long signr)
return;
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
- audit_log_format(ab, "auid=%u uid=%u gid=%u",
- audit_get_loginuid(current->audit_context),
- current->uid, current->gid);
+ audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u",
+ auid, current->uid, current->gid, sessionid);
selinux_get_task_sid(current, &sid);
if (sid) {
char *ctx = NULL;
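audit_log_single_execve_arg() above splits an oversized execve argument across several AUDIT_EXECVE records, capping each at MAX_EXECVE_AUDIT_LEN (7500) bytes and halving the cap when the argument has to be hex-encoded. A stand-alone sketch of just the per-argument chunk-count arithmetic, ignoring how multiple arguments are packed into one buffer (names invented for illustration):

#define MAX_EXECVE_AUDIT_LEN 7500

/* How many records one argument of 'len' bytes needs on its own. */
static unsigned int my_execve_arg_chunks(size_t len, int has_cntl)
{
	/* Hex encoding doubles the logged size, so the per-record cap is halved. */
	size_t cap = has_cntl ? MAX_EXECVE_AUDIT_LEN / 2 : MAX_EXECVE_AUDIT_LEN;

	return (len + cap - 1) / cap;	/* e.g. a 20000-byte plain arg -> 3 records */
}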
diff --git a/kernel/backtracetest.c b/kernel/backtracetest.c
new file mode 100644
index 00000000000..d1a7605c5b8
--- /dev/null
+++ b/kernel/backtracetest.c
@@ -0,0 +1,48 @@
+/*
+ * Simple stack backtrace regression test module
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+
+static struct timer_list backtrace_timer;
+
+static void backtrace_test_timer(unsigned long data)
+{
+ printk("Testing a backtrace from irq context.\n");
+ printk("The following trace is a kernel self test and not a bug!\n");
+ dump_stack();
+}
+static int backtrace_regression_test(void)
+{
+ printk("====[ backtrace testing ]===========\n");
+ printk("Testing a backtrace from process context.\n");
+ printk("The following trace is a kernel self test and not a bug!\n");
+ dump_stack();
+
+ init_timer(&backtrace_timer);
+ backtrace_timer.function = backtrace_test_timer;
+ mod_timer(&backtrace_timer, jiffies + 10);
+
+ msleep(10);
+ printk("====[ end of backtrace testing ]====\n");
+ return 0;
+}
+
+static void exitf(void)
+{
+}
+
+module_init(backtrace_regression_test);
+module_exit(exitf);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 3fe21e19c96..1a3c23936d4 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1,6 +1,4 @@
/*
- * kernel/cgroup.c
- *
* Generic process-grouping system.
*
* Based originally on the cpuset system, extracted by Paul Menage
@@ -2200,7 +2198,8 @@ static void cgroup_init_subsys(struct cgroup_subsys *ss)
{
struct cgroup_subsys_state *css;
struct list_head *l;
- printk(KERN_ERR "Initializing cgroup subsys %s\n", ss->name);
+
+ printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
/* Create the top cgroup state for this subsystem */
ss->root = &rootnode;
@@ -2273,7 +2272,7 @@ int __init cgroup_init_early(void)
BUG_ON(!ss->create);
BUG_ON(!ss->destroy);
if (ss->subsys_id != i) {
- printk(KERN_ERR "Subsys %s id == %d\n",
+ printk(KERN_ERR "cgroup: Subsys %s id == %d\n",
ss->name, ss->subsys_id);
BUG();
}
@@ -2605,7 +2604,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename));
if (IS_ERR(dentry)) {
printk(KERN_INFO
- "Couldn't allocate dentry for %s: %ld\n", nodename,
+ "cgroup: Couldn't allocate dentry for %s: %ld\n", nodename,
PTR_ERR(dentry));
ret = PTR_ERR(dentry);
goto out_release;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 6b3a0c15144..e0d3a4f56ec 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -15,9 +15,8 @@
#include <linux/stop_machine.h>
#include <linux/mutex.h>
-/* This protects CPUs going up and down... */
+/* Serializes the updates to cpu_online_map, cpu_present_map */
static DEFINE_MUTEX(cpu_add_remove_lock);
-static DEFINE_MUTEX(cpu_bitmask_lock);
static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
@@ -26,52 +25,123 @@ static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
*/
static int cpu_hotplug_disabled;
-#ifdef CONFIG_HOTPLUG_CPU
+static struct {
+ struct task_struct *active_writer;
+ struct mutex lock; /* Synchronizes accesses to refcount, */
+ /*
+ * Also blocks the new readers during
+ * an ongoing cpu hotplug operation.
+ */
+ int refcount;
+ wait_queue_head_t writer_queue;
+} cpu_hotplug;
-/* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */
-static struct task_struct *recursive;
-static int recursive_depth;
+#define writer_exists() (cpu_hotplug.active_writer != NULL)
-void lock_cpu_hotplug(void)
+void __init cpu_hotplug_init(void)
{
- struct task_struct *tsk = current;
-
- if (tsk == recursive) {
- static int warnings = 10;
- if (warnings) {
- printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n");
- WARN_ON(1);
- warnings--;
- }
- recursive_depth++;
+ cpu_hotplug.active_writer = NULL;
+ mutex_init(&cpu_hotplug.lock);
+ cpu_hotplug.refcount = 0;
+ init_waitqueue_head(&cpu_hotplug.writer_queue);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+void get_online_cpus(void)
+{
+ might_sleep();
+ if (cpu_hotplug.active_writer == current)
return;
- }
- mutex_lock(&cpu_bitmask_lock);
- recursive = tsk;
+ mutex_lock(&cpu_hotplug.lock);
+ cpu_hotplug.refcount++;
+ mutex_unlock(&cpu_hotplug.lock);
+
}
-EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
+EXPORT_SYMBOL_GPL(get_online_cpus);
-void unlock_cpu_hotplug(void)
+void put_online_cpus(void)
{
- WARN_ON(recursive != current);
- if (recursive_depth) {
- recursive_depth--;
+ if (cpu_hotplug.active_writer == current)
return;
- }
- recursive = NULL;
- mutex_unlock(&cpu_bitmask_lock);
+ mutex_lock(&cpu_hotplug.lock);
+ cpu_hotplug.refcount--;
+
+ if (unlikely(writer_exists()) && !cpu_hotplug.refcount)
+ wake_up(&cpu_hotplug.writer_queue);
+
+ mutex_unlock(&cpu_hotplug.lock);
+
}
-EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
+EXPORT_SYMBOL_GPL(put_online_cpus);
#endif /* CONFIG_HOTPLUG_CPU */
+/*
+ * The following two API's must be used when attempting
+ * to serialize the updates to cpu_online_map, cpu_present_map.
+ */
+void cpu_maps_update_begin(void)
+{
+ mutex_lock(&cpu_add_remove_lock);
+}
+
+void cpu_maps_update_done(void)
+{
+ mutex_unlock(&cpu_add_remove_lock);
+}
+
+/*
+ * This ensures that the hotplug operation can begin only when the
+ * refcount goes to zero.
+ *
+ * Note that during a cpu-hotplug operation, the new readers, if any,
+ * will be blocked by the cpu_hotplug.lock
+ *
+ * Since cpu_hotplug_begin() is always called after invoking
+ * cpu_maps_update_begin(), we can be sure that only one writer is active.
+ *
+ * Note that theoretically, there is a possibility of a livelock:
+ * - Refcount goes to zero, last reader wakes up the sleeping
+ * writer.
+ * - Last reader unlocks the cpu_hotplug.lock.
+ * - A new reader arrives at this moment, bumps up the refcount.
+ * - The writer acquires the cpu_hotplug.lock finds the refcount
+ * non zero and goes to sleep again.
+ *
+ * However, this is very difficult to achieve in practice since
+ * get_online_cpus() not an api which is called all that often.
+ *
+ */
+static void cpu_hotplug_begin(void)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ mutex_lock(&cpu_hotplug.lock);
+
+ cpu_hotplug.active_writer = current;
+ add_wait_queue_exclusive(&cpu_hotplug.writer_queue, &wait);
+ while (cpu_hotplug.refcount) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ mutex_unlock(&cpu_hotplug.lock);
+ schedule();
+ mutex_lock(&cpu_hotplug.lock);
+ }
+ remove_wait_queue_locked(&cpu_hotplug.writer_queue, &wait);
+}
+
+static void cpu_hotplug_done(void)
+{
+ cpu_hotplug.active_writer = NULL;
+ mutex_unlock(&cpu_hotplug.lock);
+}
/* Need to know about CPUs going up/down? */
int __cpuinit register_cpu_notifier(struct notifier_block *nb)
{
int ret;
- mutex_lock(&cpu_add_remove_lock);
+ cpu_maps_update_begin();
ret = raw_notifier_chain_register(&cpu_chain, nb);
- mutex_unlock(&cpu_add_remove_lock);
+ cpu_maps_update_done();
return ret;
}
@@ -81,9 +151,9 @@ EXPORT_SYMBOL(register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
- mutex_lock(&cpu_add_remove_lock);
+ cpu_maps_update_begin();
raw_notifier_chain_unregister(&cpu_chain, nb);
- mutex_unlock(&cpu_add_remove_lock);
+ cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
@@ -147,7 +217,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
if (!cpu_online(cpu))
return -EINVAL;
- raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
+ cpu_hotplug_begin();
err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
hcpu, -1, &nr_calls);
if (err == NOTIFY_BAD) {
@@ -166,9 +236,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
cpu_clear(cpu, tmp);
set_cpus_allowed(current, tmp);
- mutex_lock(&cpu_bitmask_lock);
p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
- mutex_unlock(&cpu_bitmask_lock);
if (IS_ERR(p) || cpu_online(cpu)) {
/* CPU didn't die: tell everyone. Can't complain. */
@@ -202,7 +270,7 @@ out_thread:
out_allowed:
set_cpus_allowed(current, old_allowed);
out_release:
- raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
+ cpu_hotplug_done();
return err;
}
@@ -210,13 +278,13 @@ int cpu_down(unsigned int cpu)
{
int err = 0;
- mutex_lock(&cpu_add_remove_lock);
+ cpu_maps_update_begin();
if (cpu_hotplug_disabled)
err = -EBUSY;
else
err = _cpu_down(cpu, 0);
- mutex_unlock(&cpu_add_remove_lock);
+ cpu_maps_update_done();
return err;
}
#endif /*CONFIG_HOTPLUG_CPU*/
@@ -231,7 +299,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
if (cpu_online(cpu) || !cpu_present(cpu))
return -EINVAL;
- raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
+ cpu_hotplug_begin();
ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
-1, &nr_calls);
if (ret == NOTIFY_BAD) {
@@ -243,9 +311,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
}
/* Arch-specific enabling code. */
- mutex_lock(&cpu_bitmask_lock);
ret = __cpu_up(cpu);
- mutex_unlock(&cpu_bitmask_lock);
if (ret != 0)
goto out_notify;
BUG_ON(!cpu_online(cpu));
@@ -257,7 +323,7 @@ out_notify:
if (ret != 0)
__raw_notifier_call_chain(&cpu_chain,
CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
- raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
+ cpu_hotplug_done();
return ret;
}
@@ -275,13 +341,13 @@ int __cpuinit cpu_up(unsigned int cpu)
return -EINVAL;
}
- mutex_lock(&cpu_add_remove_lock);
+ cpu_maps_update_begin();
if (cpu_hotplug_disabled)
err = -EBUSY;
else
err = _cpu_up(cpu, 0);
- mutex_unlock(&cpu_add_remove_lock);
+ cpu_maps_update_done();
return err;
}
@@ -292,7 +358,7 @@ int disable_nonboot_cpus(void)
{
int cpu, first_cpu, error = 0;
- mutex_lock(&cpu_add_remove_lock);
+ cpu_maps_update_begin();
first_cpu = first_cpu(cpu_online_map);
/* We take down all of the non-boot CPUs in one shot to avoid races
* with the userspace trying to use the CPU hotplug at the same time
@@ -319,7 +385,7 @@ int disable_nonboot_cpus(void)
} else {
printk(KERN_ERR "Non-boot CPUs are not disabled\n");
}
- mutex_unlock(&cpu_add_remove_lock);
+ cpu_maps_update_done();
return error;
}
@@ -328,7 +394,7 @@ void enable_nonboot_cpus(void)
int cpu, error;
/* Allow everyone to use the CPU hotplug again */
- mutex_lock(&cpu_add_remove_lock);
+ cpu_maps_update_begin();
cpu_hotplug_disabled = 0;
if (cpus_empty(frozen_cpus))
goto out;
@@ -344,6 +410,6 @@ void enable_nonboot_cpus(void)
}
cpus_clear(frozen_cpus);
out:
- mutex_unlock(&cpu_add_remove_lock);
+ cpu_maps_update_done();
}
#endif /* CONFIG_PM_SLEEP_SMP */
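The refcount-based scheme above replaces lock_cpu_hotplug()/unlock_cpu_hotplug() with get_online_cpus()/put_online_cpus() for readers and cpu_maps_update_begin()/cpu_maps_update_done() for writers. A minimal sketch of the reader-side usage the new API expects (the loop body is illustrative only):

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

static void my_walk_online_cpus(void)
{
	int cpu;

	get_online_cpus();		/* pins cpu_online_map against hotplug */
	for_each_online_cpu(cpu) {
		/* per-cpu work goes here; 'cpu' cannot go offline while the ref is held */
		pr_debug("cpu %d is online\n", cpu);
	}
	put_online_cpus();		/* wakes a pending hotplug writer when the last ref drops */
}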
diff --git a/kernel/cpu_acct.c b/kernel/cpu_acct.c
deleted file mode 100644
index 731e47e7f16..00000000000
--- a/kernel/cpu_acct.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * kernel/cpu_acct.c - CPU accounting cgroup subsystem
- *
- * Copyright (C) Google Inc, 2006
- *
- * Developed by Paul Menage (menage@google.com) and Balbir Singh
- * (balbir@in.ibm.com)
- *
- */
-
-/*
- * Example cgroup subsystem for reporting total CPU usage of tasks in a
- * cgroup, along with percentage load over a time interval
- */
-
-#include <linux/module.h>
-#include <linux/cgroup.h>
-#include <linux/fs.h>
-#include <linux/rcupdate.h>
-
-#include <asm/div64.h>
-
-struct cpuacct {
- struct cgroup_subsys_state css;
- spinlock_t lock;
- /* total time used by this class */
- cputime64_t time;
-
- /* time when next load calculation occurs */
- u64 next_interval_check;
-
- /* time used in current period */
- cputime64_t current_interval_time;
-
- /* time used in last period */
- cputime64_t last_interval_time;
-};
-
-struct cgroup_subsys cpuacct_subsys;
-
-static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
-{
- return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
- struct cpuacct, css);
-}
-
-static inline struct cpuacct *task_ca(struct task_struct *task)
-{
- return container_of(task_subsys_state(task, cpuacct_subsys_id),
- struct cpuacct, css);
-}
-
-#define INTERVAL (HZ * 10)
-
-static inline u64 next_interval_boundary(u64 now)
-{
- /* calculate the next interval boundary beyond the
- * current time */
- do_div(now, INTERVAL);
- return (now + 1) * INTERVAL;
-}
-
-static struct cgroup_subsys_state *cpuacct_create(
- struct cgroup_subsys *ss, struct cgroup *cont)
-{
- struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
-
- if (!ca)
- return ERR_PTR(-ENOMEM);
- spin_lock_init(&ca->lock);
- ca->next_interval_check = next_interval_boundary(get_jiffies_64());
- return &ca->css;
-}
-
-static void cpuacct_destroy(struct cgroup_subsys *ss,
- struct cgroup *cont)
-{
- kfree(cgroup_ca(cont));
-}
-
-/* Lazily update the load calculation if necessary. Called with ca locked */
-static void cpuusage_update(struct cpuacct *ca)
-{
- u64 now = get_jiffies_64();
-
- /* If we're not due for an update, return */
- if (ca->next_interval_check > now)
- return;
-
- if (ca->next_interval_check <= (now - INTERVAL)) {
- /* If it's been more than an interval since the last
- * check, then catch up - the last interval must have
- * been zero load */
- ca->last_interval_time = 0;
- ca->next_interval_check = next_interval_boundary(now);
- } else {
- /* If a steal takes the last interval time negative,
- * then we just ignore it */
- if ((s64)ca->current_interval_time > 0)
- ca->last_interval_time = ca->current_interval_time;
- else
- ca->last_interval_time = 0;
- ca->next_interval_check += INTERVAL;
- }
- ca->current_interval_time = 0;
-}
-
-static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
-{
- struct cpuacct *ca = cgroup_ca(cont);
- u64 time;
-
- spin_lock_irq(&ca->lock);
- cpuusage_update(ca);
- time = cputime64_to_jiffies64(ca->time);
- spin_unlock_irq(&ca->lock);
-
- /* Convert 64-bit jiffies to seconds */
- time *= 1000;
- do_div(time, HZ);
- return time;
-}
-
-static u64 load_read(struct cgroup *cont, struct cftype *cft)
-{
- struct cpuacct *ca = cgroup_ca(cont);
- u64 time;
-
- /* Find the time used in the previous interval */
- spin_lock_irq(&ca->lock);
- cpuusage_update(ca);
- time = cputime64_to_jiffies64(ca->last_interval_time);
- spin_unlock_irq(&ca->lock);
-
- /* Convert time to a percentage, to give the load in the
- * previous period */
- time *= 100;
- do_div(time, INTERVAL);
-
- return time;
-}
-
-static struct cftype files[] = {
- {
- .name = "usage",
- .read_uint = cpuusage_read,
- },
- {
- .name = "load",
- .read_uint = load_read,
- }
-};
-
-static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
-{
- return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
-}
-
-void cpuacct_charge(struct task_struct *task, cputime_t cputime)
-{
-
- struct cpuacct *ca;
- unsigned long flags;
-
- if (!cpuacct_subsys.active)
- return;
- rcu_read_lock();
- ca = task_ca(task);
- if (ca) {
- spin_lock_irqsave(&ca->lock, flags);
- cpuusage_update(ca);
- ca->time = cputime64_add(ca->time, cputime);
- ca->current_interval_time =
- cputime64_add(ca->current_interval_time, cputime);
- spin_unlock_irqrestore(&ca->lock, flags);
- }
- rcu_read_unlock();
-}
-
-struct cgroup_subsys cpuacct_subsys = {
- .name = "cpuacct",
- .create = cpuacct_create,
- .destroy = cpuacct_destroy,
- .populate = cpuacct_populate,
- .subsys_id = cpuacct_subsys_id,
-};
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 50f5dc46368..cfaf6419d81 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -537,10 +537,10 @@ static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
*
* Call with cgroup_mutex held. May take callback_mutex during
* call due to the kfifo_alloc() and kmalloc() calls. May nest
- * a call to the lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
+ * a call to the get_online_cpus()/put_online_cpus() pair.
* Must not be called holding callback_mutex, because we must not
- * call lock_cpu_hotplug() while holding callback_mutex. Elsewhere
- * the kernel nests callback_mutex inside lock_cpu_hotplug() calls.
+ * call get_online_cpus() while holding callback_mutex. Elsewhere
+ * the kernel nests callback_mutex inside get_online_cpus() calls.
* So the reverse nesting would risk an ABBA deadlock.
*
* The three key local variables below are:
@@ -691,9 +691,9 @@ restart:
rebuild:
/* Have scheduler rebuild sched domains */
- lock_cpu_hotplug();
+ get_online_cpus();
partition_sched_domains(ndoms, doms);
- unlock_cpu_hotplug();
+ put_online_cpus();
done:
if (q && !IS_ERR(q))
@@ -1617,10 +1617,10 @@ static struct cgroup_subsys_state *cpuset_create(
*
* If the cpuset being removed has its flag 'sched_load_balance'
* enabled, then simulate turning sched_load_balance off, which
- * will call rebuild_sched_domains(). The lock_cpu_hotplug()
+ * will call rebuild_sched_domains(). The get_online_cpus()
* call in rebuild_sched_domains() must not be made while holding
* callback_mutex. Elsewhere the kernel nests callback_mutex inside
- * lock_cpu_hotplug() calls. So the reverse nesting would risk an
+ * get_online_cpus() calls. So the reverse nesting would risk an
* ABBA deadlock.
*/
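
These comment updates track the API rename: the old lock_cpu_hotplug()/unlock_cpu_hotplug() pair is replaced by the refcounted get_online_cpus()/put_online_cpus(). A minimal read-side sketch (illustrative only; my_count_online_cpus is hypothetical):

#include <linux/cpu.h>
#include <linux/cpumask.h>

/* Hypothetical helper: walk the online map with hotplug held off. */
static int my_count_online_cpus(void)
{
        int cpu, n = 0;

        get_online_cpus();              /* pin the online map */
        for_each_online_cpu(cpu)
                n++;
        put_online_cpus();              /* hotplug may proceed again */

        return n;
}
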
diff --git a/kernel/exit.c b/kernel/exit.c
index f1aec27f1df..bfb1c0e940e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -249,7 +249,7 @@ static int has_stopped_jobs(struct pid *pgrp)
struct task_struct *p;
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
- if (p->state != TASK_STOPPED)
+ if (!task_is_stopped(p))
continue;
retval = 1;
break;
@@ -614,7 +614,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
p->parent = p->real_parent;
add_parent(p);
- if (p->state == TASK_TRACED) {
+ if (task_is_traced(p)) {
/*
* If it was at a trace stop, turn it into
* a normal stop since it's no longer being
@@ -1357,7 +1357,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
int __user *stat_addr, struct rusage __user *ru)
{
int retval, exit_code;
- struct pid_namespace *ns;
+ pid_t pid;
if (!p->exit_code)
return 0;
@@ -1376,21 +1376,19 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
* keep holding onto the tasklist_lock while we call getrusage and
* possibly take page faults for user memory.
*/
- ns = current->nsproxy->pid_ns;
+ pid = task_pid_nr_ns(p, current->nsproxy->pid_ns);
get_task_struct(p);
read_unlock(&tasklist_lock);
if (unlikely(noreap)) {
- pid_t pid = task_pid_nr_ns(p, ns);
uid_t uid = p->uid;
int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
exit_code = p->exit_code;
- if (unlikely(!exit_code) ||
- unlikely(p->state & TASK_TRACED))
+ if (unlikely(!exit_code) || unlikely(p->exit_state))
goto bail_ref;
return wait_noreap_copyout(p, pid, uid,
- why, (exit_code << 8) | 0x7f,
+ why, exit_code,
infop, ru);
}
@@ -1452,11 +1450,11 @@ bail_ref:
if (!retval && infop)
retval = put_user(exit_code, &infop->si_status);
if (!retval && infop)
- retval = put_user(task_pid_nr_ns(p, ns), &infop->si_pid);
+ retval = put_user(pid, &infop->si_pid);
if (!retval && infop)
retval = put_user(p->uid, &infop->si_uid);
if (!retval)
- retval = task_pid_nr_ns(p, ns);
+ retval = pid;
put_task_struct(p);
BUG_ON(!retval);
@@ -1565,60 +1563,51 @@ repeat:
}
allowed = 1;
- switch (p->state) {
- case TASK_TRACED:
- /*
- * When we hit the race with PTRACE_ATTACH,
- * we will not report this child. But the
- * race means it has not yet been moved to
- * our ptrace_children list, so we need to
- * set the flag here to avoid a spurious ECHILD
- * when the race happens with the only child.
- */
- flag = 1;
- if (!my_ptrace_child(p))
- continue;
- /*FALLTHROUGH*/
- case TASK_STOPPED:
+ if (task_is_stopped_or_traced(p)) {
/*
* It's stopped now, so it might later
* continue, exit, or stop again.
+ *
+ * When we hit the race with PTRACE_ATTACH, we
+ * will not report this child. But the race
+ * means it has not yet been moved to our
+ * ptrace_children list, so we need to set the
+ * flag here to avoid a spurious ECHILD when
+ * the race happens with the only child.
*/
flag = 1;
- if (!(options & WUNTRACED) &&
- !my_ptrace_child(p))
- continue;
+
+ if (!my_ptrace_child(p)) {
+ if (task_is_traced(p))
+ continue;
+ if (!(options & WUNTRACED))
+ continue;
+ }
+
retval = wait_task_stopped(p, ret == 2,
- (options & WNOWAIT),
- infop,
- stat_addr, ru);
+ (options & WNOWAIT), infop,
+ stat_addr, ru);
if (retval == -EAGAIN)
goto repeat;
if (retval != 0) /* He released the lock. */
goto end;
- break;
- default:
- // case EXIT_DEAD:
- if (p->exit_state == EXIT_DEAD)
+ } else if (p->exit_state == EXIT_DEAD) {
+ continue;
+ } else if (p->exit_state == EXIT_ZOMBIE) {
+ /*
+ * Eligible but we cannot release it yet:
+ */
+ if (ret == 2)
+ goto check_continued;
+ if (!likely(options & WEXITED))
continue;
- // case EXIT_ZOMBIE:
- if (p->exit_state == EXIT_ZOMBIE) {
- /*
- * Eligible but we cannot release
- * it yet:
- */
- if (ret == 2)
- goto check_continued;
- if (!likely(options & WEXITED))
- continue;
- retval = wait_task_zombie(
- p, (options & WNOWAIT),
- infop, stat_addr, ru);
- /* He released the lock. */
- if (retval != 0)
- goto end;
- break;
- }
+ retval = wait_task_zombie(p,
+ (options & WNOWAIT), infop,
+ stat_addr, ru);
+ /* He released the lock. */
+ if (retval != 0)
+ goto end;
+ } else {
check_continued:
/*
* It's running now, so it might later
@@ -1627,12 +1616,11 @@ check_continued:
flag = 1;
if (!unlikely(options & WCONTINUED))
continue;
- retval = wait_task_continued(
- p, (options & WNOWAIT),
- infop, stat_addr, ru);
+ retval = wait_task_continued(p,
+ (options & WNOWAIT), infop,
+ stat_addr, ru);
if (retval != 0) /* He released the lock. */
goto end;
- break;
}
}
if (!flag) {
diff --git a/kernel/extable.c b/kernel/extable.c
index 7fe26285531..a26cb2e1702 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -46,7 +46,8 @@ int core_kernel_text(unsigned long addr)
addr <= (unsigned long)_etext)
return 1;
- if (addr >= (unsigned long)_sinittext &&
+ if (system_state == SYSTEM_BOOTING &&
+ addr >= (unsigned long)_sinittext &&
addr <= (unsigned long)_einittext)
return 1;
return 0;
diff --git a/kernel/fork.c b/kernel/fork.c
index ddafdfac945..05e0b6f4365 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -51,6 +51,7 @@
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/proc_fs.h>
+#include <linux/blkdev.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -392,6 +393,7 @@ void fastcall __mmdrop(struct mm_struct *mm)
destroy_context(mm);
free_mm(mm);
}
+EXPORT_SYMBOL_GPL(__mmdrop);
/*
* Decrement the use count and release all resources for an mm.
@@ -791,6 +793,31 @@ out:
return error;
}
+static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
+{
+#ifdef CONFIG_BLOCK
+ struct io_context *ioc = current->io_context;
+
+ if (!ioc)
+ return 0;
+ /*
+ * Share io context with parent, if CLONE_IO is set
+ */
+ if (clone_flags & CLONE_IO) {
+ tsk->io_context = ioc_task_link(ioc);
+ if (unlikely(!tsk->io_context))
+ return -ENOMEM;
+ } else if (ioprio_valid(ioc->ioprio)) {
+ tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
+ if (unlikely(!tsk->io_context))
+ return -ENOMEM;
+
+ tsk->io_context->ioprio = ioc->ioprio;
+ }
+#endif
+ return 0;
+}
+
/*
* Helper to unshare the files of the current task.
* We don't want to expose copy_files internals to
@@ -1045,6 +1072,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
copy_flags(clone_flags, p);
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
+#ifdef CONFIG_PREEMPT_RCU
+ p->rcu_read_lock_nesting = 0;
+ p->rcu_flipctr_idx = 0;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
p->vfork_done = NULL;
spin_lock_init(&p->alloc_lock);
@@ -1056,6 +1087,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->gtime = cputime_zero;
p->utimescaled = cputime_zero;
p->stimescaled = cputime_zero;
+ p->prev_utime = cputime_zero;
+ p->prev_stime = cputime_zero;
+
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+ p->last_switch_count = 0;
+ p->last_switch_timestamp = 0;
+#endif
#ifdef CONFIG_TASK_XACCT
p->rchar = 0; /* I/O counter: bytes read */
@@ -1121,6 +1159,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->blocked_on = NULL; /* not blocked yet */
#endif
+ /* Perform scheduler related setup. Assign this task to a CPU. */
+ sched_fork(p, clone_flags);
+
if ((retval = security_task_alloc(p)))
goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
@@ -1142,15 +1183,17 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_cleanup_mm;
if ((retval = copy_namespaces(clone_flags, p)))
goto bad_fork_cleanup_keys;
+ if ((retval = copy_io(clone_flags, p)))
+ goto bad_fork_cleanup_namespaces;
retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
if (retval)
- goto bad_fork_cleanup_namespaces;
+ goto bad_fork_cleanup_io;
if (pid != &init_struct_pid) {
retval = -ENOMEM;
pid = alloc_pid(task_active_pid_ns(p));
if (!pid)
- goto bad_fork_cleanup_namespaces;
+ goto bad_fork_cleanup_io;
if (clone_flags & CLONE_NEWPID) {
retval = pid_ns_prepare_proc(task_active_pid_ns(p));
@@ -1191,6 +1234,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
+ clear_all_latency_tracing(p);
/* Our parent execution domain becomes current domain
These must match for thread signalling to apply */
@@ -1210,9 +1254,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
INIT_LIST_HEAD(&p->ptrace_children);
INIT_LIST_HEAD(&p->ptrace_list);
- /* Perform scheduler related setup. Assign this task to a CPU. */
- sched_fork(p, clone_flags);
-
/* Now that the task is set up, run cgroup callbacks if
* necessary. We need to run them before the task is visible
* on the tasklist. */
@@ -1222,9 +1263,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
- /* for sys_ioprio_set(IOPRIO_WHO_PGRP) */
- p->ioprio = current->ioprio;
-
/*
* The task hasn't been attached yet, so its cpus_allowed mask will
* not be changed, nor will its assigned CPU.
@@ -1235,6 +1273,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 * parent's CPU). This avoids a lot of nasty races.
*/
p->cpus_allowed = current->cpus_allowed;
+ p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
!cpu_online(task_cpu(p))))
set_task_cpu(p, smp_processor_id());
@@ -1290,23 +1329,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
__ptrace_link(p, current->parent);
if (thread_group_leader(p)) {
- if (clone_flags & CLONE_NEWPID) {
+ if (clone_flags & CLONE_NEWPID)
p->nsproxy->pid_ns->child_reaper = p;
- p->signal->tty = NULL;
- set_task_pgrp(p, p->pid);
- set_task_session(p, p->pid);
- attach_pid(p, PIDTYPE_PGID, pid);
- attach_pid(p, PIDTYPE_SID, pid);
- } else {
- p->signal->tty = current->signal->tty;
- set_task_pgrp(p, task_pgrp_nr(current));
- set_task_session(p, task_session_nr(current));
- attach_pid(p, PIDTYPE_PGID,
- task_pgrp(current));
- attach_pid(p, PIDTYPE_SID,
- task_session(current));
- }
+ p->signal->tty = current->signal->tty;
+ set_task_pgrp(p, task_pgrp_nr(current));
+ set_task_session(p, task_session_nr(current));
+ attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
+ attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
}
@@ -1324,6 +1354,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
bad_fork_free_pid:
if (pid != &init_struct_pid)
free_pid(pid);
+bad_fork_cleanup_io:
+ put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
bad_fork_cleanup_keys:
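
copy_io() above introduces the CLONE_IO clone flag, which shares the parent's io_context with the child instead of only copying its ioprio. A hedged userspace sketch of the flag in use (assuming CLONE_IO is visible in the installed headers; its value in this kernel series is 0x80000000):

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef CLONE_IO
#define CLONE_IO 0x80000000     /* value used by this kernel series */
#endif

static int child_fn(void *arg)
{
        printf("child %d shares the parent's io_context\n", getpid());
        return 0;
}

int main(void)
{
        const size_t stack_sz = 64 * 1024;
        char *stack = malloc(stack_sz);
        pid_t pid;

        if (!stack)
                return 1;
        /* the child stack grows down on most architectures */
        pid = clone(child_fn, stack + stack_sz, CLONE_IO | SIGCHLD, NULL);
        if (pid < 0) {
                perror("clone");
                return 1;
        }
        waitpid(pid, NULL, 0);
        free(stack);
        return 0;
}
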
diff --git a/kernel/futex.c b/kernel/futex.c
index 32710451dc2..a6baaec44b8 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -109,6 +109,9 @@ struct futex_q {
/* Optional priority inheritance state: */
struct futex_pi_state *pi_state;
struct task_struct *task;
+
+ /* Bitset for the optional bitmasked wakeup */
+ u32 bitset;
};
/*
@@ -181,8 +184,8 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
* For other futexes, it points to &current->mm->mmap_sem and
* caller must have taken the reader lock. but NOT any spinlocks.
*/
-int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
- union futex_key *key)
+static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
+ union futex_key *key)
{
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
@@ -268,14 +271,13 @@ int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
}
return err;
}
-EXPORT_SYMBOL_GPL(get_futex_key);
/*
* Take a reference to the resource addressed by a key.
* Can be called while holding spinlocks.
*
*/
-inline void get_futex_key_refs(union futex_key *key)
+static void get_futex_key_refs(union futex_key *key)
{
if (key->both.ptr == 0)
return;
@@ -288,13 +290,12 @@ inline void get_futex_key_refs(union futex_key *key)
break;
}
}
-EXPORT_SYMBOL_GPL(get_futex_key_refs);
/*
* Drop a reference to the resource addressed by a key.
* The hash bucket spinlock must not be held.
*/
-void drop_futex_key_refs(union futex_key *key)
+static void drop_futex_key_refs(union futex_key *key)
{
if (!key->both.ptr)
return;
@@ -307,7 +308,6 @@ void drop_futex_key_refs(union futex_key *key)
break;
}
}
-EXPORT_SYMBOL_GPL(drop_futex_key_refs);
static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
@@ -661,7 +661,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
if (curval == -EFAULT)
ret = -EFAULT;
- if (curval != uval)
+ else if (curval != uval)
ret = -EINVAL;
if (ret) {
spin_unlock(&pi_state->pi_mutex.wait_lock);
@@ -725,7 +725,7 @@ double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
* to this virtual address:
*/
static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
- int nr_wake)
+ int nr_wake, u32 bitset)
{
struct futex_hash_bucket *hb;
struct futex_q *this, *next;
@@ -733,6 +733,9 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
union futex_key key;
int ret;
+ if (!bitset)
+ return -EINVAL;
+
futex_lock_mm(fshared);
ret = get_futex_key(uaddr, fshared, &key);
@@ -749,6 +752,11 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
ret = -EINVAL;
break;
}
+
+ /* Check if one of the bits is set in both bitsets */
+ if (!(this->bitset & bitset))
+ continue;
+
wake_futex(this);
if (++ret >= nr_wake)
break;
@@ -1100,15 +1108,15 @@ static void unqueue_me_pi(struct futex_q *q)
}
/*
- * Fixup the pi_state owner with current.
+ * Fixup the pi_state owner with the new owner.
*
* Must be called with hash bucket lock held and mm->sem held for non
* private futexes.
*/
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
- struct task_struct *curr)
+ struct task_struct *newowner)
{
- u32 newtid = task_pid_vnr(curr) | FUTEX_WAITERS;
+ u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
struct futex_pi_state *pi_state = q->pi_state;
u32 uval, curval, newval;
int ret;
@@ -1122,12 +1130,12 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
} else
newtid |= FUTEX_OWNER_DIED;
- pi_state->owner = curr;
+ pi_state->owner = newowner;
- spin_lock_irq(&curr->pi_lock);
+ spin_lock_irq(&newowner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
- list_add(&pi_state->list, &curr->pi_state_list);
- spin_unlock_irq(&curr->pi_lock);
+ list_add(&pi_state->list, &newowner->pi_state_list);
+ spin_unlock_irq(&newowner->pi_lock);
/*
* We own it, so we have to replace the pending owner
@@ -1152,14 +1160,14 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
/*
* In case we must use restart_block to restart a futex_wait,
- * we encode in the 'arg3' shared capability
+ * we encode in the 'flags' shared capability
*/
-#define ARG3_SHARED 1
+#define FLAGS_SHARED 1
static long futex_wait_restart(struct restart_block *restart);
static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
- u32 val, ktime_t *abs_time)
+ u32 val, ktime_t *abs_time, u32 bitset)
{
struct task_struct *curr = current;
DECLARE_WAITQUEUE(wait, curr);
@@ -1170,7 +1178,11 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
struct hrtimer_sleeper t;
int rem = 0;
+ if (!bitset)
+ return -EINVAL;
+
q.pi_state = NULL;
+ q.bitset = bitset;
retry:
futex_lock_mm(fshared);
@@ -1255,6 +1267,8 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
t.timer.expires = *abs_time;
hrtimer_start(&t.timer, t.timer.expires, HRTIMER_MODE_ABS);
+ if (!hrtimer_active(&t.timer))
+ t.task = NULL;
/*
* the timer could have already expired, in which
@@ -1293,12 +1307,14 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
struct restart_block *restart;
restart = &current_thread_info()->restart_block;
restart->fn = futex_wait_restart;
- restart->arg0 = (unsigned long)uaddr;
- restart->arg1 = (unsigned long)val;
- restart->arg2 = (unsigned long)abs_time;
- restart->arg3 = 0;
+ restart->futex.uaddr = (u32 *)uaddr;
+ restart->futex.val = val;
+ restart->futex.time = abs_time->tv64;
+ restart->futex.bitset = bitset;
+ restart->futex.flags = 0;
+
if (fshared)
- restart->arg3 |= ARG3_SHARED;
+ restart->futex.flags |= FLAGS_SHARED;
return -ERESTART_RESTARTBLOCK;
}
@@ -1313,15 +1329,16 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
static long futex_wait_restart(struct restart_block *restart)
{
- u32 __user *uaddr = (u32 __user *)restart->arg0;
- u32 val = (u32)restart->arg1;
- ktime_t *abs_time = (ktime_t *)restart->arg2;
+ u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
struct rw_semaphore *fshared = NULL;
+ ktime_t t;
+ t.tv64 = restart->futex.time;
restart->fn = do_no_restart_syscall;
- if (restart->arg3 & ARG3_SHARED)
+ if (restart->futex.flags & FLAGS_SHARED)
fshared = &current->mm->mmap_sem;
- return (long)futex_wait(uaddr, fshared, val, abs_time);
+ return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
+ restart->futex.bitset);
}
@@ -1510,9 +1527,37 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
* when we were on the way back before we locked the
* hash bucket.
*/
- if (q.pi_state->owner == curr &&
- rt_mutex_trylock(&q.pi_state->pi_mutex)) {
- ret = 0;
+ if (q.pi_state->owner == curr) {
+ /*
+ * Try to get the rt_mutex now. This might
+ * fail as some other task acquired the
+ * rt_mutex after we removed ourself from the
+ * rt_mutex waiters list.
+ */
+ if (rt_mutex_trylock(&q.pi_state->pi_mutex))
+ ret = 0;
+ else {
+ /*
+ * pi_state is incorrect, some other
+ * task did a lock steal and we
+ * returned due to timeout or signal
+ * without taking the rt_mutex. Too
+ * late. We can access the
+ * rt_mutex_owner without locking, as
+ * the other task is now blocked on
+ * the hash bucket lock. Fix the state
+ * up.
+ */
+ struct task_struct *owner;
+ int res;
+
+ owner = rt_mutex_owner(&q.pi_state->pi_mutex);
+ res = fixup_pi_state_owner(uaddr, &q, owner);
+
+ /* propagate -EFAULT, if the fixup failed */
+ if (res)
+ ret = res;
+ }
} else {
/*
* Paranoia check. If we did not take the lock
@@ -1914,7 +1959,8 @@ retry:
* PI futexes happens in exit_pi_state():
*/
if (!pi && (uval & FUTEX_WAITERS))
- futex_wake(uaddr, &curr->mm->mmap_sem, 1);
+ futex_wake(uaddr, &curr->mm->mmap_sem, 1,
+ FUTEX_BITSET_MATCH_ANY);
}
return 0;
}
@@ -2014,10 +2060,14 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
switch (cmd) {
case FUTEX_WAIT:
- ret = futex_wait(uaddr, fshared, val, timeout);
+ val3 = FUTEX_BITSET_MATCH_ANY;
+ case FUTEX_WAIT_BITSET:
+ ret = futex_wait(uaddr, fshared, val, timeout, val3);
break;
case FUTEX_WAKE:
- ret = futex_wake(uaddr, fshared, val);
+ val3 = FUTEX_BITSET_MATCH_ANY;
+ case FUTEX_WAKE_BITSET:
+ ret = futex_wake(uaddr, fshared, val, val3);
break;
case FUTEX_FD:
/* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */
@@ -2057,7 +2107,8 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
u32 val2 = 0;
int cmd = op & FUTEX_CMD_MASK;
- if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI)) {
+ if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
+ cmd == FUTEX_WAIT_BITSET)) {
if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
return -EFAULT;
if (!timespec_valid(&ts))
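
The futex changes add bitset variants: a FUTEX_WAIT_BITSET waiter stores a bitset in its futex_q, and FUTEX_WAKE_BITSET only wakes waiters whose bitset intersects the waker's (plain FUTEX_WAIT/FUTEX_WAKE map to FUTEX_BITSET_MATCH_ANY). A userspace sketch of the wake side (the opcode fallbacks below match the values used at the time of this patch; prefer the definitions from <linux/futex.h> where present):

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef FUTEX_WAIT_BITSET
#define FUTEX_WAIT_BITSET       9       /* opcodes added by this patch */
#define FUTEX_WAKE_BITSET       10
#endif
#ifndef FUTEX_BITSET_MATCH_ANY
#define FUTEX_BITSET_MATCH_ANY  0xffffffff
#endif

/* Thin wrapper: uaddr2 is unused by the bitset ops, val3 carries the bitset.
 * Note that FUTEX_WAIT_BITSET takes an absolute CLOCK_MONOTONIC timeout. */
static long futex(uint32_t *uaddr, int op, uint32_t val,
                  const struct timespec *timeout, uint32_t val3)
{
        return syscall(SYS_futex, uaddr, op, val, timeout, NULL, val3);
}

static uint32_t futex_word;

int main(void)
{
        /* Wake at most one waiter whose bitset intersects 0x1; a waiter
         * would have passed its own bitset as val3 to FUTEX_WAIT_BITSET. */
        long woken = futex(&futex_word, FUTEX_WAKE_BITSET, 1, NULL, 0x1);

        printf("woke %ld waiter(s)\n", woken);
        return 0;
}
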
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 00b572666cc..133d558db45 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -30,6 +30,15 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
return 0;
}
+static void __user *futex_uaddr(struct robust_list *entry,
+ compat_long_t futex_offset)
+{
+ compat_uptr_t base = ptr_to_compat(entry);
+ void __user *uaddr = compat_ptr(base + futex_offset);
+
+ return uaddr;
+}
+
/*
* Walk curr->robust_list (very carefully, it's a userspace list!)
* and mark any locks found there dead, and notify any waiters.
@@ -76,11 +85,12 @@ void compat_exit_robust_list(struct task_struct *curr)
* A pending lock might already be on the list, so
 * don't process it twice:
*/
- if (entry != pending)
- if (handle_futex_death((void __user *)entry + futex_offset,
- curr, pi))
- return;
+ if (entry != pending) {
+ void __user *uaddr = futex_uaddr(entry, futex_offset);
+ if (handle_futex_death(uaddr, curr, pi))
+ return;
+ }
if (rc)
return;
uentry = next_uentry;
@@ -94,9 +104,11 @@ void compat_exit_robust_list(struct task_struct *curr)
cond_resched();
}
- if (pending)
- handle_futex_death((void __user *)pending + futex_offset,
- curr, pip);
+ if (pending) {
+ void __user *uaddr = futex_uaddr(pending, futex_offset);
+
+ handle_futex_death(uaddr, curr, pip);
+ }
}
asmlinkage long
@@ -155,7 +167,8 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
int val2 = 0;
int cmd = op & FUTEX_CMD_MASK;
- if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI)) {
+ if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
+ cmd == FUTEX_WAIT_BITSET)) {
if (get_compat_timespec(&ts, utime))
return -EFAULT;
if (!timespec_valid(&ts))
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index b6d2ff7e37e..1069998fe25 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -325,6 +325,22 @@ unsigned long ktime_divns(const ktime_t kt, s64 div)
}
#endif /* BITS_PER_LONG >= 64 */
+/*
+ * Check, whether the timer is on the callback pending list
+ */
+static inline int hrtimer_cb_pending(const struct hrtimer *timer)
+{
+ return timer->state & HRTIMER_STATE_PENDING;
+}
+
+/*
+ * Remove a timer from the callback pending list
+ */
+static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
+{
+ list_del_init(&timer->cb_entry);
+}
+
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -494,29 +510,12 @@ void hres_timers_resume(void)
}
/*
- * Check, whether the timer is on the callback pending list
- */
-static inline int hrtimer_cb_pending(const struct hrtimer *timer)
-{
- return timer->state & HRTIMER_STATE_PENDING;
-}
-
-/*
- * Remove a timer from the callback pending list
- */
-static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
-{
- list_del_init(&timer->cb_entry);
-}
-
-/*
* Initialize the high resolution related parts of cpu_base
*/
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
base->expires_next.tv64 = KTIME_MAX;
base->hres_active = 0;
- INIT_LIST_HEAD(&base->cb_pending);
}
/*
@@ -524,7 +523,6 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
*/
static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
- INIT_LIST_HEAD(&timer->cb_entry);
}
/*
@@ -602,7 +600,7 @@ static int hrtimer_switch_to_hres(void)
/* "Retrigger" the interrupt to get things going */
retrigger_next_event(NULL);
local_irq_restore(flags);
- printk(KERN_INFO "Switched to high resolution mode on CPU %d\n",
+ printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
smp_processor_id());
return 1;
}
@@ -618,10 +616,13 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
{
return 0;
}
-static inline int hrtimer_cb_pending(struct hrtimer *timer) { return 0; }
-static inline void hrtimer_remove_cb_pending(struct hrtimer *timer) { }
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
+static inline int hrtimer_reprogram(struct hrtimer *timer,
+ struct hrtimer_clock_base *base)
+{
+ return 0;
+}
#endif /* CONFIG_HIGH_RES_TIMERS */
@@ -850,6 +851,14 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
#ifdef CONFIG_TIME_LOW_RES
tim = ktime_add(tim, base->resolution);
#endif
+ /*
+ * Careful here: User space might have asked for a
+ * very long sleep, so the add above might result in a
+ * negative number, which enqueues the timer in front
+ * of the queue.
+ */
+ if (tim.tv64 < 0)
+ tim.tv64 = KTIME_MAX;
}
timer->expires = tim;
@@ -993,6 +1002,7 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
clock_id = CLOCK_MONOTONIC;
timer->base = &cpu_base->clock_base[clock_id];
+ INIT_LIST_HEAD(&timer->cb_entry);
hrtimer_init_timer_hres(timer);
#ifdef CONFIG_TIMER_STATS
@@ -1022,6 +1032,85 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);
+static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
+{
+ spin_lock_irq(&cpu_base->lock);
+
+ while (!list_empty(&cpu_base->cb_pending)) {
+ enum hrtimer_restart (*fn)(struct hrtimer *);
+ struct hrtimer *timer;
+ int restart;
+
+ timer = list_entry(cpu_base->cb_pending.next,
+ struct hrtimer, cb_entry);
+
+ timer_stats_account_hrtimer(timer);
+
+ fn = timer->function;
+ __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
+ spin_unlock_irq(&cpu_base->lock);
+
+ restart = fn(timer);
+
+ spin_lock_irq(&cpu_base->lock);
+
+ timer->state &= ~HRTIMER_STATE_CALLBACK;
+ if (restart == HRTIMER_RESTART) {
+ BUG_ON(hrtimer_active(timer));
+ /*
+ * Enqueue the timer, allow reprogramming of the event
+ * device
+ */
+ enqueue_hrtimer(timer, timer->base, 1);
+ } else if (hrtimer_active(timer)) {
+ /*
+ * If the timer was rearmed on another CPU, reprogram
+ * the event device.
+ */
+ if (timer->base->first == &timer->node)
+ hrtimer_reprogram(timer, timer->base);
+ }
+ }
+ spin_unlock_irq(&cpu_base->lock);
+}
+
+static void __run_hrtimer(struct hrtimer *timer)
+{
+ struct hrtimer_clock_base *base = timer->base;
+ struct hrtimer_cpu_base *cpu_base = base->cpu_base;
+ enum hrtimer_restart (*fn)(struct hrtimer *);
+ int restart;
+
+ __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
+ timer_stats_account_hrtimer(timer);
+
+ fn = timer->function;
+ if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) {
+ /*
+ * Used for scheduler timers, avoid lock inversion with
+ * rq->lock and tasklist_lock.
+ *
+ * These timers are required to deal with enqueue expiry
+ * themselves and are not allowed to migrate.
+ */
+ spin_unlock(&cpu_base->lock);
+ restart = fn(timer);
+ spin_lock(&cpu_base->lock);
+ } else
+ restart = fn(timer);
+
+ /*
+ * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
+ * reprogramming of the event hardware. This happens at the end of this
+ * function anyway.
+ */
+ if (restart != HRTIMER_NORESTART) {
+ BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
+ enqueue_hrtimer(timer, base, 0);
+ }
+ timer->state &= ~HRTIMER_STATE_CALLBACK;
+}
+
#ifdef CONFIG_HIGH_RES_TIMERS
/*
@@ -1079,21 +1168,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
continue;
}
- __remove_hrtimer(timer, base,
- HRTIMER_STATE_CALLBACK, 0);
- timer_stats_account_hrtimer(timer);
-
- /*
- * Note: We clear the CALLBACK bit after
- * enqueue_hrtimer to avoid reprogramming of
- * the event hardware. This happens at the end
- * of this function anyway.
- */
- if (timer->function(timer) != HRTIMER_NORESTART) {
- BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
- enqueue_hrtimer(timer, base, 0);
- }
- timer->state &= ~HRTIMER_STATE_CALLBACK;
+ __run_hrtimer(timer);
}
spin_unlock(&cpu_base->lock);
base++;
@@ -1114,52 +1189,41 @@ void hrtimer_interrupt(struct clock_event_device *dev)
static void run_hrtimer_softirq(struct softirq_action *h)
{
- struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
- spin_lock_irq(&cpu_base->lock);
-
- while (!list_empty(&cpu_base->cb_pending)) {
- enum hrtimer_restart (*fn)(struct hrtimer *);
- struct hrtimer *timer;
- int restart;
-
- timer = list_entry(cpu_base->cb_pending.next,
- struct hrtimer, cb_entry);
+ run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
+}
- timer_stats_account_hrtimer(timer);
+#endif /* CONFIG_HIGH_RES_TIMERS */
- fn = timer->function;
- __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
- spin_unlock_irq(&cpu_base->lock);
+/*
+ * Called from timer softirq every jiffy, expire hrtimers:
+ *
+ * For HRT it's the fallback code to run the softirq in the timer
+ * softirq context in case the hrtimer initialization failed or has
+ * not been done yet.
+ */
+void hrtimer_run_pending(void)
+{
+ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
- restart = fn(timer);
+ if (hrtimer_hres_active())
+ return;
- spin_lock_irq(&cpu_base->lock);
+ /*
+ * This _is_ ugly: We have to check in the softirq context,
+ * whether we can switch to highres and / or nohz mode. The
+ * clocksource switch happens in the timer interrupt with
+ * xtime_lock held. Notification from there only sets the
+ * check bit in the tick_oneshot code, otherwise we might
+ * deadlock vs. xtime_lock.
+ */
+ if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
+ hrtimer_switch_to_hres();
- timer->state &= ~HRTIMER_STATE_CALLBACK;
- if (restart == HRTIMER_RESTART) {
- BUG_ON(hrtimer_active(timer));
- /*
- * Enqueue the timer, allow reprogramming of the event
- * device
- */
- enqueue_hrtimer(timer, timer->base, 1);
- } else if (hrtimer_active(timer)) {
- /*
- * If the timer was rearmed on another CPU, reprogram
- * the event device.
- */
- if (timer->base->first == &timer->node)
- hrtimer_reprogram(timer, timer->base);
- }
- }
- spin_unlock_irq(&cpu_base->lock);
+ run_hrtimer_pending(cpu_base);
}
-#endif /* CONFIG_HIGH_RES_TIMERS */
-
/*
- * Expire the per base hrtimer-queue:
+ * Called from hardirq context every jiffy
*/
static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
int index)
@@ -1173,46 +1237,27 @@ static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
if (base->get_softirq_time)
base->softirq_time = base->get_softirq_time();
- spin_lock_irq(&cpu_base->lock);
+ spin_lock(&cpu_base->lock);
while ((node = base->first)) {
struct hrtimer *timer;
- enum hrtimer_restart (*fn)(struct hrtimer *);
- int restart;
timer = rb_entry(node, struct hrtimer, node);
if (base->softirq_time.tv64 <= timer->expires.tv64)
break;
-#ifdef CONFIG_HIGH_RES_TIMERS
- WARN_ON_ONCE(timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ);
-#endif
- timer_stats_account_hrtimer(timer);
-
- fn = timer->function;
- __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
- spin_unlock_irq(&cpu_base->lock);
-
- restart = fn(timer);
-
- spin_lock_irq(&cpu_base->lock);
-
- timer->state &= ~HRTIMER_STATE_CALLBACK;
- if (restart != HRTIMER_NORESTART) {
- BUG_ON(hrtimer_active(timer));
- enqueue_hrtimer(timer, base, 0);
+ if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
+ __remove_hrtimer(timer, base, HRTIMER_STATE_PENDING, 0);
+ list_add_tail(&timer->cb_entry,
+ &base->cpu_base->cb_pending);
+ continue;
}
+
+ __run_hrtimer(timer);
}
- spin_unlock_irq(&cpu_base->lock);
+ spin_unlock(&cpu_base->lock);
}
-/*
- * Called from timer softirq every jiffy, expire hrtimers:
- *
- * For HRT its the fall back code to run the softirq in the timer
- * softirq context in case the hrtimer initialization failed or has
- * not been done yet.
- */
void hrtimer_run_queues(void)
{
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
@@ -1221,18 +1266,6 @@ void hrtimer_run_queues(void)
if (hrtimer_hres_active())
return;
- /*
- * This _is_ ugly: We have to check in the softirq context,
- * whether we can switch to highres and / or nohz mode. The
- * clocksource switch happens in the timer interrupt with
- * xtime_lock held. Notification from there only sets the
- * check bit in the tick_oneshot code, otherwise we might
- * deadlock vs. xtime_lock.
- */
- if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
- if (hrtimer_switch_to_hres())
- return;
-
hrtimer_get_softirq_time(cpu_base);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
@@ -1260,7 +1293,7 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
sl->timer.function = hrtimer_wakeup;
sl->task = task;
#ifdef CONFIG_HIGH_RES_TIMERS
- sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART;
+ sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
#endif
}
@@ -1271,6 +1304,8 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
do {
set_current_state(TASK_INTERRUPTIBLE);
hrtimer_start(&t->timer, t->timer.expires, mode);
+ if (!hrtimer_active(&t->timer))
+ t->task = NULL;
if (likely(t->task))
schedule();
@@ -1280,6 +1315,8 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
} while (t->task && !signal_pending(current));
+ __set_current_state(TASK_RUNNING);
+
return t->task == NULL;
}
@@ -1370,7 +1407,7 @@ sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
/*
* Functions related to boot-time initialization:
*/
-static void __devinit init_hrtimers_cpu(int cpu)
+static void __cpuinit init_hrtimers_cpu(int cpu)
{
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
@@ -1381,6 +1418,7 @@ static void __devinit init_hrtimers_cpu(int cpu)
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
cpu_base->clock_base[i].cpu_base = cpu_base;
+ INIT_LIST_HEAD(&cpu_base->cb_pending);
hrtimer_init_hres(cpu_base);
}
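
The hrtimer rework funnels expiry through __run_hrtimer() and a per-CPU cb_pending list, but the driver-facing API is unchanged. A minimal in-kernel sketch of a one-shot timer that these paths would dispatch (illustrative only; my_timer/my_timer_fn are hypothetical):

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/module.h>

static struct hrtimer my_timer;

static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
{
        pr_debug("my_timer expired\n");
        return HRTIMER_NORESTART;       /* one-shot: not re-enqueued */
}

static int __init my_hrt_init(void)
{
        hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        my_timer.function = my_timer_fn;
        /* fire in 100ms; expiry runs through __run_hrtimer() above */
        hrtimer_start(&my_timer, ktime_set(0, 100 * 1000 * 1000),
                      HRTIMER_MODE_REL);
        return 0;
}

static void __exit my_hrt_exit(void)
{
        hrtimer_cancel(&my_timer);
}

module_init(my_hrt_init);
module_exit(my_hrt_exit);
MODULE_LICENSE("GPL");
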
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 9b5dff6b3f6..44019ce30a1 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -297,18 +297,13 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out_unlock;
+ desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
kstat_cpu(cpu).irqs[irq]++;
action = desc->action;
- if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
- if (desc->chip->mask)
- desc->chip->mask(irq);
- desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
- desc->status |= IRQ_PENDING;
+ if (unlikely(!action || (desc->status & IRQ_DISABLED)))
goto out_unlock;
- }
- desc->status &= ~(IRQ_REPLAY | IRQ_WAITING | IRQ_PENDING);
desc->status |= IRQ_INPROGRESS;
spin_unlock(&desc->lock);
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index e391cbb1f56..dc335ad2752 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -178,9 +178,11 @@ fastcall unsigned int __do_IRQ(unsigned int irq)
*/
if (desc->chip->ack)
desc->chip->ack(irq);
- action_ret = handle_IRQ_event(irq, desc->action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ if (likely(!(desc->status & IRQ_DISABLED))) {
+ action_ret = handle_IRQ_event(irq, desc->action);
+ if (!noirqdebug)
+ note_interrupt(irq, desc, action_ret);
+ }
desc->chip->end(irq);
return 1;
}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 1f314221d53..438a0146428 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -479,6 +479,9 @@ void free_irq(unsigned int irq, void *dev_id)
return;
}
printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
+#ifdef CONFIG_DEBUG_SHIRQ
+ dump_stack();
+#endif
spin_unlock_irqrestore(&desc->lock, flags);
return;
}
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 50b81b98046..c2f2ccb0549 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -75,6 +75,18 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
#endif
+static int irq_spurious_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct irq_desc *d = &irq_desc[(long) data];
+ return sprintf(page, "count %u\n"
+ "unhandled %u\n"
+ "last_unhandled %u ms\n",
+ d->irq_count,
+ d->irqs_unhandled,
+ jiffies_to_msecs(d->last_unhandled));
+}
+
#define MAX_NAMELEN 128
static int name_unique(unsigned int irq, struct irqaction *new_action)
@@ -118,6 +130,7 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
void register_irq_proc(unsigned int irq)
{
char name [MAX_NAMELEN];
+ struct proc_dir_entry *entry;
if (!root_irq_dir ||
(irq_desc[irq].chip == &no_irq_chip) ||
@@ -132,8 +145,6 @@ void register_irq_proc(unsigned int irq)
#ifdef CONFIG_SMP
{
- struct proc_dir_entry *entry;
-
/* create /proc/irq/<irq>/smp_affinity */
entry = create_proc_entry("smp_affinity", 0600, irq_desc[irq].dir);
@@ -144,6 +155,12 @@ void register_irq_proc(unsigned int irq)
}
}
#endif
+
+ entry = create_proc_entry("spurious", 0444, irq_desc[irq].dir);
+ if (entry) {
+ entry->data = (void *)(long)irq;
+ entry->read_proc = irq_spurious_read;
+ }
}
#undef MAX_NAMELEN
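
The new /proc/irq/<N>/spurious file exposes the counters printed by irq_spurious_read() above. A small userspace sketch that dumps them (the IRQ number is an arbitrary example):

#include <stdio.h>

int main(void)
{
        char line[64];
        FILE *f = fopen("/proc/irq/16/spurious", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* count / unhandled / last_unhandled */
        fclose(f);
        return 0;
}
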
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 32b161972fa..a6b2bc831dd 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
static int irqfixup __read_mostly;
@@ -225,6 +226,8 @@ int noirqdebug_setup(char *str)
}
__setup("noirqdebug", noirqdebug_setup);
+module_param(noirqdebug, bool, 0644);
+MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
static int __init irqfixup_setup(char *str)
{
@@ -236,6 +239,8 @@ static int __init irqfixup_setup(char *str)
}
__setup("irqfixup", irqfixup_setup);
+module_param(irqfixup, int, 0644);
+MODULE_PARM_DESC("irqfixup", "0: No fixup, 1: irqfixup mode 2: irqpoll mode");
static int __init irqpoll_setup(char *str)
{
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 474219a4192..7dadc71ce51 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -32,9 +32,14 @@
/* These will be re-linked against their real values during the second link stage */
extern const unsigned long kallsyms_addresses[] __attribute__((weak));
-extern const unsigned long kallsyms_num_syms __attribute__((weak));
extern const u8 kallsyms_names[] __attribute__((weak));
+/* tell the compiler that the count isn't in the small data section if the arch
+ * has one (eg: FRV)
+ */
+extern const unsigned long kallsyms_num_syms
+__attribute__((weak, section(".rodata")));
+
extern const u8 kallsyms_token_table[] __attribute__((weak));
extern const u16 kallsyms_token_index[] __attribute__((weak));
@@ -228,10 +233,11 @@ static unsigned long get_symbol_pos(unsigned long addr,
int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
unsigned long *offset)
{
+ char namebuf[KSYM_NAME_LEN];
if (is_ksym_addr(addr))
return !!get_symbol_pos(addr, symbolsize, offset);
- return !!module_address_lookup(addr, symbolsize, offset, NULL);
+ return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf);
}
/*
@@ -246,8 +252,6 @@ const char *kallsyms_lookup(unsigned long addr,
unsigned long *offset,
char **modname, char *namebuf)
{
- const char *msym;
-
namebuf[KSYM_NAME_LEN - 1] = 0;
namebuf[0] = 0;
@@ -263,10 +267,8 @@ const char *kallsyms_lookup(unsigned long addr,
}
/* see if it's in a module */
- msym = module_address_lookup(addr, symbolsize, offset, modname);
- if (msym)
- return strncpy(namebuf, msym, KSYM_NAME_LEN - 1);
-
+ return module_address_lookup(addr, symbolsize, offset, modname,
+ namebuf);
return NULL;
}
diff --git a/kernel/kexec.c b/kernel/kexec.c
index aa74a1ef2da..9a26eec9eb0 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1404,6 +1404,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_OFFSET(list_head, next);
VMCOREINFO_OFFSET(list_head, prev);
VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
+ VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
VMCOREINFO_NUMBER(NR_FREE_PAGES);
arch_crash_save_vmcoreinfo();
diff --git a/kernel/kmod.c b/kernel/kmod.c
index c6a4f8aebeb..bb7df2a28bd 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -451,13 +451,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
enum umh_wait wait)
{
DECLARE_COMPLETION_ONSTACK(done);
- int retval;
+ int retval = 0;
helper_lock();
- if (sub_info->path[0] == '\0') {
- retval = 0;
+ if (sub_info->path[0] == '\0')
goto out;
- }
if (!khelper_wq || usermodehelper_disabled) {
retval = -EBUSY;
@@ -468,13 +466,14 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
sub_info->wait = wait;
queue_work(khelper_wq, &sub_info->work);
- if (wait == UMH_NO_WAIT) /* task has freed sub_info */
- return 0;
+ if (wait == UMH_NO_WAIT) /* task has freed sub_info */
+ goto unlock;
wait_for_completion(&done);
retval = sub_info->retval;
- out:
+out:
call_usermodehelper_freeinfo(sub_info);
+unlock:
helper_unlock();
return retval;
}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index e3a5d817ac9..d0493eafea3 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -824,6 +824,8 @@ static int __init init_kprobes(void)
if (!err)
err = register_die_notifier(&kprobe_exceptions_nb);
+ if (!err)
+ init_test_probes();
return err;
}
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 65daa5373ca..e53bc30e9ba 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -17,30 +17,34 @@
#include <linux/sched.h>
#define KERNEL_ATTR_RO(_name) \
-static struct subsys_attribute _name##_attr = __ATTR_RO(_name)
+static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KERNEL_ATTR_RW(_name) \
-static struct subsys_attribute _name##_attr = \
+static struct kobj_attribute _name##_attr = \
__ATTR(_name, 0644, _name##_show, _name##_store)
#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
/* current uevent sequence number */
-static ssize_t uevent_seqnum_show(struct kset *kset, char *page)
+static ssize_t uevent_seqnum_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
- return sprintf(page, "%llu\n", (unsigned long long)uevent_seqnum);
+ return sprintf(buf, "%llu\n", (unsigned long long)uevent_seqnum);
}
KERNEL_ATTR_RO(uevent_seqnum);
/* uevent helper program, used during early boot */
-static ssize_t uevent_helper_show(struct kset *kset, char *page)
+static ssize_t uevent_helper_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
- return sprintf(page, "%s\n", uevent_helper);
+ return sprintf(buf, "%s\n", uevent_helper);
}
-static ssize_t uevent_helper_store(struct kset *kset, const char *page, size_t count)
+static ssize_t uevent_helper_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
if (count+1 > UEVENT_HELPER_PATH_LEN)
return -ENOENT;
- memcpy(uevent_helper, page, count);
+ memcpy(uevent_helper, buf, count);
uevent_helper[count] = '\0';
if (count && uevent_helper[count-1] == '\n')
uevent_helper[count-1] = '\0';
@@ -50,21 +54,24 @@ KERNEL_ATTR_RW(uevent_helper);
#endif
#ifdef CONFIG_KEXEC
-static ssize_t kexec_loaded_show(struct kset *kset, char *page)
+static ssize_t kexec_loaded_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
- return sprintf(page, "%d\n", !!kexec_image);
+ return sprintf(buf, "%d\n", !!kexec_image);
}
KERNEL_ATTR_RO(kexec_loaded);
-static ssize_t kexec_crash_loaded_show(struct kset *kset, char *page)
+static ssize_t kexec_crash_loaded_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
- return sprintf(page, "%d\n", !!kexec_crash_image);
+ return sprintf(buf, "%d\n", !!kexec_crash_image);
}
KERNEL_ATTR_RO(kexec_crash_loaded);
-static ssize_t vmcoreinfo_show(struct kset *kset, char *page)
+static ssize_t vmcoreinfo_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
- return sprintf(page, "%lx %x\n",
+ return sprintf(buf, "%lx %x\n",
paddr_vmcoreinfo_note(),
(unsigned int)vmcoreinfo_max_size);
}
@@ -94,8 +101,8 @@ static struct bin_attribute notes_attr = {
.read = &notes_read,
};
-decl_subsys(kernel, NULL, NULL);
-EXPORT_SYMBOL_GPL(kernel_subsys);
+struct kobject *kernel_kobj;
+EXPORT_SYMBOL_GPL(kernel_kobj);
static struct attribute * kernel_attrs[] = {
#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
@@ -116,24 +123,39 @@ static struct attribute_group kernel_attr_group = {
static int __init ksysfs_init(void)
{
- int error = subsystem_register(&kernel_subsys);
- if (!error)
- error = sysfs_create_group(&kernel_subsys.kobj,
- &kernel_attr_group);
+ int error;
- if (!error && notes_size > 0) {
- notes_attr.size = notes_size;
- error = sysfs_create_bin_file(&kernel_subsys.kobj,
- &notes_attr);
+ kernel_kobj = kobject_create_and_add("kernel", NULL);
+ if (!kernel_kobj) {
+ error = -ENOMEM;
+ goto exit;
}
+ error = sysfs_create_group(kernel_kobj, &kernel_attr_group);
+ if (error)
+ goto kset_exit;
- /*
- * Create "/sys/kernel/uids" directory and corresponding root user's
- * directory under it.
- */
- if (!error)
- error = uids_kobject_init();
+ if (notes_size > 0) {
+ notes_attr.size = notes_size;
+ error = sysfs_create_bin_file(kernel_kobj, &notes_attr);
+ if (error)
+ goto group_exit;
+ }
+ /* create the /sys/kernel/uids/ directory */
+ error = uids_sysfs_init();
+ if (error)
+ goto notes_exit;
+
+ return 0;
+
+notes_exit:
+ if (notes_size > 0)
+ sysfs_remove_bin_file(kernel_kobj, &notes_attr);
+group_exit:
+ sysfs_remove_group(kernel_kobj, &kernel_attr_group);
+kset_exit:
+ kobject_put(kernel_kobj);
+exit:
return error;
}
diff --git a/kernel/kthread.c b/kernel/kthread.c
index dcfe724300e..0ac887882f9 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -15,6 +15,8 @@
#include <linux/mutex.h>
#include <asm/semaphore.h>
+#define KTHREAD_NICE_LEVEL (-5)
+
static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;
@@ -94,10 +96,18 @@ static void create_kthread(struct kthread_create_info *create)
if (pid < 0) {
create->result = ERR_PTR(pid);
} else {
+ struct sched_param param = { .sched_priority = 0 };
wait_for_completion(&create->started);
read_lock(&tasklist_lock);
create->result = find_task_by_pid(pid);
read_unlock(&tasklist_lock);
+ /*
+ * root may have changed our (kthreadd's) priority or CPU mask.
+ * The kernel thread should not inherit these properties.
+ */
+ sched_setscheduler(create->result, SCHED_NORMAL, &param);
+ set_user_nice(create->result, KTHREAD_NICE_LEVEL);
+ set_cpus_allowed(create->result, CPU_MASK_ALL);
}
complete(&create->done);
}
@@ -221,7 +231,7 @@ int kthreadd(void *unused)
/* Setup a clean context for our children to inherit. */
set_task_comm(tsk, "kthreadd");
ignore_signals(tsk);
- set_user_nice(tsk, -5);
+ set_user_nice(tsk, KTHREAD_NICE_LEVEL);
set_cpus_allowed(tsk, CPU_MASK_ALL);
current->flags |= PF_NOFREEZE;
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
new file mode 100644
index 00000000000..b4e3c85abe7
--- /dev/null
+++ b/kernel/latencytop.c
@@ -0,0 +1,239 @@
+/*
+ * latencytop.c: Latency display infrastructure
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/latencytop.h>
+#include <linux/kallsyms.h>
+#include <linux/seq_file.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/proc_fs.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+
+static DEFINE_SPINLOCK(latency_lock);
+
+#define MAXLR 128
+static struct latency_record latency_record[MAXLR];
+
+int latencytop_enabled;
+
+void clear_all_latency_tracing(struct task_struct *p)
+{
+ unsigned long flags;
+
+ if (!latencytop_enabled)
+ return;
+
+ spin_lock_irqsave(&latency_lock, flags);
+ memset(&p->latency_record, 0, sizeof(p->latency_record));
+ p->latency_record_count = 0;
+ spin_unlock_irqrestore(&latency_lock, flags);
+}
+
+static void clear_global_latency_tracing(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&latency_lock, flags);
+ memset(&latency_record, 0, sizeof(latency_record));
+ spin_unlock_irqrestore(&latency_lock, flags);
+}
+
+static void __sched
+account_global_scheduler_latency(struct task_struct *tsk, struct latency_record *lat)
+{
+ int firstnonnull = MAXLR + 1;
+ int i;
+
+ if (!latencytop_enabled)
+ return;
+
+ /* skip kernel threads for now */
+ if (!tsk->mm)
+ return;
+
+ for (i = 0; i < MAXLR; i++) {
+ int q;
+ int same = 1;
+ /* Nothing stored: */
+ if (!latency_record[i].backtrace[0]) {
+ if (firstnonnull > i)
+ firstnonnull = i;
+ continue;
+ }
+ for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
+ if (latency_record[i].backtrace[q] !=
+ lat->backtrace[q])
+ same = 0;
+ if (same && lat->backtrace[q] == 0)
+ break;
+ if (same && lat->backtrace[q] == ULONG_MAX)
+ break;
+ }
+ if (same) {
+ latency_record[i].count++;
+ latency_record[i].time += lat->time;
+ if (lat->time > latency_record[i].max)
+ latency_record[i].max = lat->time;
+ return;
+ }
+ }
+
+ i = firstnonnull;
+ if (i >= MAXLR - 1)
+ return;
+
+ /* Allocated a new one: */
+ memcpy(&latency_record[i], lat, sizeof(struct latency_record));
+}
+
+static inline void store_stacktrace(struct task_struct *tsk, struct latency_record *lat)
+{
+ struct stack_trace trace;
+
+ memset(&trace, 0, sizeof(trace));
+ trace.max_entries = LT_BACKTRACEDEPTH;
+ trace.entries = &lat->backtrace[0];
+ trace.skip = 0;
+ save_stack_trace_tsk(tsk, &trace);
+}
+
+void __sched
+account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
+{
+ unsigned long flags;
+ int i, q;
+ struct latency_record lat;
+
+ if (!latencytop_enabled)
+ return;
+
+ /* Long interruptible waits are generally user requested... */
+ if (inter && usecs > 5000)
+ return;
+
+ memset(&lat, 0, sizeof(lat));
+ lat.count = 1;
+ lat.time = usecs;
+ lat.max = usecs;
+ store_stacktrace(tsk, &lat);
+
+ spin_lock_irqsave(&latency_lock, flags);
+
+ account_global_scheduler_latency(tsk, &lat);
+
+ /*
+ * short term hack; if we're > 32 we stop; in the future we recycle:
+ */
+ tsk->latency_record_count++;
+ if (tsk->latency_record_count >= LT_SAVECOUNT)
+ goto out_unlock;
+
+ for (i = 0; i < LT_SAVECOUNT ; i++) {
+ struct latency_record *mylat;
+ int same = 1;
+ mylat = &tsk->latency_record[i];
+ for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
+ if (mylat->backtrace[q] !=
+ lat.backtrace[q])
+ same = 0;
+ if (same && lat.backtrace[q] == 0)
+ break;
+ if (same && lat.backtrace[q] == ULONG_MAX)
+ break;
+ }
+ if (same) {
+ mylat->count++;
+ mylat->time += lat.time;
+ if (lat.time > mylat->max)
+ mylat->max = lat.time;
+ goto out_unlock;
+ }
+ }
+
+ /* Allocated a new one: */
+ i = tsk->latency_record_count;
+ memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
+
+out_unlock:
+ spin_unlock_irqrestore(&latency_lock, flags);
+}
+
+static int lstats_show(struct seq_file *m, void *v)
+{
+ int i;
+
+ seq_puts(m, "Latency Top version : v0.1\n");
+
+ for (i = 0; i < MAXLR; i++) {
+ if (latency_record[i].backtrace[0]) {
+ int q;
+ seq_printf(m, "%i %li %li ",
+ latency_record[i].count,
+ latency_record[i].time,
+ latency_record[i].max);
+ for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
+ char sym[KSYM_NAME_LEN];
+ char *c;
+ if (!latency_record[i].backtrace[q])
+ break;
+ if (latency_record[i].backtrace[q] == ULONG_MAX)
+ break;
+ sprint_symbol(sym, latency_record[i].backtrace[q]);
+ c = strchr(sym, '+');
+ if (c)
+ *c = 0;
+ seq_printf(m, "%s ", sym);
+ }
+ seq_printf(m, "\n");
+ }
+ }
+ return 0;
+}
+
+static ssize_t
+lstats_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *offs)
+{
+ clear_global_latency_tracing();
+
+ return count;
+}
+
+static int lstats_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, lstats_show, NULL);
+}
+
+static struct file_operations lstats_fops = {
+ .open = lstats_open,
+ .read = seq_read,
+ .write = lstats_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init init_lstats_procfs(void)
+{
+ struct proc_dir_entry *pe;
+
+ pe = create_proc_entry("latency_stats", 0644, NULL);
+ if (!pe)
+ return -ENOMEM;
+
+ pe->proc_fops = &lstats_fops;
+
+ return 0;
+}
+__initcall(init_lstats_procfs);
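The accounting added above folds a new latency sample into an existing record when the two saved backtraces agree entry for entry (a 0 or ULONG_MAX entry ends the trace), and otherwise copies the sample into the first empty slot. A minimal standalone sketch of that accumulation logic, using illustrative names and sizes (demo_record, DEPTH, MAXLR) rather than the kernel types:

/* Standalone sketch (not kernel code) of the backtrace-matching
 * accumulation used above; record layout and table size are assumptions. */
#include <limits.h>
#include <stdio.h>

#define DEPTH	12
#define MAXLR	8

struct demo_record {
	unsigned int count;
	unsigned long time, max;
	unsigned long backtrace[DEPTH];
};

static struct demo_record table[MAXLR];

static int same_trace(const unsigned long *a, const unsigned long *b)
{
	int q;

	for (q = 0; q < DEPTH; q++) {
		if (a[q] != b[q])
			return 0;
		if (a[q] == 0 || a[q] == ULONG_MAX)
			break;	/* end-of-trace markers */
	}
	return 1;
}

static void account(const struct demo_record *lat)
{
	int i, free_slot = -1;

	for (i = 0; i < MAXLR; i++) {
		if (!table[i].backtrace[0]) {	/* empty slot */
			if (free_slot < 0)
				free_slot = i;
			continue;
		}
		if (same_trace(table[i].backtrace, lat->backtrace)) {
			table[i].count++;
			table[i].time += lat->time;
			if (lat->time > table[i].max)
				table[i].max = lat->time;
			return;
		}
	}
	if (free_slot >= 0)
		table[free_slot] = *lat;	/* start a new record */
}

int main(void)
{
	struct demo_record lat = { .count = 1, .time = 120, .max = 120,
				   .backtrace = { 0x1234, 0x5678, 0 } };

	account(&lat);
	account(&lat);
	printf("count=%u time=%lu max=%lu\n",
	       table[0].count, table[0].time, table[0].max);
	return 0;
}

Folding by backtrace means repeated waits from the same code path cost a single table slot while count, time and max keep accumulating.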
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 55fe0c7cd95..3574379f4d6 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2424,7 +2424,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
return 0;
/*
- * Calculate the chain hash: it's the combined has of all the
+ * Calculate the chain hash: it's the combined hash of all the
* lock keys along the dependency chain. We save the hash value
* at every step so that we can get the current hash easily
* after unlock. The chain hash is then used to cache dependency
@@ -2654,10 +2654,15 @@ static void check_flags(unsigned long flags)
if (!debug_locks)
return;
- if (irqs_disabled_flags(flags))
- DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled);
- else
- DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled);
+ if (irqs_disabled_flags(flags)) {
+ if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
+ printk("possible reason: unannotated irqs-off.\n");
+ }
+ } else {
+ if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
+ printk("possible reason: unannotated irqs-on.\n");
+ }
+ }
/*
* We dont accurately track softirq state in e.g.
@@ -2927,7 +2932,7 @@ static void zap_class(struct lock_class *class)
}
-static inline int within(void *addr, void *start, unsigned long size)
+static inline int within(const void *addr, void *start, unsigned long size)
{
return addr >= start && addr < start + size;
}
@@ -2938,9 +2943,10 @@ void lockdep_free_key_range(void *start, unsigned long size)
struct list_head *head;
unsigned long flags;
int i;
+ int locked;
raw_local_irq_save(flags);
- graph_lock();
+ locked = graph_lock();
/*
* Unhash all classes that were created by this module:
@@ -2949,12 +2955,16 @@ void lockdep_free_key_range(void *start, unsigned long size)
head = classhash_table + i;
if (list_empty(head))
continue;
- list_for_each_entry_safe(class, next, head, hash_entry)
+ list_for_each_entry_safe(class, next, head, hash_entry) {
if (within(class->key, start, size))
zap_class(class);
+ else if (within(class->name, start, size))
+ zap_class(class);
+ }
}
- graph_unlock();
+ if (locked)
+ graph_unlock();
raw_local_irq_restore(flags);
}
@@ -2964,6 +2974,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
struct list_head *head;
unsigned long flags;
int i, j;
+ int locked;
raw_local_irq_save(flags);
@@ -2982,7 +2993,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
* Debug check: in the end all mapped classes should
* be gone.
*/
- graph_lock();
+ locked = graph_lock();
for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
if (list_empty(head))
@@ -2995,7 +3006,8 @@ void lockdep_reset_lock(struct lockdep_map *lock)
}
}
}
- graph_unlock();
+ if (locked)
+ graph_unlock();
out_restore:
raw_local_irq_restore(flags);
@@ -3054,11 +3066,6 @@ void __init lockdep_info(void)
#endif
}
-static inline int in_range(const void *start, const void *addr, const void *end)
-{
- return addr >= start && addr <= end;
-}
-
static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
const void *mem_to, struct held_lock *hlock)
@@ -3080,6 +3087,13 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
dump_stack();
}
+static inline int not_in_range(const void* mem_from, unsigned long mem_len,
+ const void* lock_from, unsigned long lock_len)
+{
+ return lock_from + lock_len <= mem_from ||
+ mem_from + mem_len <= lock_from;
+}
+
/*
* Called when kernel memory is freed (or unmapped), or if a lock
* is destroyed or reinitialized - this code checks whether there is
@@ -3087,7 +3101,6 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
*/
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
- const void *mem_to = mem_from + mem_len, *lock_from, *lock_to;
struct task_struct *curr = current;
struct held_lock *hlock;
unsigned long flags;
@@ -3100,14 +3113,11 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
- lock_from = (void *)hlock->instance;
- lock_to = (void *)(hlock->instance + 1);
-
- if (!in_range(mem_from, lock_from, mem_to) &&
- !in_range(mem_from, lock_to, mem_to))
+ if (not_in_range(mem_from, mem_len, hlock->instance,
+ sizeof(*hlock->instance)))
continue;
- print_freed_lock_bug(curr, mem_from, mem_to, hlock);
+ print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
break;
}
local_irq_restore(flags);
@@ -3173,6 +3183,13 @@ retry:
printk(" locked it.\n");
do_each_thread(g, p) {
+ /*
+ * It's not reliable to print a task's held locks
+ * if it's not sleeping (or if it's not the current
+ * task):
+ */
+ if (p->state == TASK_RUNNING && p != current)
+ continue;
if (p->lockdep_depth)
lockdep_print_held_locks(p);
if (!unlock)
@@ -3189,7 +3206,11 @@ retry:
EXPORT_SYMBOL_GPL(debug_show_all_locks);
-void debug_show_held_locks(struct task_struct *task)
+/*
+ * Careful: only use this function if you are sure that
+ * the task cannot run in parallel!
+ */
+void __debug_show_held_locks(struct task_struct *task)
{
if (unlikely(!debug_locks)) {
printk("INFO: lockdep is turned off.\n");
@@ -3197,6 +3218,12 @@ void debug_show_held_locks(struct task_struct *task)
}
lockdep_print_held_locks(task);
}
+EXPORT_SYMBOL_GPL(__debug_show_held_locks);
+
+void debug_show_held_locks(struct task_struct *task)
+{
+ __debug_show_held_locks(task);
+}
EXPORT_SYMBOL_GPL(debug_show_held_locks);
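The debug_check_no_locks_freed() rework above replaces the two endpoint checks with a single interval test: the freed region and a held lock's memory are disjoint exactly when one ends at or before the other begins. A standalone check of that predicate (plain C; the buffer and offsets are illustrative):

/* Standalone verification (not kernel code) of the interval-disjointness
 * test implemented by the new not_in_range() helper above. */
#include <assert.h>
#include <stdio.h>

static int not_in_range(const char *mem_from, unsigned long mem_len,
			const char *lock_from, unsigned long lock_len)
{
	return lock_from + lock_len <= mem_from ||
	       mem_from + mem_len <= lock_from;
}

int main(void)
{
	char buf[64];

	/* lock object entirely inside the freed region: overlap */
	assert(!not_in_range(buf, 64, buf + 8, 16));
	/* freed region entirely inside the lock object: overlap
	 * (a case the old endpoint-only in_range() checks could miss) */
	assert(!not_in_range(buf + 8, 16, buf, 64));
	/* lock object after the freed region: disjoint */
	assert(not_in_range(buf, 16, buf + 32, 8));
	printf("overlap checks passed\n");
	return 0;
}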
diff --git a/kernel/marker.c b/kernel/marker.c
index ccb48d9a365..5323cfaedbc 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -28,7 +28,7 @@ extern struct marker __start___markers[];
extern struct marker __stop___markers[];
/*
- * module_mutex nests inside markers_mutex. Markers mutex protects the builtin
+ * markers_mutex nests inside module_mutex. Markers mutex protects the builtin
* and module markers, the hash table and deferred_sync.
*/
static DEFINE_MUTEX(markers_mutex);
@@ -257,7 +257,6 @@ static void disable_marker(struct marker *elem)
* @refcount: number of references left to the given probe_module (out)
*
* Updates the probe callback corresponding to a range of markers.
- * Must be called with markers_mutex held.
*/
void marker_update_probe_range(struct marker *begin,
struct marker *end, struct module *probe_module,
@@ -266,6 +265,7 @@ void marker_update_probe_range(struct marker *begin,
struct marker *iter;
struct marker_entry *mark_entry;
+ mutex_lock(&markers_mutex);
for (iter = begin; iter < end; iter++) {
mark_entry = get_marker(iter->name);
if (mark_entry && mark_entry->refcount) {
@@ -281,6 +281,7 @@ void marker_update_probe_range(struct marker *begin,
disable_marker(iter);
}
}
+ mutex_unlock(&markers_mutex);
}
/*
@@ -293,7 +294,6 @@ static void marker_update_probes(struct module *probe_module)
{
int refcount = 0;
- mutex_lock(&markers_mutex);
/* Core kernel markers */
marker_update_probe_range(__start___markers,
__stop___markers, probe_module, &refcount);
@@ -303,7 +303,6 @@ static void marker_update_probes(struct module *probe_module)
synchronize_sched();
deferred_sync = 0;
}
- mutex_unlock(&markers_mutex);
}
/**
@@ -320,7 +319,7 @@ int marker_probe_register(const char *name, const char *format,
marker_probe_func *probe, void *private)
{
struct marker_entry *entry;
- int ret = 0, need_update = 0;
+ int ret = 0;
mutex_lock(&markers_mutex);
entry = get_marker(name);
@@ -335,11 +334,11 @@ int marker_probe_register(const char *name, const char *format,
ret = add_marker(name, format, probe, private);
if (ret)
goto end;
- need_update = 1;
+ mutex_unlock(&markers_mutex);
+ marker_update_probes(NULL);
+ return ret;
end:
mutex_unlock(&markers_mutex);
- if (need_update)
- marker_update_probes(NULL);
return ret;
}
EXPORT_SYMBOL_GPL(marker_probe_register);
@@ -355,7 +354,6 @@ void *marker_probe_unregister(const char *name)
struct module *probe_module;
struct marker_entry *entry;
void *private;
- int need_update = 0;
mutex_lock(&markers_mutex);
entry = get_marker(name);
@@ -368,11 +366,11 @@ void *marker_probe_unregister(const char *name)
probe_module = __module_text_address((unsigned long)entry->probe);
private = remove_marker(name);
deferred_sync = 1;
- need_update = 1;
+ mutex_unlock(&markers_mutex);
+ marker_update_probes(probe_module);
+ return private;
end:
mutex_unlock(&markers_mutex);
- if (need_update)
- marker_update_probes(probe_module);
return private;
}
EXPORT_SYMBOL_GPL(marker_probe_unregister);
@@ -392,7 +390,6 @@ void *marker_probe_unregister_private_data(void *private)
struct marker_entry *entry;
int found = 0;
unsigned int i;
- int need_update = 0;
mutex_lock(&markers_mutex);
for (i = 0; i < MARKER_TABLE_SIZE; i++) {
@@ -414,11 +411,11 @@ iter_end:
probe_module = __module_text_address((unsigned long)entry->probe);
private = remove_marker(entry->name);
deferred_sync = 1;
- need_update = 1;
+ mutex_unlock(&markers_mutex);
+ marker_update_probes(probe_module);
+ return private;
end:
mutex_unlock(&markers_mutex);
- if (need_update)
- marker_update_probes(probe_module);
return private;
}
EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
@@ -434,7 +431,7 @@ EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
int marker_arm(const char *name)
{
struct marker_entry *entry;
- int ret = 0, need_update = 0;
+ int ret = 0;
mutex_lock(&markers_mutex);
entry = get_marker(name);
@@ -447,11 +444,9 @@ int marker_arm(const char *name)
*/
if (entry->refcount++)
goto end;
- need_update = 1;
end:
mutex_unlock(&markers_mutex);
- if (need_update)
- marker_update_probes(NULL);
+ marker_update_probes(NULL);
return ret;
}
EXPORT_SYMBOL_GPL(marker_arm);
@@ -467,7 +462,7 @@ EXPORT_SYMBOL_GPL(marker_arm);
int marker_disarm(const char *name)
{
struct marker_entry *entry;
- int ret = 0, need_update = 0;
+ int ret = 0;
mutex_lock(&markers_mutex);
entry = get_marker(name);
@@ -486,11 +481,9 @@ int marker_disarm(const char *name)
ret = -EPERM;
goto end;
}
- need_update = 1;
end:
mutex_unlock(&markers_mutex);
- if (need_update)
- marker_update_probes(NULL);
+ marker_update_probes(NULL);
return ret;
}
EXPORT_SYMBOL_GPL(marker_disarm);
diff --git a/kernel/module.c b/kernel/module.c
index 3202c995007..bd60278ee70 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -47,8 +47,6 @@
#include <asm/cacheflush.h>
#include <linux/license.h>
-extern int module_sysfs_initialized;
-
#if 0
#define DEBUGP printk
#else
@@ -67,6 +65,9 @@ extern int module_sysfs_initialized;
static DEFINE_MUTEX(module_mutex);
static LIST_HEAD(modules);
+/* Waiting for a module to finish initializing? */
+static DECLARE_WAIT_QUEUE_HEAD(module_wq);
+
static BLOCKING_NOTIFIER_HEAD(module_notify_list);
int register_module_notifier(struct notifier_block * nb)
@@ -81,12 +82,16 @@ int unregister_module_notifier(struct notifier_block * nb)
}
EXPORT_SYMBOL(unregister_module_notifier);
-/* We require a truly strong try_module_get() */
+/* We require a truly strong try_module_get(): 0 means failure due to
+ ongoing or failed initialization etc. */
static inline int strong_try_module_get(struct module *mod)
{
if (mod && mod->state == MODULE_STATE_COMING)
+ return -EBUSY;
+ if (try_module_get(mod))
return 0;
- return try_module_get(mod);
+ else
+ return -ENOENT;
}
static inline void add_taint_module(struct module *mod, unsigned flag)
@@ -425,6 +430,14 @@ static unsigned int find_pcpusec(Elf_Ehdr *hdr,
return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
}
+static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ memcpy(pcpudest + per_cpu_offset(cpu), from, size);
+}
+
static int percpu_modinit(void)
{
pcpu_num_used = 2;
@@ -497,6 +510,8 @@ static struct module_attribute modinfo_##field = { \
MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
+static char last_unloaded_module[MODULE_NAME_LEN+1];
+
#ifdef CONFIG_MODULE_UNLOAD
/* Init the unload section of the module. */
static void module_unload_init(struct module *mod)
@@ -538,11 +553,21 @@ static int already_uses(struct module *a, struct module *b)
static int use_module(struct module *a, struct module *b)
{
struct module_use *use;
- int no_warn;
+ int no_warn, err;
if (b == NULL || already_uses(a, b)) return 1;
- if (!strong_try_module_get(b))
+ /* If we're interrupted or time out, we fail. */
+ if (wait_event_interruptible_timeout(
+ module_wq, (err = strong_try_module_get(b)) != -EBUSY,
+ 30 * HZ) <= 0) {
+ printk("%s: gave up waiting for init of module %s.\n",
+ a->name, b->name);
+ return 0;
+ }
+
+ /* If strong_try_module_get() returned a different error, we fail. */
+ if (err)
return 0;
DEBUGP("Allocating new usage for %s.\n", a->name);
@@ -720,6 +745,8 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
mod->exit();
mutex_lock(&module_mutex);
}
+ /* Store the name of the last unloaded module for diagnostic purposes */
+ strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
free_module(mod);
out:
@@ -813,7 +840,7 @@ static inline void module_unload_free(struct module *mod)
static inline int use_module(struct module *a, struct module *b)
{
- return strong_try_module_get(b);
+ return strong_try_module_get(b) == 0;
}
static inline void module_unload_init(struct module *mod)
@@ -952,7 +979,8 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
ret = __find_symbol(name, &owner, &crc,
!(mod->taints & TAINT_PROPRIETARY_MODULE));
if (ret) {
- /* use_module can fail due to OOM, or module unloading */
+ /* use_module can fail due to OOM,
+ or module initialization or unloading */
if (!check_version(sechdrs, versindex, name, mod, crc) ||
!use_module(mod, owner))
ret = 0;
@@ -1120,7 +1148,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
++loaded;
}
- notes_attrs->dir = kobject_add_dir(&mod->mkobj.kobj, "notes");
+ notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
if (!notes_attrs->dir)
goto out;
@@ -1210,6 +1238,7 @@ void module_remove_modinfo_attrs(struct module *mod)
int mod_sysfs_init(struct module *mod)
{
int err;
+ struct kobject *kobj;
if (!module_sysfs_initialized) {
printk(KERN_ERR "%s: module sysfs not initialized\n",
@@ -1217,15 +1246,25 @@ int mod_sysfs_init(struct module *mod)
err = -EINVAL;
goto out;
}
- memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
- err = kobject_set_name(&mod->mkobj.kobj, "%s", mod->name);
- if (err)
+
+ kobj = kset_find_obj(module_kset, mod->name);
+ if (kobj) {
+ printk(KERN_ERR "%s: module is already loaded\n", mod->name);
+ kobject_put(kobj);
+ err = -EINVAL;
goto out;
- kobj_set_kset_s(&mod->mkobj, module_subsys);
+ }
+
mod->mkobj.mod = mod;
- kobject_init(&mod->mkobj.kobj);
+ memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
+ mod->mkobj.kobj.kset = module_kset;
+ err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
+ "%s", mod->name);
+ if (err)
+ kobject_put(&mod->mkobj.kobj);
+ /* delay uevent until full sysfs population */
out:
return err;
}
@@ -1236,12 +1275,7 @@ int mod_sysfs_setup(struct module *mod,
{
int err;
- /* delay uevent until full sysfs population */
- err = kobject_add(&mod->mkobj.kobj);
- if (err)
- goto out;
-
- mod->holders_dir = kobject_add_dir(&mod->mkobj.kobj, "holders");
+ mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
if (!mod->holders_dir) {
err = -ENOMEM;
goto out_unreg;
@@ -1261,11 +1295,9 @@ int mod_sysfs_setup(struct module *mod,
out_unreg_param:
module_param_sysfs_remove(mod);
out_unreg_holders:
- kobject_unregister(mod->holders_dir);
+ kobject_put(mod->holders_dir);
out_unreg:
- kobject_del(&mod->mkobj.kobj);
kobject_put(&mod->mkobj.kobj);
-out:
return err;
}
#endif
@@ -1274,9 +1306,20 @@ static void mod_kobject_remove(struct module *mod)
{
module_remove_modinfo_attrs(mod);
module_param_sysfs_remove(mod);
- kobject_unregister(mod->mkobj.drivers_dir);
- kobject_unregister(mod->holders_dir);
- kobject_unregister(&mod->mkobj.kobj);
+ kobject_put(mod->mkobj.drivers_dir);
+ kobject_put(mod->holders_dir);
+ kobject_put(&mod->mkobj.kobj);
+}
+
+/*
+ * link the module while the whole machine is stopped with interrupts off
+ * - this defends against kallsyms not taking locks
+ */
+static int __link_module(void *_mod)
+{
+ struct module *mod = _mod;
+ list_add(&mod->list, &modules);
+ return 0;
}
/*
@@ -1328,7 +1371,7 @@ void *__symbol_get(const char *symbol)
preempt_disable();
value = __find_symbol(symbol, &owner, &crc, 1);
- if (value && !strong_try_module_get(owner))
+ if (value && strong_try_module_get(owner) != 0)
value = 0;
preempt_enable();
@@ -1369,7 +1412,7 @@ dup:
return ret;
}
-/* Change all symbols so that sh_value encodes the pointer directly. */
+/* Change all symbols so that st_value encodes the pointer directly. */
static int simplify_symbols(Elf_Shdr *sechdrs,
unsigned int symindex,
const char *strtab,
@@ -1882,16 +1925,16 @@ static struct module *load_module(void __user *umod,
/* Now we've moved module, initialize linked lists, etc. */
module_unload_init(mod);
- /* Initialize kobject, so we can reference it. */
+ /* add kobject, so we can reference it. */
err = mod_sysfs_init(mod);
if (err)
- goto cleanup;
+ goto free_unload;
/* Set up license info based on the info section */
set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
if (strcmp(mod->name, "ndiswrapper") == 0)
- add_taint(TAINT_PROPRIETARY_MODULE);
+ add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
if (strcmp(mod->name, "driverloader") == 0)
add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
@@ -2021,6 +2064,11 @@ static struct module *load_module(void __user *umod,
printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
mod->name);
+ /* Now sew it into the lists so we can get lockdep and oops
+	 * info during argument parsing. No one should access us, since
+ * strong_try_module_get() will fail. */
+ stop_machine_run(__link_module, mod, NR_CPUS);
+
/* Size of section 0 is 0, so this works well if no params */
err = parse_args(mod->name, mod->args,
(struct kernel_param *)
@@ -2029,7 +2077,7 @@ static struct module *load_module(void __user *umod,
/ sizeof(struct kernel_param),
NULL);
if (err < 0)
- goto arch_cleanup;
+ goto unlink;
err = mod_sysfs_setup(mod,
(struct kernel_param *)
@@ -2037,7 +2085,7 @@ static struct module *load_module(void __user *umod,
sechdrs[setupindex].sh_size
/ sizeof(struct kernel_param));
if (err < 0)
- goto arch_cleanup;
+ goto unlink;
add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
add_notes_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
@@ -2052,9 +2100,13 @@ static struct module *load_module(void __user *umod,
/* Done! */
return mod;
- arch_cleanup:
+ unlink:
+ stop_machine_run(__unlink_module, mod, NR_CPUS);
module_arch_cleanup(mod);
cleanup:
+ kobject_del(&mod->mkobj.kobj);
+ kobject_put(&mod->mkobj.kobj);
+ free_unload:
module_unload_free(mod);
module_free(mod, mod->module_init);
free_core:
@@ -2074,17 +2126,6 @@ static struct module *load_module(void __user *umod,
goto free_hdr;
}
-/*
- * link the module with the whole machine is stopped with interrupts off
- * - this defends against kallsyms not taking locks
- */
-static int __link_module(void *_mod)
-{
- struct module *mod = _mod;
- list_add(&mod->list, &modules);
- return 0;
-}
-
/* This is where the real work happens */
asmlinkage long
sys_init_module(void __user *umod,
@@ -2109,10 +2150,6 @@ sys_init_module(void __user *umod,
return PTR_ERR(mod);
}
- /* Now sew it into the lists. They won't access us, since
- strong_try_module_get() will fail. */
- stop_machine_run(__link_module, mod, NR_CPUS);
-
/* Drop lock so they can recurse */
mutex_unlock(&module_mutex);
@@ -2131,6 +2168,7 @@ sys_init_module(void __user *umod,
mutex_lock(&module_mutex);
free_module(mod);
mutex_unlock(&module_mutex);
+ wake_up(&module_wq);
return ret;
}
@@ -2145,6 +2183,7 @@ sys_init_module(void __user *umod,
mod->init_size = 0;
mod->init_text_size = 0;
mutex_unlock(&module_mutex);
+ wake_up(&module_wq);
return 0;
}
@@ -2209,32 +2248,41 @@ static const char *get_ksymbol(struct module *mod,
return mod->strtab + mod->symtab[best].st_name;
}
-/* For kallsyms to ask for address resolution. NULL means not found.
- We don't lock, as this is used for oops resolution and races are a
- lesser concern. */
-const char *module_address_lookup(unsigned long addr,
- unsigned long *size,
- unsigned long *offset,
- char **modname)
+/* For kallsyms to ask for address resolution. NULL means not found. Careful
+ * not to lock: to avoid deadlock on oopses, simply disable preemption. */
+char *module_address_lookup(unsigned long addr,
+ unsigned long *size,
+ unsigned long *offset,
+ char **modname,
+ char *namebuf)
{
struct module *mod;
+ const char *ret = NULL;
+ preempt_disable();
list_for_each_entry(mod, &modules, list) {
if (within(addr, mod->module_init, mod->init_size)
|| within(addr, mod->module_core, mod->core_size)) {
if (modname)
*modname = mod->name;
- return get_ksymbol(mod, addr, size, offset);
+ ret = get_ksymbol(mod, addr, size, offset);
+ break;
}
}
- return NULL;
+ /* Make a copy in here where it's safe */
+ if (ret) {
+ strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
+ ret = namebuf;
+ }
+ preempt_enable();
+ return (char *)ret;
}
int lookup_module_symbol_name(unsigned long addr, char *symname)
{
struct module *mod;
- mutex_lock(&module_mutex);
+ preempt_disable();
list_for_each_entry(mod, &modules, list) {
if (within(addr, mod->module_init, mod->init_size) ||
within(addr, mod->module_core, mod->core_size)) {
@@ -2244,12 +2292,12 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
if (!sym)
goto out;
strlcpy(symname, sym, KSYM_NAME_LEN);
- mutex_unlock(&module_mutex);
+ preempt_enable();
return 0;
}
}
out:
- mutex_unlock(&module_mutex);
+ preempt_enable();
return -ERANGE;
}
@@ -2258,7 +2306,7 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
{
struct module *mod;
- mutex_lock(&module_mutex);
+ preempt_disable();
list_for_each_entry(mod, &modules, list) {
if (within(addr, mod->module_init, mod->init_size) ||
within(addr, mod->module_core, mod->core_size)) {
@@ -2271,12 +2319,12 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
strlcpy(modname, mod->name, MODULE_NAME_LEN);
if (name)
strlcpy(name, sym, KSYM_NAME_LEN);
- mutex_unlock(&module_mutex);
+ preempt_enable();
return 0;
}
}
out:
- mutex_unlock(&module_mutex);
+ preempt_enable();
return -ERANGE;
}
@@ -2285,7 +2333,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
{
struct module *mod;
- mutex_lock(&module_mutex);
+ preempt_disable();
list_for_each_entry(mod, &modules, list) {
if (symnum < mod->num_symtab) {
*value = mod->symtab[symnum].st_value;
@@ -2294,12 +2342,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
KSYM_NAME_LEN);
strlcpy(module_name, mod->name, MODULE_NAME_LEN);
*exported = is_exported(name, mod);
- mutex_unlock(&module_mutex);
+ preempt_enable();
return 0;
}
symnum -= mod->num_symtab;
}
- mutex_unlock(&module_mutex);
+ preempt_enable();
return -ERANGE;
}
@@ -2322,6 +2370,7 @@ unsigned long module_kallsyms_lookup_name(const char *name)
unsigned long ret = 0;
/* Don't lock: we're in enough trouble already. */
+ preempt_disable();
if ((colon = strchr(name, ':')) != NULL) {
*colon = '\0';
if ((mod = find_module(name)) != NULL)
@@ -2332,6 +2381,7 @@ unsigned long module_kallsyms_lookup_name(const char *name)
if ((ret = mod_find_symname(mod, name)) != 0)
break;
}
+ preempt_enable();
return ret;
}
#endif /* CONFIG_KALLSYMS */
@@ -2353,21 +2403,30 @@ static void m_stop(struct seq_file *m, void *p)
mutex_unlock(&module_mutex);
}
-static char *taint_flags(unsigned int taints, char *buf)
+static char *module_flags(struct module *mod, char *buf)
{
int bx = 0;
- if (taints) {
+ if (mod->taints ||
+ mod->state == MODULE_STATE_GOING ||
+ mod->state == MODULE_STATE_COMING) {
buf[bx++] = '(';
- if (taints & TAINT_PROPRIETARY_MODULE)
+ if (mod->taints & TAINT_PROPRIETARY_MODULE)
buf[bx++] = 'P';
- if (taints & TAINT_FORCED_MODULE)
+ if (mod->taints & TAINT_FORCED_MODULE)
buf[bx++] = 'F';
/*
* TAINT_FORCED_RMMOD: could be added.
* TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
* apply to modules.
*/
+
+ /* Show a - for module-is-being-unloaded */
+ if (mod->state == MODULE_STATE_GOING)
+ buf[bx++] = '-';
+ /* Show a + for module-is-being-loaded */
+ if (mod->state == MODULE_STATE_COMING)
+ buf[bx++] = '+';
buf[bx++] = ')';
}
buf[bx] = '\0';
@@ -2394,7 +2453,7 @@ static int m_show(struct seq_file *m, void *p)
/* Taints info */
if (mod->taints)
- seq_printf(m, " %s", taint_flags(mod->taints, buf));
+ seq_printf(m, " %s", module_flags(mod, buf));
seq_printf(m, "\n");
return 0;
@@ -2489,97 +2548,12 @@ void print_modules(void)
printk("Modules linked in:");
list_for_each_entry(mod, &modules, list)
- printk(" %s%s", mod->name, taint_flags(mod->taints, buf));
+ printk(" %s%s", mod->name, module_flags(mod, buf));
+ if (last_unloaded_module[0])
+ printk(" [last unloaded: %s]", last_unloaded_module);
printk("\n");
}
-#ifdef CONFIG_SYSFS
-static char *make_driver_name(struct device_driver *drv)
-{
- char *driver_name;
-
- driver_name = kmalloc(strlen(drv->name) + strlen(drv->bus->name) + 2,
- GFP_KERNEL);
- if (!driver_name)
- return NULL;
-
- sprintf(driver_name, "%s:%s", drv->bus->name, drv->name);
- return driver_name;
-}
-
-static void module_create_drivers_dir(struct module_kobject *mk)
-{
- if (!mk || mk->drivers_dir)
- return;
-
- mk->drivers_dir = kobject_add_dir(&mk->kobj, "drivers");
-}
-
-void module_add_driver(struct module *mod, struct device_driver *drv)
-{
- char *driver_name;
- int no_warn;
- struct module_kobject *mk = NULL;
-
- if (!drv)
- return;
-
- if (mod)
- mk = &mod->mkobj;
- else if (drv->mod_name) {
- struct kobject *mkobj;
-
- /* Lookup built-in module entry in /sys/modules */
- mkobj = kset_find_obj(&module_subsys, drv->mod_name);
- if (mkobj) {
- mk = container_of(mkobj, struct module_kobject, kobj);
- /* remember our module structure */
- drv->mkobj = mk;
- /* kset_find_obj took a reference */
- kobject_put(mkobj);
- }
- }
-
- if (!mk)
- return;
-
- /* Don't check return codes; these calls are idempotent */
- no_warn = sysfs_create_link(&drv->kobj, &mk->kobj, "module");
- driver_name = make_driver_name(drv);
- if (driver_name) {
- module_create_drivers_dir(mk);
- no_warn = sysfs_create_link(mk->drivers_dir, &drv->kobj,
- driver_name);
- kfree(driver_name);
- }
-}
-EXPORT_SYMBOL(module_add_driver);
-
-void module_remove_driver(struct device_driver *drv)
-{
- struct module_kobject *mk = NULL;
- char *driver_name;
-
- if (!drv)
- return;
-
- sysfs_remove_link(&drv->kobj, "module");
-
- if (drv->owner)
- mk = &drv->owner->mkobj;
- else if (drv->mkobj)
- mk = drv->mkobj;
- if (mk && mk->drivers_dir) {
- driver_name = make_driver_name(drv);
- if (driver_name) {
- sysfs_remove_link(mk->drivers_dir, driver_name);
- kfree(driver_name);
- }
- }
-}
-EXPORT_SYMBOL(module_remove_driver);
-#endif
-
#ifdef CONFIG_MODVERSIONS
/* Generate the signature for struct module here, too, for modversions. */
void struct_module(struct module *mod) { return; }
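With the hunks above, use_module() no longer fails immediately when the target module is still initializing: it sleeps on module_wq until strong_try_module_get() stops returning -EBUSY or a 30 second timeout expires, and sys_init_module() wakes the queue whether init succeeds or fails. A userspace sketch of that wait-until-ready-or-timeout pattern (pthreads; module_wq, state_coming and the one second init delay are illustrative stand-ins, not the kernel primitives):

/* Standalone analogue of "wait for the other module to leave COMING". */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;	/* ~ module_wq */
static int state_coming = 1;				/* ~ MODULE_STATE_COMING */

static void *init_thread(void *arg)
{
	(void)arg;
	sleep(1);				/* module init runs... */
	pthread_mutex_lock(&lock);
	state_coming = 0;			/* init finished */
	pthread_cond_broadcast(&wq);		/* ~ wake_up(&module_wq) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static int use_module_demo(int timeout_sec)
{
	struct timespec deadline;
	int err = 0, busy;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;

	pthread_mutex_lock(&lock);
	while (state_coming && err != ETIMEDOUT)
		err = pthread_cond_timedwait(&wq, &lock, &deadline);
	busy = state_coming;
	pthread_mutex_unlock(&lock);

	return busy ? -1 : 0;		/* -1 ~ "gave up waiting for init" */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, init_thread, NULL);
	printf("use_module: %s\n", use_module_demo(30) ? "failed" : "ok");
	pthread_join(t, NULL);
	return 0;
}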
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d7fe50cc556..d9ec9b66625 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -166,9 +166,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* got a signal? (This code gets eliminated in the
* TASK_UNINTERRUPTIBLE case.)
*/
- if (unlikely(state == TASK_INTERRUPTIBLE &&
- signal_pending(task))) {
- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+ if (unlikely((state == TASK_INTERRUPTIBLE &&
+ signal_pending(task)) ||
+ (state == TASK_KILLABLE &&
+ fatal_signal_pending(task)))) {
+ mutex_remove_waiter(lock, &waiter,
+ task_thread_info(task));
mutex_release(&lock->dep_map, 1, ip);
spin_unlock_mutex(&lock->wait_lock, flags);
@@ -211,6 +214,14 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
EXPORT_SYMBOL_GPL(mutex_lock_nested);
int __sched
+mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+{
+ might_sleep();
+ return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+
+int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
@@ -272,6 +283,9 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
* mutex_lock_interruptible() and mutex_trylock().
*/
static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count);
+
+static noinline int fastcall __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
/***
@@ -294,6 +308,14 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
EXPORT_SYMBOL(mutex_lock_interruptible);
+int fastcall __sched mutex_lock_killable(struct mutex *lock)
+{
+ might_sleep();
+ return __mutex_fastpath_lock_retval
+ (&lock->count, __mutex_lock_killable_slowpath);
+}
+EXPORT_SYMBOL(mutex_lock_killable);
+
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
@@ -303,6 +325,14 @@ __mutex_lock_slowpath(atomic_t *lock_count)
}
static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count)
+{
+ struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+ return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
+}
+
+static noinline int fastcall __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
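The mutex_lock_killable() variant added above behaves like mutex_lock_interruptible(), except that only a fatal signal (one that will kill the task, such as SIGKILL) aborts the wait. A kernel-context sketch of the expected caller pattern; it is not compilable on its own, and my_mutex and do_work() are illustrative names, not part of this patch:

/* Sketch of a caller of the new mutex_lock_killable(); kernel context,
 * my_mutex and do_work() are illustrative. */
static DEFINE_MUTEX(my_mutex);

static int do_work(void)
{
	if (mutex_lock_killable(&my_mutex))
		return -EINTR;	/* woken by a fatal signal, lock not taken */
	/* ... critical section ... */
	mutex_unlock(&my_mutex);
	return 0;
}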
diff --git a/kernel/panic.c b/kernel/panic.c
index 6f6e03e9159..d9e90cfe329 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -19,6 +19,8 @@
#include <linux/nmi.h>
#include <linux/kexec.h>
#include <linux/debug_locks.h>
+#include <linux/random.h>
+#include <linux/kallsyms.h>
int panic_on_oops;
int tainted;
@@ -266,13 +268,52 @@ void oops_enter(void)
}
/*
+ * 64-bit random ID for oopses:
+ */
+static u64 oops_id;
+
+static int init_oops_id(void)
+{
+ if (!oops_id)
+ get_random_bytes(&oops_id, sizeof(oops_id));
+
+ return 0;
+}
+late_initcall(init_oops_id);
+
+static void print_oops_end_marker(void)
+{
+ init_oops_id();
+ printk(KERN_WARNING "---[ end trace %016llx ]---\n",
+ (unsigned long long)oops_id);
+}
+
+/*
* Called when the architecture exits its oops handler, after printing
* everything.
*/
void oops_exit(void)
{
do_oops_enter_exit();
+ print_oops_end_marker();
+}
+
+#ifdef WANT_WARN_ON_SLOWPATH
+void warn_on_slowpath(const char *file, int line)
+{
+ char function[KSYM_SYMBOL_LEN];
+ unsigned long caller = (unsigned long) __builtin_return_address(0);
+ sprint_symbol(function, caller);
+
+ printk(KERN_WARNING "------------[ cut here ]------------\n");
+ printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
+ line, function);
+ print_modules();
+ dump_stack();
+ print_oops_end_marker();
}
+EXPORT_SYMBOL(warn_on_slowpath);
+#endif
#ifdef CONFIG_CC_STACKPROTECTOR
/*
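The end-of-oops marker added above prints a random 64-bit id that is generated once per boot, so reports containing several oopses or warnings can be grouped. A standalone sketch of the lazily initialized id and the marker format, reading /dev/urandom in place of get_random_bytes():

/* Userspace sketch (not kernel code) of the lazily initialized oops id. */
#include <stdio.h>

static unsigned long long oops_id;

static void init_oops_id(void)
{
	FILE *f;

	if (oops_id)
		return;
	f = fopen("/dev/urandom", "rb");
	if (!f || fread(&oops_id, sizeof(oops_id), 1, f) != 1)
		oops_id = 1;	/* fallback for the sketch only */
	if (f)
		fclose(f);
}

static void print_oops_end_marker(void)
{
	init_oops_id();
	printf("---[ end trace %016llx ]---\n", oops_id);
}

int main(void)
{
	print_oops_end_marker();
	print_oops_end_marker();	/* same id both times: it tags the boot */
	return 0;
}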
diff --git a/kernel/params.c b/kernel/params.c
index 16f269e9ddc..42fe5e6126c 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -376,8 +376,6 @@ int param_get_string(char *buffer, struct kernel_param *kp)
extern struct kernel_param __start___param[], __stop___param[];
-#define MAX_KBUILD_MODNAME KOBJ_NAME_LEN
-
struct param_attribute
{
struct module_attribute mattr;
@@ -472,7 +470,7 @@ param_sysfs_setup(struct module_kobject *mk,
sizeof(mp->grp.attrs[0]));
size[1] = (valid_attrs + 1) * sizeof(mp->grp.attrs[0]);
- mp = kmalloc(size[0] + size[1], GFP_KERNEL);
+ mp = kzalloc(size[0] + size[1], GFP_KERNEL);
if (!mp)
return ERR_PTR(-ENOMEM);
@@ -560,11 +558,10 @@ static void __init kernel_param_sysfs_setup(const char *name,
BUG_ON(!mk);
mk->mod = THIS_MODULE;
- kobj_set_kset_s(mk, module_subsys);
- kobject_set_name(&mk->kobj, name);
- kobject_init(&mk->kobj);
- ret = kobject_add(&mk->kobj);
+ mk->kobj.kset = module_kset;
+ ret = kobject_init_and_add(&mk->kobj, &module_ktype, NULL, "%s", name);
if (ret) {
+ kobject_put(&mk->kobj);
printk(KERN_ERR "Module '%s' failed to be added to sysfs, "
"error number %d\n", name, ret);
printk(KERN_ERR "The system will be unstable now.\n");
@@ -588,23 +585,20 @@ static void __init param_sysfs_builtin(void)
{
struct kernel_param *kp, *kp_begin = NULL;
unsigned int i, name_len, count = 0;
- char modname[MAX_KBUILD_MODNAME + 1] = "";
+ char modname[MODULE_NAME_LEN + 1] = "";
for (i=0; i < __stop___param - __start___param; i++) {
char *dot;
- size_t kplen;
+ size_t max_name_len;
kp = &__start___param[i];
- kplen = strlen(kp->name);
+ max_name_len =
+ min_t(size_t, MODULE_NAME_LEN, strlen(kp->name));
- /* We do not handle args without periods. */
- if (kplen > MAX_KBUILD_MODNAME) {
- DEBUGP("kernel parameter name is too long: %s\n", kp->name);
- continue;
- }
- dot = memchr(kp->name, '.', kplen);
+ dot = memchr(kp->name, '.', max_name_len);
if (!dot) {
- DEBUGP("couldn't find period in %s\n", kp->name);
+ DEBUGP("couldn't find period in first %d characters "
+ "of %s\n", MODULE_NAME_LEN, kp->name);
continue;
}
name_len = dot - kp->name;
@@ -682,8 +676,6 @@ static struct sysfs_ops module_sysfs_ops = {
.store = module_attr_store,
};
-static struct kobj_type module_ktype;
-
static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
struct kobj_type *ktype = get_ktype(kobj);
@@ -697,10 +689,10 @@ static struct kset_uevent_ops module_uevent_ops = {
.filter = uevent_filter,
};
-decl_subsys(module, &module_ktype, &module_uevent_ops);
+struct kset *module_kset;
int module_sysfs_initialized;
-static struct kobj_type module_ktype = {
+struct kobj_type module_ktype = {
.sysfs_ops = &module_sysfs_ops,
};
@@ -709,13 +701,11 @@ static struct kobj_type module_ktype = {
*/
static int __init param_sysfs_init(void)
{
- int ret;
-
- ret = subsystem_register(&module_subsys);
- if (ret < 0) {
- printk(KERN_WARNING "%s (%d): subsystem_register error: %d\n",
- __FILE__, __LINE__, ret);
- return ret;
+ module_kset = kset_create_and_add("module", &module_uevent_ops, NULL);
+ if (!module_kset) {
+ printk(KERN_WARNING "%s (%d): error creating kset\n",
+ __FILE__, __LINE__);
+ return -ENOMEM;
}
module_sysfs_initialized = 1;
@@ -725,14 +715,7 @@ static int __init param_sysfs_init(void)
}
subsys_initcall(param_sysfs_init);
-#else
-#if 0
-static struct sysfs_ops module_sysfs_ops = {
- .show = NULL,
- .store = NULL,
-};
-#endif
-#endif
+#endif /* CONFIG_SYSFS */
EXPORT_SYMBOL(param_set_byte);
EXPORT_SYMBOL(param_get_byte);
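param_sysfs_builtin() above now derives the module name by looking for the first '.' within the first MODULE_NAME_LEN characters of each built-in "module.parameter" name, instead of rejecting long names outright. A standalone sketch of that split (plain C; the MODULE_NAME_LEN value and the example parameter name are assumptions):

/* Standalone sketch (not kernel code) of the built-in parameter name split. */
#include <stdio.h>
#include <string.h>

#define MODULE_NAME_LEN 56	/* illustrative value for the sketch */

int main(void)
{
	const char *kp_name = "usbcore.autosuspend";
	size_t max_name_len = strlen(kp_name);
	const char *dot;
	char modname[MODULE_NAME_LEN + 1];

	if (max_name_len > MODULE_NAME_LEN)
		max_name_len = MODULE_NAME_LEN;	/* ~ min_t(size_t, ...) */

	dot = memchr(kp_name, '.', max_name_len);
	if (!dot) {
		printf("no period in %s, skipping\n", kp_name);
		return 0;
	}
	snprintf(modname, sizeof(modname), "%.*s",
		 (int)(dot - kp_name), kp_name);
	printf("module \"%s\", parameter \"%s\"\n", modname, dot + 1);
	return 0;
}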
diff --git a/kernel/pid.c b/kernel/pid.c
index d1db36b9467..f815455431b 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -537,6 +537,7 @@ err_alloc:
return NULL;
}
+#ifdef CONFIG_PID_NS
static struct pid_namespace *create_pid_namespace(int level)
{
struct pid_namespace *ns;
@@ -621,6 +622,7 @@ void free_pid_ns(struct kref *kref)
if (parent != NULL)
put_pid_ns(parent);
}
+#endif /* CONFIG_PID_NS */
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 68c96376e84..0b7c82ac467 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -967,6 +967,7 @@ static void check_thread_timers(struct task_struct *tsk,
{
int maxfire;
struct list_head *timers = tsk->cpu_timers;
+ struct signal_struct *const sig = tsk->signal;
maxfire = 20;
tsk->it_prof_expires = cputime_zero;
@@ -1011,6 +1012,35 @@ static void check_thread_timers(struct task_struct *tsk,
t->firing = 1;
list_move_tail(&t->entry, firing);
}
+
+ /*
+ * Check for the special case thread timers.
+ */
+ if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
+ unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
+ unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;
+
+ if (hard != RLIM_INFINITY &&
+ tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
+ /*
+ * At the hard limit, we just die.
+ * No need to calculate anything else now.
+ */
+ __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
+ return;
+ }
+ if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
+ /*
+ * At the soft limit, send a SIGXCPU every second.
+ */
+ if (sig->rlim[RLIMIT_RTTIME].rlim_cur
+ < sig->rlim[RLIMIT_RTTIME].rlim_max) {
+ sig->rlim[RLIMIT_RTTIME].rlim_cur +=
+ USEC_PER_SEC;
+ }
+ __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+ }
+ }
}
/*
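The RLIMIT_RTTIME check above compares tsk->rt.timeout, which counts scheduler ticks, against limits expressed in microseconds, so each limit is converted with DIV_ROUND_UP(limit, USEC_PER_SEC/HZ): past the soft limit the task gets SIGXCPU and the soft limit is raised by a second's worth of microseconds, past the hard limit it gets SIGKILL. A standalone illustration of the unit conversion (plain C; HZ=1000 and the 0.95 s limit are assumptions for the example):

/* Standalone arithmetic check (not kernel code) of the usec-to-tick
 * conversion used by the RLIMIT_RTTIME checks above. */
#include <stdio.h>

#define HZ		1000
#define USEC_PER_SEC	1000000UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long soft_usec = 950000;	/* 0.95 s soft limit */
	unsigned long usec_per_tick = USEC_PER_SEC / HZ;	/* 1000 us */
	unsigned long soft_ticks = DIV_ROUND_UP(soft_usec, usec_per_tick);

	/* 950000 us at 1000 us per tick -> 950 ticks; a timeout above
	 * that triggers the SIGXCPU path. */
	printf("soft limit = %lu ticks\n", soft_ticks);
	return 0;
}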
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 8e186c67814..ef9b802738a 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -44,9 +44,30 @@ config PM_VERBOSE
---help---
This option enables verbose messages from the Power Management code.
+config CAN_PM_TRACE
+ def_bool y
+ depends on PM_DEBUG && PM_SLEEP && EXPERIMENTAL
+
config PM_TRACE
+ bool
+ help
+ This enables code to save the last PM event point across
+	  reboot. The architecture needs to support this; x86, for
+	  example, does so by saving things in the RTC (see below).
+
+ The architecture specific code must provide the extern
+ functions from <linux/resume-trace.h> as well as the
+ <asm/resume-trace.h> header with a TRACE_RESUME() macro.
+
+ The way the information is presented is architecture-
+	  dependent; x86 will print the information during a
+ late_initcall.
+
+config PM_TRACE_RTC
bool "Suspend/resume event tracing"
- depends on PM_DEBUG && X86 && PM_SLEEP && EXPERIMENTAL
+ depends on CAN_PM_TRACE
+ depends on X86
+ select PM_TRACE
default n
---help---
This enables some cheesy code to save the last PM event point in the
@@ -63,7 +84,8 @@ config PM_TRACE
config PM_SLEEP_SMP
bool
- depends on SUSPEND_SMP_POSSIBLE || HIBERNATION_SMP_POSSIBLE
+ depends on SMP
+ depends on ARCH_SUSPEND_POSSIBLE || ARCH_HIBERNATION_POSSIBLE
depends on PM_SLEEP
select HOTPLUG_CPU
default y
@@ -73,46 +95,29 @@ config PM_SLEEP
depends on SUSPEND || HIBERNATION
default y
-config SUSPEND_UP_POSSIBLE
- bool
- depends on (X86 && !X86_VOYAGER) || PPC || ARM || BLACKFIN || MIPS \
- || SUPERH || FRV
- depends on !SMP
- default y
-
-config SUSPEND_SMP_POSSIBLE
- bool
- depends on (X86 && !X86_VOYAGER) \
- || (PPC && (PPC_PSERIES || PPC_PMAC)) || ARM
- depends on SMP
- default y
-
config SUSPEND
bool "Suspend to RAM and standby"
- depends on PM
- depends on SUSPEND_UP_POSSIBLE || SUSPEND_SMP_POSSIBLE
+ depends on PM && ARCH_SUSPEND_POSSIBLE
default y
---help---
Allow the system to enter sleep states in which main memory is
powered and thus its contents are preserved, such as the
- suspend-to-RAM state (i.e. the ACPI S3 state).
+ suspend-to-RAM state (e.g. the ACPI S3 state).
-config HIBERNATION_UP_POSSIBLE
- bool
- depends on X86 || PPC64_SWSUSP || PPC32
- depends on !SMP
+config SUSPEND_FREEZER
+ bool "Enable freezer for suspend to RAM/standby" \
+ if ARCH_WANTS_FREEZER_CONTROL || BROKEN
+ depends on SUSPEND
default y
+ help
+ This allows you to turn off the freezer for suspend. If this is
+ done, no tasks are frozen for suspend to RAM/standby.
-config HIBERNATION_SMP_POSSIBLE
- bool
- depends on (X86 && !X86_VOYAGER) || PPC64_SWSUSP
- depends on SMP
- default y
+ Turning OFF this setting is NOT recommended! If in doubt, say Y.
config HIBERNATION
bool "Hibernation (aka 'suspend to disk')"
- depends on PM && SWAP
- depends on HIBERNATION_UP_POSSIBLE || HIBERNATION_SMP_POSSIBLE
+ depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
---help---
Enable the suspend to disk (STD) functionality, which is usually
called "hibernation" in user interfaces. STD checkpoints the
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 8b15f777010..d09da089517 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -54,8 +54,8 @@ static struct platform_hibernation_ops *hibernation_ops;
void hibernation_set_ops(struct platform_hibernation_ops *ops)
{
- if (ops && !(ops->start && ops->pre_snapshot && ops->finish
- && ops->prepare && ops->enter && ops->pre_restore
+ if (ops && !(ops->begin && ops->end && ops->pre_snapshot
+ && ops->prepare && ops->finish && ops->enter && ops->pre_restore
&& ops->restore_cleanup)) {
WARN_ON(1);
return;
@@ -70,15 +70,55 @@ void hibernation_set_ops(struct platform_hibernation_ops *ops)
mutex_unlock(&pm_mutex);
}
+#ifdef CONFIG_PM_DEBUG
+static void hibernation_debug_sleep(void)
+{
+ printk(KERN_INFO "hibernation debug: Waiting for 5 seconds.\n");
+ mdelay(5000);
+}
+
+static int hibernation_testmode(int mode)
+{
+ if (hibernation_mode == mode) {
+ hibernation_debug_sleep();
+ return 1;
+ }
+ return 0;
+}
+
+static int hibernation_test(int level)
+{
+ if (pm_test_level == level) {
+ hibernation_debug_sleep();
+ return 1;
+ }
+ return 0;
+}
+#else /* !CONFIG_PM_DEBUG */
+static int hibernation_testmode(int mode) { return 0; }
+static int hibernation_test(int level) { return 0; }
+#endif /* !CONFIG_PM_DEBUG */
+
/**
- * platform_start - tell the platform driver that we're starting
+ * platform_begin - tell the platform driver that we're starting
* hibernation
*/
-static int platform_start(int platform_mode)
+static int platform_begin(int platform_mode)
{
return (platform_mode && hibernation_ops) ?
- hibernation_ops->start() : 0;
+ hibernation_ops->begin() : 0;
+}
+
+/**
+ * platform_end - tell the platform driver that we've entered the
+ * working state
+ */
+
+static void platform_end(int platform_mode)
+{
+ if (platform_mode && hibernation_ops)
+ hibernation_ops->end();
}
/**
@@ -162,19 +202,25 @@ int create_image(int platform_mode)
*/
error = device_power_down(PMSG_FREEZE);
if (error) {
- printk(KERN_ERR "Some devices failed to power down, "
- KERN_ERR "aborting suspend\n");
+ printk(KERN_ERR "PM: Some devices failed to power down, "
+ "aborting hibernation\n");
goto Enable_irqs;
}
+ if (hibernation_test(TEST_CORE))
+ goto Power_up;
+
+ in_suspend = 1;
save_processor_state();
error = swsusp_arch_suspend();
if (error)
- printk(KERN_ERR "Error %d while creating the image\n", error);
+ printk(KERN_ERR "PM: Error %d creating hibernation image\n",
+ error);
/* Restore control flow magically appears here */
restore_processor_state();
if (!in_suspend)
platform_leave(platform_mode);
+ Power_up:
/* NOTE: device_power_up() is just a resume() for devices
* that suspended with irqs off ... no overall powerup.
*/
@@ -202,36 +248,90 @@ int hibernation_snapshot(int platform_mode)
if (error)
return error;
- error = platform_start(platform_mode);
+ error = platform_begin(platform_mode);
if (error)
- return error;
+ goto Close;
suspend_console();
error = device_suspend(PMSG_FREEZE);
if (error)
goto Resume_console;
- error = platform_pre_snapshot(platform_mode);
- if (error)
+ if (hibernation_test(TEST_DEVICES))
goto Resume_devices;
+ error = platform_pre_snapshot(platform_mode);
+ if (error || hibernation_test(TEST_PLATFORM))
+ goto Finish;
+
error = disable_nonboot_cpus();
if (!error) {
- if (hibernation_mode != HIBERNATION_TEST) {
- in_suspend = 1;
- error = create_image(platform_mode);
- /* Control returns here after successful restore */
- } else {
- printk("swsusp debug: Waiting for 5 seconds.\n");
- mdelay(5000);
- }
+ if (hibernation_test(TEST_CPUS))
+ goto Enable_cpus;
+
+ if (hibernation_testmode(HIBERNATION_TEST))
+ goto Enable_cpus;
+
+ error = create_image(platform_mode);
+ /* Control returns here after successful restore */
}
+ Enable_cpus:
enable_nonboot_cpus();
- Resume_devices:
+ Finish:
platform_finish(platform_mode);
+ Resume_devices:
device_resume();
Resume_console:
resume_console();
+ Close:
+ platform_end(platform_mode);
+ return error;
+}
+
+/**
+ * resume_target_kernel - prepare devices that need to be suspended with
+ * interrupts off, restore the contents of highmem that have not been
+ * restored yet from the image and run the low level code that will restore
+ * the remaining contents of memory and switch to the just restored target
+ * kernel.
+ */
+
+static int resume_target_kernel(void)
+{
+ int error;
+
+ local_irq_disable();
+ error = device_power_down(PMSG_PRETHAW);
+ if (error) {
+ printk(KERN_ERR "PM: Some devices failed to power down, "
+ "aborting resume\n");
+ goto Enable_irqs;
+ }
+ /* We'll ignore saved state, but this gets preempt count (etc) right */
+ save_processor_state();
+ error = restore_highmem();
+ if (!error) {
+ error = swsusp_arch_resume();
+ /*
+ * The code below is only ever reached in case of a failure.
+	 * Otherwise execution continues at the place where
+ * swsusp_arch_suspend() was called
+ */
+ BUG_ON(!error);
+	 /* This call to restore_highmem() undoes the previous one */
+ restore_highmem();
+ }
+ /*
+ * The only reason why swsusp_arch_resume() can fail is memory being
+ * very tight, so we have to free it as soon as we can to avoid
+ * subsequent failures
+ */
+ swsusp_free();
+ restore_processor_state();
+ touch_softlockup_watchdog();
+ device_power_up();
+ Enable_irqs:
+ local_irq_enable();
return error;
}
@@ -258,7 +358,7 @@ int hibernation_restore(int platform_mode)
if (!error) {
error = disable_nonboot_cpus();
if (!error)
- error = swsusp_resume();
+ error = resume_target_kernel();
enable_nonboot_cpus();
}
platform_restore_cleanup(platform_mode);
@@ -286,9 +386,9 @@ int hibernation_platform_enter(void)
* hibernation_ops->finish() before saving the image, so we should let
* the firmware know that we're going to enter the sleep state after all
*/
- error = hibernation_ops->start();
+ error = hibernation_ops->begin();
if (error)
- return error;
+ goto Close;
suspend_console();
error = device_suspend(PMSG_SUSPEND);
@@ -322,6 +422,8 @@ int hibernation_platform_enter(void)
device_resume();
Resume_console:
resume_console();
+ Close:
+ hibernation_ops->end();
return error;
}
@@ -352,24 +454,17 @@ static void power_down(void)
* Valid image is on the disk, if we continue we risk serious data
* corruption after resume.
*/
- printk(KERN_CRIT "Please power me down manually\n");
+ printk(KERN_CRIT "PM: Please power down manually\n");
while(1);
}
-static void unprepare_processes(void)
-{
- thaw_processes();
- pm_restore_console();
-}
-
static int prepare_processes(void)
{
int error = 0;
- pm_prepare_console();
if (freeze_processes()) {
error = -EBUSY;
- unprepare_processes();
+ thaw_processes();
}
return error;
}
@@ -389,6 +484,7 @@ int hibernate(void)
goto Unlock;
}
+ pm_prepare_console();
error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
if (error)
goto Exit;
@@ -398,7 +494,7 @@ int hibernate(void)
if (error)
goto Exit;
- printk("Syncing filesystems ... ");
+ printk(KERN_INFO "PM: Syncing filesystems ... ");
sys_sync();
printk("done.\n");
@@ -406,11 +502,12 @@ int hibernate(void)
if (error)
goto Finish;
- if (hibernation_mode == HIBERNATION_TESTPROC) {
- printk("swsusp debug: Waiting for 5 seconds.\n");
- mdelay(5000);
+ if (hibernation_test(TEST_FREEZER))
goto Thaw;
- }
+
+ if (hibernation_testmode(HIBERNATION_TESTPROC))
+ goto Thaw;
+
error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
if (in_suspend && !error) {
unsigned int flags = 0;
@@ -427,11 +524,12 @@ int hibernate(void)
swsusp_free();
}
Thaw:
- unprepare_processes();
+ thaw_processes();
Finish:
free_basic_memory_bitmaps();
Exit:
pm_notifier_call_chain(PM_POST_HIBERNATION);
+ pm_restore_console();
atomic_inc(&snapshot_device_available);
Unlock:
mutex_unlock(&pm_mutex);
@@ -456,29 +554,40 @@ static int software_resume(void)
int error;
unsigned int flags;
- mutex_lock(&pm_mutex);
+ /*
+ * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
+ * is configured into the kernel. Since the regular hibernate
+ * trigger path is via sysfs which takes a buffer mutex before
+ * calling hibernate functions (which take pm_mutex) this can
+ * cause lockdep to complain about a possible ABBA deadlock
+ * which cannot happen since we're in the boot code here and
+ * sysfs can't be invoked yet. Therefore, we use a subclass
+ * here to avoid lockdep complaining.
+ */
+ mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);
if (!swsusp_resume_device) {
if (!strlen(resume_file)) {
mutex_unlock(&pm_mutex);
return -ENOENT;
}
swsusp_resume_device = name_to_dev_t(resume_file);
- pr_debug("swsusp: Resume From Partition %s\n", resume_file);
+ pr_debug("PM: Resume from partition %s\n", resume_file);
} else {
- pr_debug("swsusp: Resume From Partition %d:%d\n",
- MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
+ pr_debug("PM: Resume from partition %d:%d\n",
+ MAJOR(swsusp_resume_device),
+ MINOR(swsusp_resume_device));
}
if (noresume) {
/**
- * FIXME: If noresume is specified, we need to find the partition
- * and reset it back to normal swap space.
+ * FIXME: If noresume is specified, we need to find the
+ * partition and reset it back to normal swap space.
*/
mutex_unlock(&pm_mutex);
return 0;
}
- pr_debug("PM: Checking swsusp image.\n");
+ pr_debug("PM: Checking hibernation image.\n");
error = swsusp_check();
if (error)
goto Unlock;
@@ -489,6 +598,11 @@ static int software_resume(void)
goto Unlock;
}
+ pm_prepare_console();
+ error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
+ if (error)
+ goto Finish;
+
error = create_basic_memory_bitmaps();
if (error)
goto Finish;
@@ -500,7 +614,7 @@ static int software_resume(void)
goto Done;
}
- pr_debug("PM: Reading swsusp image.\n");
+ pr_debug("PM: Reading hibernation image.\n");
error = swsusp_read(&flags);
if (!error)
@@ -508,10 +622,12 @@ static int software_resume(void)
printk(KERN_ERR "PM: Restore failed, recovering.\n");
swsusp_free();
- unprepare_processes();
+ thaw_processes();
Done:
free_basic_memory_bitmaps();
Finish:
+ pm_notifier_call_chain(PM_POST_RESTORE);
+ pm_restore_console();
atomic_inc(&snapshot_device_available);
/* For success case, the suspend path will release the lock */
Unlock:
@@ -557,7 +673,8 @@ static const char * const hibernation_modes[] = {
* supports it (as determined by having hibernation_ops).
*/
-static ssize_t disk_show(struct kset *kset, char *buf)
+static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
{
int i;
char *start = buf;
@@ -587,7 +704,8 @@ static ssize_t disk_show(struct kset *kset, char *buf)
}
-static ssize_t disk_store(struct kset *kset, const char *buf, size_t n)
+static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
{
int error = 0;
int i;
@@ -624,7 +742,7 @@ static ssize_t disk_store(struct kset *kset, const char *buf, size_t n)
error = -EINVAL;
if (!error)
- pr_debug("PM: suspend-to-disk mode set to '%s'\n",
+ pr_debug("PM: Hibernation mode set to '%s'\n",
hibernation_modes[mode]);
mutex_unlock(&pm_mutex);
return error ? error : n;
@@ -632,13 +750,15 @@ static ssize_t disk_store(struct kset *kset, const char *buf, size_t n)
power_attr(disk);
-static ssize_t resume_show(struct kset *kset, char *buf)
+static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
{
return sprintf(buf,"%d:%d\n", MAJOR(swsusp_resume_device),
MINOR(swsusp_resume_device));
}
-static ssize_t resume_store(struct kset *kset, const char *buf, size_t n)
+static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
{
unsigned int maj, min;
dev_t res;
@@ -654,7 +774,7 @@ static ssize_t resume_store(struct kset *kset, const char *buf, size_t n)
mutex_lock(&pm_mutex);
swsusp_resume_device = res;
mutex_unlock(&pm_mutex);
- printk("Attempting manual resume\n");
+ printk(KERN_INFO "PM: Starting manual resume from disk\n");
noresume = 0;
software_resume();
ret = n;
@@ -664,12 +784,14 @@ static ssize_t resume_store(struct kset *kset, const char *buf, size_t n)
power_attr(resume);
-static ssize_t image_size_show(struct kset *kset, char *buf)
+static ssize_t image_size_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%lu\n", image_size);
}
-static ssize_t image_size_store(struct kset *kset, const char *buf, size_t n)
+static ssize_t image_size_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
{
unsigned long size;
@@ -698,7 +820,7 @@ static struct attribute_group attr_group = {
static int __init pm_disk_init(void)
{
- return sysfs_create_group(&power_subsys.kobj, &attr_group);
+ return sysfs_create_group(power_kobj, &attr_group);
}
core_initcall(pm_disk_init);
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 3cdf95b1dc9..6a6d5eb3524 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -24,10 +24,112 @@
#include "power.h"
-BLOCKING_NOTIFIER_HEAD(pm_chain_head);
-
DEFINE_MUTEX(pm_mutex);
+unsigned int pm_flags;
+EXPORT_SYMBOL(pm_flags);
+
+#ifdef CONFIG_PM_SLEEP
+
+/* Routines for PM-transition notifications */
+
+static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
+
+int register_pm_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&pm_chain_head, nb);
+}
+EXPORT_SYMBOL_GPL(register_pm_notifier);
+
+int unregister_pm_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&pm_chain_head, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_pm_notifier);
+
+int pm_notifier_call_chain(unsigned long val)
+{
+ return (blocking_notifier_call_chain(&pm_chain_head, val, NULL)
+ == NOTIFY_BAD) ? -EINVAL : 0;
+}
+
+#ifdef CONFIG_PM_DEBUG
+int pm_test_level = TEST_NONE;
+
+static int suspend_test(int level)
+{
+ if (pm_test_level == level) {
+ printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
+ mdelay(5000);
+ return 1;
+ }
+ return 0;
+}
+
+static const char * const pm_tests[__TEST_AFTER_LAST] = {
+ [TEST_NONE] = "none",
+ [TEST_CORE] = "core",
+ [TEST_CPUS] = "processors",
+ [TEST_PLATFORM] = "platform",
+ [TEST_DEVICES] = "devices",
+ [TEST_FREEZER] = "freezer",
+};
+
+static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+ int level;
+
+ for (level = TEST_FIRST; level <= TEST_MAX; level++)
+ if (pm_tests[level]) {
+ if (level == pm_test_level)
+ s += sprintf(s, "[%s] ", pm_tests[level]);
+ else
+ s += sprintf(s, "%s ", pm_tests[level]);
+ }
+
+ if (s != buf)
+ /* convert the last space to a newline */
+ *(s-1) = '\n';
+
+ return (s - buf);
+}
+
+static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ const char * const *s;
+ int level;
+ char *p;
+ int len;
+ int error = -EINVAL;
+
+ p = memchr(buf, '\n', n);
+ len = p ? p - buf : n;
+
+ mutex_lock(&pm_mutex);
+
+ level = TEST_FIRST;
+ for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
+ if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
+ pm_test_level = level;
+ error = 0;
+ break;
+ }
+
+ mutex_unlock(&pm_mutex);
+
+ return error ? error : n;
+}
+
+power_attr(pm_test);
+#else /* !CONFIG_PM_DEBUG */
+static inline int suspend_test(int level) { return 0; }
+#endif /* !CONFIG_PM_DEBUG */
+
+#endif /* CONFIG_PM_SLEEP */
+
#ifdef CONFIG_SUSPEND
/* This is just an arbitrary number */
@@ -73,13 +175,13 @@ static int suspend_prepare(void)
if (!suspend_ops || !suspend_ops->enter)
return -EPERM;
+ pm_prepare_console();
+
error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
if (error)
goto Finish;
- pm_prepare_console();
-
- if (freeze_processes()) {
+ if (suspend_freeze_processes()) {
error = -EAGAIN;
goto Thaw;
}
@@ -97,10 +199,10 @@ static int suspend_prepare(void)
return 0;
Thaw:
- thaw_processes();
- pm_restore_console();
+ suspend_thaw_processes();
Finish:
pm_notifier_call_chain(PM_POST_SUSPEND);
+ pm_restore_console();
return error;
}
@@ -130,10 +232,13 @@ static int suspend_enter(suspend_state_t state)
BUG_ON(!irqs_disabled());
if ((error = device_power_down(PMSG_SUSPEND))) {
- printk(KERN_ERR "Some devices failed to power down\n");
+ printk(KERN_ERR "PM: Some devices failed to power down\n");
goto Done;
}
- error = suspend_ops->enter(state);
+
+ if (!suspend_test(TEST_CORE))
+ error = suspend_ops->enter(state);
+
device_power_up();
Done:
arch_suspend_enable_irqs();
@@ -142,8 +247,8 @@ static int suspend_enter(suspend_state_t state)
}
/**
- * suspend_devices_and_enter - suspend devices and enter the desired system sleep
- * state.
+ * suspend_devices_and_enter - suspend devices and enter the desired system
+ * sleep state.
* @state: state to enter
*/
int suspend_devices_and_enter(suspend_state_t state)
@@ -153,33 +258,45 @@ int suspend_devices_and_enter(suspend_state_t state)
if (!suspend_ops)
return -ENOSYS;
- if (suspend_ops->set_target) {
- error = suspend_ops->set_target(state);
+ if (suspend_ops->begin) {
+ error = suspend_ops->begin(state);
if (error)
- return error;
+ goto Close;
}
suspend_console();
error = device_suspend(PMSG_SUSPEND);
if (error) {
- printk(KERN_ERR "Some devices failed to suspend\n");
+ printk(KERN_ERR "PM: Some devices failed to suspend\n");
goto Resume_console;
}
+
+ if (suspend_test(TEST_DEVICES))
+ goto Resume_devices;
+
if (suspend_ops->prepare) {
error = suspend_ops->prepare();
if (error)
goto Resume_devices;
}
+
+ if (suspend_test(TEST_PLATFORM))
+ goto Finish;
+
error = disable_nonboot_cpus();
- if (!error)
+ if (!error && !suspend_test(TEST_CPUS))
suspend_enter(state);
enable_nonboot_cpus();
+ Finish:
if (suspend_ops->finish)
suspend_ops->finish();
Resume_devices:
device_resume();
Resume_console:
resume_console();
+ Close:
+ if (suspend_ops->end)
+ suspend_ops->end();
return error;
}
@@ -191,9 +308,9 @@ int suspend_devices_and_enter(suspend_state_t state)
*/
static void suspend_finish(void)
{
- thaw_processes();
- pm_restore_console();
+ suspend_thaw_processes();
pm_notifier_call_chain(PM_POST_SUSPEND);
+ pm_restore_console();
}
@@ -235,17 +352,22 @@ static int enter_state(suspend_state_t state)
if (!mutex_trylock(&pm_mutex))
return -EBUSY;
- printk("Syncing filesystems ... ");
+ printk(KERN_INFO "PM: Syncing filesystems ... ");
sys_sync();
printk("done.\n");
pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
- if ((error = suspend_prepare()))
+ error = suspend_prepare();
+ if (error)
goto Unlock;
+ if (suspend_test(TEST_FREEZER))
+ goto Finish;
+
pr_debug("PM: Entering %s sleep\n", pm_states[state]);
error = suspend_devices_and_enter(state);
+ Finish:
pr_debug("PM: Finishing wakeup.\n");
suspend_finish();
Unlock:
@@ -273,8 +395,7 @@ EXPORT_SYMBOL(pm_suspend);
#endif /* CONFIG_SUSPEND */
-decl_subsys(power,NULL,NULL);
-
+struct kobject *power_kobj;
/**
* state - control system power state.
@@ -287,7 +408,8 @@ decl_subsys(power,NULL,NULL);
* proper enumerated value, and initiates a suspend transition.
*/
-static ssize_t state_show(struct kset *kset, char *buf)
+static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
{
char *s = buf;
#ifdef CONFIG_SUSPEND
@@ -308,7 +430,8 @@ static ssize_t state_show(struct kset *kset, char *buf)
return (s - buf);
}
-static ssize_t state_store(struct kset *kset, const char *buf, size_t n)
+static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
suspend_state_t state = PM_SUSPEND_STANDBY;
@@ -345,13 +468,15 @@ power_attr(state);
#ifdef CONFIG_PM_TRACE
int pm_trace_enabled;
-static ssize_t pm_trace_show(struct kset *kset, char *buf)
+static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", pm_trace_enabled);
}
static ssize_t
-pm_trace_store(struct kset *kset, const char *buf, size_t n)
+pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
{
int val;
@@ -363,18 +488,18 @@ pm_trace_store(struct kset *kset, const char *buf, size_t n)
}
power_attr(pm_trace);
+#endif /* CONFIG_PM_TRACE */
static struct attribute * g[] = {
&state_attr.attr,
+#ifdef CONFIG_PM_TRACE
&pm_trace_attr.attr,
+#endif
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PM_DEBUG)
+ &pm_test_attr.attr,
+#endif
NULL,
};
-#else
-static struct attribute * g[] = {
- &state_attr.attr,
- NULL,
-};
-#endif /* CONFIG_PM_TRACE */
static struct attribute_group attr_group = {
.attrs = g,
@@ -383,10 +508,10 @@ static struct attribute_group attr_group = {
static int __init pm_init(void)
{
- int error = subsystem_register(&power_subsys);
- if (!error)
- error = sysfs_create_group(&power_subsys.kobj,&attr_group);
- return error;
+ power_kobj = kobject_create_and_add("power", NULL);
+ if (!power_kobj)
+ return -ENOMEM;
+ return sysfs_create_group(power_kobj, &attr_group);
}
core_initcall(pm_init);
diff --git a/kernel/power/pm.c b/kernel/power/pm.c
index c50d15266c1..60c73fa670d 100644
--- a/kernel/power/pm.c
+++ b/kernel/power/pm.c
@@ -27,8 +27,6 @@
#include <linux/interrupt.h>
#include <linux/mutex.h>
-int pm_active;
-
/*
* Locking notes:
* pm_devs_lock can be a semaphore providing pm ops are not called
@@ -204,6 +202,4 @@ int pm_send_all(pm_request_t rqst, void *data)
EXPORT_SYMBOL(pm_register);
EXPORT_SYMBOL(pm_send_all);
-EXPORT_SYMBOL(pm_active);
-
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 195dc461176..700f44ec840 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -1,5 +1,7 @@
#include <linux/suspend.h>
+#include <linux/suspend_ioctls.h>
#include <linux/utsname.h>
+#include <linux/freezer.h>
struct swsusp_info {
struct new_utsname uts;
@@ -54,7 +56,7 @@ extern int pfn_is_nosave(unsigned long);
extern struct mutex pm_mutex;
#define power_attr(_name) \
-static struct subsys_attribute _name##_attr = { \
+static struct kobj_attribute _name##_attr = { \
.attr = { \
.name = __stringify(_name), \
.mode = 0644, \
@@ -63,8 +65,6 @@ static struct subsys_attribute _name##_attr = { \
.store = _name##_store, \
}
-extern struct kset power_subsys;
-
/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
extern int in_suspend;
@@ -130,42 +130,12 @@ struct snapshot_handle {
#define data_of(handle) ((handle).buffer + (handle).buf_offset)
extern unsigned int snapshot_additional_pages(struct zone *zone);
+extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle, size_t count);
extern int snapshot_write_next(struct snapshot_handle *handle, size_t count);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);
-/*
- * This structure is used to pass the values needed for the identification
- * of the resume swap area from a user space to the kernel via the
- * SNAPSHOT_SET_SWAP_AREA ioctl
- */
-struct resume_swap_area {
- loff_t offset;
- u_int32_t dev;
-} __attribute__((packed));
-
-#define SNAPSHOT_IOC_MAGIC '3'
-#define SNAPSHOT_FREEZE _IO(SNAPSHOT_IOC_MAGIC, 1)
-#define SNAPSHOT_UNFREEZE _IO(SNAPSHOT_IOC_MAGIC, 2)
-#define SNAPSHOT_ATOMIC_SNAPSHOT _IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
-#define SNAPSHOT_ATOMIC_RESTORE _IO(SNAPSHOT_IOC_MAGIC, 4)
-#define SNAPSHOT_FREE _IO(SNAPSHOT_IOC_MAGIC, 5)
-#define SNAPSHOT_SET_IMAGE_SIZE _IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
-#define SNAPSHOT_AVAIL_SWAP _IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
-#define SNAPSHOT_GET_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 8, void *)
-#define SNAPSHOT_FREE_SWAP_PAGES _IO(SNAPSHOT_IOC_MAGIC, 9)
-#define SNAPSHOT_SET_SWAP_FILE _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
-#define SNAPSHOT_S2RAM _IO(SNAPSHOT_IOC_MAGIC, 11)
-#define SNAPSHOT_PMOPS _IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
-#define SNAPSHOT_SET_SWAP_AREA _IOW(SNAPSHOT_IOC_MAGIC, 13, \
- struct resume_swap_area)
-#define SNAPSHOT_IOC_MAXNR 13
-
-#define PMOPS_PREPARE 1
-#define PMOPS_ENTER 2
-#define PMOPS_FINISH 3
-
/* If unset, the snapshot device cannot be open. */
extern atomic_t snapshot_device_available;
@@ -183,7 +153,6 @@ extern int swsusp_swap_in_use(void);
extern int swsusp_check(void);
extern int swsusp_shrink_memory(void);
extern void swsusp_free(void);
-extern int swsusp_resume(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
extern void swsusp_close(void);
@@ -203,11 +172,56 @@ static inline int suspend_devices_and_enter(suspend_state_t state)
}
#endif /* !CONFIG_SUSPEND */
-/* kernel/power/common.c */
-extern struct blocking_notifier_head pm_chain_head;
+#ifdef CONFIG_PM_SLEEP
+/* kernel/power/main.c */
+extern int pm_notifier_call_chain(unsigned long val);
+#endif
+
+#ifdef CONFIG_HIGHMEM
+unsigned int count_highmem_pages(void);
+int restore_highmem(void);
+#else
+static inline unsigned int count_highmem_pages(void) { return 0; }
+static inline int restore_highmem(void) { return 0; }
+#endif
+
+/*
+ * Suspend test levels
+ */
+enum {
+ /* keep first */
+ TEST_NONE,
+ TEST_CORE,
+ TEST_CPUS,
+ TEST_PLATFORM,
+ TEST_DEVICES,
+ TEST_FREEZER,
+ /* keep last */
+ __TEST_AFTER_LAST
+};
-static inline int pm_notifier_call_chain(unsigned long val)
+#define TEST_FIRST TEST_NONE
+#define TEST_MAX (__TEST_AFTER_LAST - 1)
+
+extern int pm_test_level;
+
+#ifdef CONFIG_SUSPEND_FREEZER
+static inline int suspend_freeze_processes(void)
+{
+ return freeze_processes();
+}
+
+static inline void suspend_thaw_processes(void)
{
- return (blocking_notifier_call_chain(&pm_chain_head, val, NULL)
- == NOTIFY_BAD) ? -EINVAL : 0;
+ thaw_processes();
}
+#else
+static inline int suspend_freeze_processes(void)
+{
+ return 0;
+}
+
+static inline void suspend_thaw_processes(void)
+{
+}
+#endif
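For reference, what power_attr(pm_test) expands to under the kobj_attribute-based definition above (expansion written out by hand here; it is not an extra declaration added by the patch):

static struct kobj_attribute pm_test_attr = {
	.attr	= {
		.name = "pm_test",
		.mode = 0644,
	},
	.show	= pm_test_show,
	.store	= pm_test_store,
};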
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 6533923e711..7c2118f9597 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -86,9 +86,9 @@ static void fake_signal_wake_up(struct task_struct *p, int resume)
static void send_fake_signal(struct task_struct *p)
{
- if (p->state == TASK_STOPPED)
+ if (task_is_stopped(p))
force_sig_specific(SIGSTOP, p);
- fake_signal_wake_up(p, p->state == TASK_STOPPED);
+ fake_signal_wake_up(p, task_is_stopped(p));
}
static int has_mm(struct task_struct *p)
@@ -182,7 +182,7 @@ static int try_to_freeze_tasks(int freeze_user_space)
if (frozen(p) || !freezeable(p))
continue;
- if (p->state == TASK_TRACED && frozen(p->parent)) {
+ if (task_is_traced(p) && frozen(p->parent)) {
cancel_freezing(p);
continue;
}
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 78039b477d2..f6a5df934f8 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -635,7 +635,7 @@ __register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
region->end_pfn = end_pfn;
list_add_tail(&region->list, &nosave_regions);
Report:
- printk("swsusp: Registered nosave memory region: %016lx - %016lx\n",
+ printk(KERN_INFO "PM: Registered nosave memory: %016lx - %016lx\n",
start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}
@@ -704,7 +704,7 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
list_for_each_entry(region, &nosave_regions, list) {
unsigned long pfn;
- printk("swsusp: Marking nosave pages: %016lx - %016lx\n",
+ pr_debug("PM: Marking nosave pages: %016lx - %016lx\n",
region->start_pfn << PAGE_SHIFT,
region->end_pfn << PAGE_SHIFT);
@@ -749,7 +749,7 @@ int create_basic_memory_bitmaps(void)
free_pages_map = bm2;
mark_nosave_pages(forbidden_pages_map);
- printk("swsusp: Basic memory bitmaps created\n");
+ pr_debug("PM: Basic memory bitmaps created\n");
return 0;
@@ -784,7 +784,7 @@ void free_basic_memory_bitmaps(void)
memory_bm_free(bm2, PG_UNSAFE_CLEAR);
kfree(bm2);
- printk("swsusp: Basic memory bitmaps freed\n");
+ pr_debug("PM: Basic memory bitmaps freed\n");
}
/**
@@ -872,7 +872,6 @@ unsigned int count_highmem_pages(void)
}
#else
static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
-static inline unsigned int count_highmem_pages(void) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
@@ -1089,7 +1088,7 @@ static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
}
nr_pages += count_pages_for_highmem(nr_highmem);
- pr_debug("swsusp: Normal pages needed: %u + %u + %u, available pages: %u\n",
+ pr_debug("PM: Normal pages needed: %u + %u + %u, available pages: %u\n",
nr_pages, PAGES_FOR_IO, meta, free);
return free > nr_pages + PAGES_FOR_IO + meta;
@@ -1202,20 +1201,20 @@ asmlinkage int swsusp_save(void)
{
unsigned int nr_pages, nr_highmem;
- printk("swsusp: critical section: \n");
+ printk(KERN_INFO "PM: Creating hibernation image: \n");
drain_local_pages();
nr_pages = count_data_pages();
nr_highmem = count_highmem_pages();
- printk("swsusp: Need to copy %u pages\n", nr_pages + nr_highmem);
+ printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
if (!enough_free_mem(nr_pages, nr_highmem)) {
- printk(KERN_ERR "swsusp: Not enough free memory\n");
+ printk(KERN_ERR "PM: Not enough free memory\n");
return -ENOMEM;
}
if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
- printk(KERN_ERR "swsusp: Memory allocation failed\n");
+ printk(KERN_ERR "PM: Memory allocation failed\n");
return -ENOMEM;
}
@@ -1235,7 +1234,8 @@ asmlinkage int swsusp_save(void)
nr_copy_pages = nr_pages;
nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
- printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
+ printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
+ nr_pages);
return 0;
}
@@ -1264,12 +1264,17 @@ static char *check_image_kernel(struct swsusp_info *info)
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
+unsigned long snapshot_get_image_size(void)
+{
+ return nr_copy_pages + nr_meta_pages + 1;
+}
+
static int init_header(struct swsusp_info *info)
{
memset(info, 0, sizeof(struct swsusp_info));
info->num_physpages = num_physpages;
info->image_pages = nr_copy_pages;
- info->pages = nr_copy_pages + nr_meta_pages + 1;
+ info->pages = snapshot_get_image_size();
info->size = info->pages;
info->size <<= PAGE_SHIFT;
return init_header_complete(info);
@@ -1429,7 +1434,7 @@ static int check_header(struct swsusp_info *info)
if (!reason && info->num_physpages != num_physpages)
reason = "memory size";
if (reason) {
- printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
+ printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
return -EPERM;
}
return 0;
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 917aba10057..a0abf9a463f 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -28,8 +28,6 @@
#include "power.h"
-extern char resume_file[];
-
#define SWSUSP_SIG "S1SUSPEND"
struct swsusp_header {
@@ -73,7 +71,8 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
bio->bi_end_io = end_swap_bio_read;
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
- printk("swsusp: ERROR: adding page to bio at %ld\n", page_off);
+ printk(KERN_ERR "PM: Adding page to bio failed at %ld\n",
+ page_off);
bio_put(bio);
return -EFAULT;
}
@@ -153,7 +152,7 @@ static int mark_swapfiles(sector_t start, unsigned int flags)
error = bio_write_page(swsusp_resume_block,
swsusp_header, NULL);
} else {
- printk(KERN_ERR "swsusp: Swap header not found!\n");
+ printk(KERN_ERR "PM: Swap header not found!\n");
error = -ENODEV;
}
return error;
@@ -325,7 +324,8 @@ static int save_image(struct swap_map_handle *handle,
struct timeval start;
struct timeval stop;
- printk("Saving image data pages (%u pages) ... ", nr_to_write);
+ printk(KERN_INFO "PM: Saving image data pages (%u pages) ... ",
+ nr_to_write);
m = nr_to_write / 100;
if (!m)
m = 1;
@@ -365,7 +365,7 @@ static int enough_swap(unsigned int nr_pages)
{
unsigned int free_swap = count_swap_pages(root_swap, 1);
- pr_debug("swsusp: free swap pages: %u\n", free_swap);
+ pr_debug("PM: Free swap pages: %u\n", free_swap);
return free_swap > nr_pages + PAGES_FOR_IO;
}
@@ -388,7 +388,7 @@ int swsusp_write(unsigned int flags)
error = swsusp_swap_check();
if (error) {
- printk(KERN_ERR "swsusp: Cannot find swap device, try "
+ printk(KERN_ERR "PM: Cannot find swap device, try "
"swapon -a.\n");
return error;
}
@@ -402,7 +402,7 @@ int swsusp_write(unsigned int flags)
}
header = (struct swsusp_info *)data_of(snapshot);
if (!enough_swap(header->pages)) {
- printk(KERN_ERR "swsusp: Not enough free swap\n");
+ printk(KERN_ERR "PM: Not enough free swap\n");
error = -ENOSPC;
goto out;
}
@@ -417,7 +417,7 @@ int swsusp_write(unsigned int flags)
if (!error) {
flush_swap_writer(&handle);
- printk("S");
+ printk(KERN_INFO "PM: S");
error = mark_swapfiles(start, flags);
printk("|\n");
}
@@ -507,7 +507,8 @@ static int load_image(struct swap_map_handle *handle,
int err2;
unsigned nr_pages;
- printk("Loading image data pages (%u pages) ... ", nr_to_read);
+ printk(KERN_INFO "PM: Loading image data pages (%u pages) ... ",
+ nr_to_read);
m = nr_to_read / 100;
if (!m)
m = 1;
@@ -558,7 +559,7 @@ int swsusp_read(unsigned int *flags_p)
*flags_p = swsusp_header->flags;
if (IS_ERR(resume_bdev)) {
- pr_debug("swsusp: block device not initialised\n");
+ pr_debug("PM: Image device not initialised\n");
return PTR_ERR(resume_bdev);
}
@@ -577,9 +578,9 @@ int swsusp_read(unsigned int *flags_p)
blkdev_put(resume_bdev);
if (!error)
- pr_debug("swsusp: Reading resume file was successful\n");
+ pr_debug("PM: Image successfully loaded\n");
else
- pr_debug("swsusp: Error %d resuming\n", error);
+ pr_debug("PM: Error %d resuming\n", error);
return error;
}
@@ -611,13 +612,13 @@ int swsusp_check(void)
if (error)
blkdev_put(resume_bdev);
else
- pr_debug("swsusp: Signature found, resuming\n");
+ pr_debug("PM: Signature found, resuming\n");
} else {
error = PTR_ERR(resume_bdev);
}
if (error)
- pr_debug("swsusp: Error %d check for resume file\n", error);
+ pr_debug("PM: Error %d checking image file\n", error);
return error;
}
@@ -629,7 +630,7 @@ int swsusp_check(void)
void swsusp_close(void)
{
if (IS_ERR(resume_bdev)) {
- pr_debug("swsusp: block device not initialised\n");
+ pr_debug("PM: Image device not initialised\n");
return;
}
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index e1722d3155f..023ff2a31d8 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -64,14 +64,6 @@ unsigned long image_size = 500 * 1024 * 1024;
int in_suspend __nosavedata = 0;
-#ifdef CONFIG_HIGHMEM
-unsigned int count_highmem_pages(void);
-int restore_highmem(void);
-#else
-static inline int restore_highmem(void) { return 0; }
-static inline unsigned int count_highmem_pages(void) { return 0; }
-#endif
-
/**
* The following functions are used for tracing the allocated
* swap pages, so that they can be freed in case of an error.
@@ -196,7 +188,8 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop,
centisecs = 1; /* avoid div-by-zero */
k = nr_pages * (PAGE_SIZE / 1024);
kps = (k * 100) / centisecs;
- printk("%s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", msg, k,
+ printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n",
+ msg, k,
centisecs / 100, centisecs % 100,
kps / 1000, (kps % 1000) / 10);
}
@@ -227,7 +220,7 @@ int swsusp_shrink_memory(void)
char *p = "-\\|/";
struct timeval start, stop;
- printk("Shrinking memory... ");
+ printk(KERN_INFO "PM: Shrinking memory... ");
do_gettimeofday(&start);
do {
long size, highmem_size;
@@ -269,38 +262,3 @@ int swsusp_shrink_memory(void)
return 0;
}
-
-int swsusp_resume(void)
-{
- int error;
-
- local_irq_disable();
- /* NOTE: device_power_down() is just a suspend() with irqs off;
- * it has no special "power things down" semantics
- */
- if (device_power_down(PMSG_PRETHAW))
- printk(KERN_ERR "Some devices failed to power down, very bad\n");
- /* We'll ignore saved state, but this gets preempt count (etc) right */
- save_processor_state();
- error = restore_highmem();
- if (!error) {
- error = swsusp_arch_resume();
- /* The code below is only ever reached in case of a failure.
- * Otherwise execution continues at place where
- * swsusp_arch_suspend() was called
- */
- BUG_ON(!error);
- /* This call to restore_highmem() undos the previous one */
- restore_highmem();
- }
- /* The only reason why swsusp_arch_resume() can fail is memory being
- * very tight, so we have to free it as soon as we can to avoid
- * subsequent failures
- */
- swsusp_free();
- restore_processor_state();
- touch_softlockup_watchdog();
- device_power_up();
- local_irq_enable();
- return error;
-}
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 5bd321bcbb7..f5512cb3aa8 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -28,6 +28,29 @@
#include "power.h"
+/*
+ * NOTE: The SNAPSHOT_SET_SWAP_FILE and SNAPSHOT_PMOPS ioctls are obsolete and
+ * will be removed in the future. They are only preserved here for
+ * compatibility with existing userland utilities.
+ */
+#define SNAPSHOT_SET_SWAP_FILE _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
+#define SNAPSHOT_PMOPS _IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
+
+#define PMOPS_PREPARE 1
+#define PMOPS_ENTER 2
+#define PMOPS_FINISH 3
+
+/*
+ * NOTE: The following ioctl definitions are wrong and have been replaced with
+ * correct ones. They are only preserved here for compatibility with existing
+ * userland utilities and will be removed in the future.
+ */
+#define SNAPSHOT_ATOMIC_SNAPSHOT _IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
+#define SNAPSHOT_SET_IMAGE_SIZE _IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
+#define SNAPSHOT_AVAIL_SWAP _IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
+#define SNAPSHOT_GET_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 8, void *)
+
+
#define SNAPSHOT_MINOR 231
static struct snapshot_data {
@@ -36,7 +59,7 @@ static struct snapshot_data {
int mode;
char frozen;
char ready;
- char platform_suspend;
+ char platform_support;
} snapshot_state;
atomic_t snapshot_device_available = ATOMIC_INIT(1);
@@ -44,6 +67,7 @@ atomic_t snapshot_device_available = ATOMIC_INIT(1);
static int snapshot_open(struct inode *inode, struct file *filp)
{
struct snapshot_data *data;
+ int error;
if (!atomic_add_unless(&snapshot_device_available, -1, 0))
return -EBUSY;
@@ -64,13 +88,23 @@ static int snapshot_open(struct inode *inode, struct file *filp)
data->swap = swsusp_resume_device ?
swap_type_of(swsusp_resume_device, 0, NULL) : -1;
data->mode = O_RDONLY;
+ error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
+ if (error)
+ pm_notifier_call_chain(PM_POST_RESTORE);
} else {
data->swap = -1;
data->mode = O_WRONLY;
+ error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
+ if (error)
+ pm_notifier_call_chain(PM_POST_HIBERNATION);
+ }
+ if (error) {
+ atomic_inc(&snapshot_device_available);
+ return error;
}
data->frozen = 0;
data->ready = 0;
- data->platform_suspend = 0;
+ data->platform_support = 0;
return 0;
}
@@ -88,6 +122,8 @@ static int snapshot_release(struct inode *inode, struct file *filp)
thaw_processes();
mutex_unlock(&pm_mutex);
}
+ pm_notifier_call_chain(data->mode == O_WRONLY ?
+ PM_POST_HIBERNATION : PM_POST_RESTORE);
atomic_inc(&snapshot_device_available);
return 0;
}
@@ -133,7 +169,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
{
int error = 0;
struct snapshot_data *data;
- loff_t avail;
+ loff_t size;
sector_t offset;
if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
@@ -151,18 +187,13 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
if (data->frozen)
break;
mutex_lock(&pm_mutex);
- error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
- if (!error) {
- printk("Syncing filesystems ... ");
- sys_sync();
- printk("done.\n");
-
- error = freeze_processes();
- if (error)
- thaw_processes();
- }
+ printk("Syncing filesystems ... ");
+ sys_sync();
+ printk("done.\n");
+
+ error = freeze_processes();
if (error)
- pm_notifier_call_chain(PM_POST_HIBERNATION);
+ thaw_processes();
mutex_unlock(&pm_mutex);
if (!error)
data->frozen = 1;
@@ -173,19 +204,19 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
break;
mutex_lock(&pm_mutex);
thaw_processes();
- pm_notifier_call_chain(PM_POST_HIBERNATION);
mutex_unlock(&pm_mutex);
data->frozen = 0;
break;
+ case SNAPSHOT_CREATE_IMAGE:
case SNAPSHOT_ATOMIC_SNAPSHOT:
if (data->mode != O_RDONLY || !data->frozen || data->ready) {
error = -EPERM;
break;
}
- error = hibernation_snapshot(data->platform_suspend);
+ error = hibernation_snapshot(data->platform_support);
if (!error)
- error = put_user(in_suspend, (unsigned int __user *)arg);
+ error = put_user(in_suspend, (int __user *)arg);
if (!error)
data->ready = 1;
break;
@@ -197,7 +228,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
error = -EPERM;
break;
}
- error = hibernation_restore(data->platform_suspend);
+ error = hibernation_restore(data->platform_support);
break;
case SNAPSHOT_FREE:
@@ -206,16 +237,29 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
data->ready = 0;
break;
+ case SNAPSHOT_PREF_IMAGE_SIZE:
case SNAPSHOT_SET_IMAGE_SIZE:
image_size = arg;
break;
+ case SNAPSHOT_GET_IMAGE_SIZE:
+ if (!data->ready) {
+ error = -ENODATA;
+ break;
+ }
+ size = snapshot_get_image_size();
+ size <<= PAGE_SHIFT;
+ error = put_user(size, (loff_t __user *)arg);
+ break;
+
+ case SNAPSHOT_AVAIL_SWAP_SIZE:
case SNAPSHOT_AVAIL_SWAP:
- avail = count_swap_pages(data->swap, 1);
- avail <<= PAGE_SHIFT;
- error = put_user(avail, (loff_t __user *)arg);
+ size = count_swap_pages(data->swap, 1);
+ size <<= PAGE_SHIFT;
+ error = put_user(size, (loff_t __user *)arg);
break;
+ case SNAPSHOT_ALLOC_SWAP_PAGE:
case SNAPSHOT_GET_SWAP_PAGE:
if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
error = -ENODEV;
@@ -224,7 +268,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
offset = alloc_swapdev_block(data->swap);
if (offset) {
offset <<= PAGE_SHIFT;
- error = put_user(offset, (sector_t __user *)arg);
+ error = put_user(offset, (loff_t __user *)arg);
} else {
error = -ENOSPC;
}
@@ -238,7 +282,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
free_all_swap_pages(data->swap);
break;
- case SNAPSHOT_SET_SWAP_FILE:
+ case SNAPSHOT_SET_SWAP_FILE: /* This ioctl is deprecated */
if (!swsusp_swap_in_use()) {
/*
* User space encodes device types as two-byte values,
@@ -275,26 +319,33 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
mutex_unlock(&pm_mutex);
break;
- case SNAPSHOT_PMOPS:
+ case SNAPSHOT_PLATFORM_SUPPORT:
+ data->platform_support = !!arg;
+ break;
+
+ case SNAPSHOT_POWER_OFF:
+ if (data->platform_support)
+ error = hibernation_platform_enter();
+ break;
+
+ case SNAPSHOT_PMOPS: /* This ioctl is deprecated */
error = -EINVAL;
switch (arg) {
case PMOPS_PREPARE:
- data->platform_suspend = 1;
+ data->platform_support = 1;
error = 0;
break;
case PMOPS_ENTER:
- if (data->platform_suspend)
+ if (data->platform_support)
error = hibernation_platform_enter();
-
break;
case PMOPS_FINISH:
- if (data->platform_suspend)
+ if (data->platform_support)
error = 0;
-
break;
default:
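As an illustration of the renamed interface, a minimal user-space sketch querying the image size via the new SNAPSHOT_GET_IMAGE_SIZE ioctl. The /dev/snapshot path is only the conventional node for the snapshot device, and in a real hibernation tool this call would come after SNAPSHOT_FREEZE and SNAPSHOT_CREATE_IMAGE; both points are assumptions, not something defined in this patch.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/suspend_ioctls.h>

int main(void)
{
	long long size;	/* the kernel stores a loff_t through this pointer */
	int fd = open("/dev/snapshot", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Only meaningful once an image has been created (data->ready);
	 * otherwise the ioctl fails with ENODATA. */
	if (ioctl(fd, SNAPSHOT_GET_IMAGE_SIZE, &size))
		perror("SNAPSHOT_GET_IMAGE_SIZE");
	else
		printf("hibernation image size: %lld bytes\n", size);
	close(fd);
	return 0;
}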
diff --git a/kernel/printk.c b/kernel/printk.c
index a30fe33de39..29ae1e99cde 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -36,6 +36,13 @@
#include <asm/uaccess.h>
+/*
+ * Architectures can override it:
+ */
+void __attribute__((weak)) early_printk(const char *fmt, ...)
+{
+}
+
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
/* printk's without a loglevel use this.. */
@@ -448,10 +455,10 @@ static int __init ignore_loglevel_setup(char *str)
ignore_loglevel = 1;
printk(KERN_INFO "debug: ignoring loglevel setting.\n");
- return 1;
+ return 0;
}
-__setup("ignore_loglevel", ignore_loglevel_setup);
+early_param("ignore_loglevel", ignore_loglevel_setup);
/*
* Write out chars from start to end - 1 inclusive
@@ -573,11 +580,6 @@ static int __init printk_time_setup(char *str)
__setup("time", printk_time_setup);
-__attribute__((weak)) unsigned long long printk_clock(void)
-{
- return sched_clock();
-}
-
/* Check if we have any console registered that can be called early in boot. */
static int have_callable_console(void)
{
@@ -628,30 +630,57 @@ asmlinkage int printk(const char *fmt, ...)
/* cpu currently holding logbuf_lock */
static volatile unsigned int printk_cpu = UINT_MAX;
+const char printk_recursion_bug_msg [] =
+ KERN_CRIT "BUG: recent printk recursion!\n";
+static int printk_recursion_bug;
+
asmlinkage int vprintk(const char *fmt, va_list args)
{
+ static int log_level_unknown = 1;
+ static char printk_buf[1024];
+
unsigned long flags;
- int printed_len;
+ int printed_len = 0;
+ int this_cpu;
char *p;
- static char printk_buf[1024];
- static int log_level_unknown = 1;
boot_delay_msec();
preempt_disable();
- if (unlikely(oops_in_progress) && printk_cpu == smp_processor_id())
- /* If a crash is occurring during printk() on this CPU,
- * make sure we can't deadlock */
- zap_locks();
-
/* This stops the holder of console_sem just where we want him */
raw_local_irq_save(flags);
+ this_cpu = smp_processor_id();
+
+ /*
+ * Ouch, printk recursed into itself!
+ */
+ if (unlikely(printk_cpu == this_cpu)) {
+ /*
+ * If a crash is occurring during printk() on this CPU,
+ * then try to get the crash message out but make sure
+ * we can't deadlock. Otherwise just return to avoid the
+ * recursion - but flag it so that it can be printed at the
+ * next appropriate moment:
+ */
+ if (!oops_in_progress) {
+ printk_recursion_bug = 1;
+ goto out_restore_irqs;
+ }
+ zap_locks();
+ }
+
lockdep_off();
spin_lock(&logbuf_lock);
- printk_cpu = smp_processor_id();
+ printk_cpu = this_cpu;
+ if (printk_recursion_bug) {
+ printk_recursion_bug = 0;
+ strcpy(printk_buf, printk_recursion_bug_msg);
+ printed_len = strlen(printk_recursion_bug_msg);
+ }
/* Emit the output into the temporary buffer */
- printed_len = vscnprintf(printk_buf, sizeof(printk_buf), fmt, args);
+ printed_len += vscnprintf(printk_buf + printed_len,
+ sizeof(printk_buf) - printed_len, fmt, args);
/*
* Copy the output into log_buf. If the caller didn't provide
@@ -680,7 +709,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
loglev_char = default_message_loglevel
+ '0';
}
- t = printk_clock();
+ t = cpu_clock(printk_cpu);
nanosec_rem = do_div(t, 1000000000);
tlen = sprintf(tbuf,
"<%c>[%5lu.%06lu] ",
@@ -744,6 +773,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
printk_cpu = UINT_MAX;
spin_unlock(&logbuf_lock);
lockdep_on();
+out_restore_irqs:
raw_local_irq_restore(flags);
}
@@ -817,7 +847,7 @@ __setup("console=", console_setup);
* commonly to provide a default console (ie from PROM variables) when
* the user has not supplied one.
*/
-int __init add_preferred_console(char *name, int idx, char *options)
+int add_preferred_console(char *name, int idx, char *options)
{
struct console_cmdline *c;
int i;
diff --git a/kernel/profile.c b/kernel/profile.c
index 5e95330e512..e64c2da11c0 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -52,7 +52,7 @@ static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */
-static int __init profile_setup(char * str)
+static int __init profile_setup(char *str)
{
static char __initdata schedstr[] = "schedule";
static char __initdata sleepstr[] = "sleep";
@@ -104,28 +104,28 @@ __setup("profile=", profile_setup);
void __init profile_init(void)
{
- if (!prof_on)
+ if (!prof_on)
return;
-
+
/* only text is profiled */
prof_len = (_etext - _stext) >> prof_shift;
prof_buffer = alloc_bootmem(prof_len*sizeof(atomic_t));
}
/* Profile event notifications */
-
+
#ifdef CONFIG_PROFILING
-
+
static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);
-
-void profile_task_exit(struct task_struct * task)
+
+void profile_task_exit(struct task_struct *task)
{
blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}
-
-int profile_handoff_task(struct task_struct * task)
+
+int profile_handoff_task(struct task_struct *task)
{
int ret;
ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
@@ -137,52 +137,55 @@ void profile_munmap(unsigned long addr)
blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}
-int task_handoff_register(struct notifier_block * n)
+int task_handoff_register(struct notifier_block *n)
{
return atomic_notifier_chain_register(&task_free_notifier, n);
}
+EXPORT_SYMBOL_GPL(task_handoff_register);
-int task_handoff_unregister(struct notifier_block * n)
+int task_handoff_unregister(struct notifier_block *n)
{
return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
+EXPORT_SYMBOL_GPL(task_handoff_unregister);
-int profile_event_register(enum profile_type type, struct notifier_block * n)
+int profile_event_register(enum profile_type type, struct notifier_block *n)
{
int err = -EINVAL;
-
+
switch (type) {
- case PROFILE_TASK_EXIT:
- err = blocking_notifier_chain_register(
- &task_exit_notifier, n);
- break;
- case PROFILE_MUNMAP:
- err = blocking_notifier_chain_register(
- &munmap_notifier, n);
- break;
+ case PROFILE_TASK_EXIT:
+ err = blocking_notifier_chain_register(
+ &task_exit_notifier, n);
+ break;
+ case PROFILE_MUNMAP:
+ err = blocking_notifier_chain_register(
+ &munmap_notifier, n);
+ break;
}
-
+
return err;
}
+EXPORT_SYMBOL_GPL(profile_event_register);
-
-int profile_event_unregister(enum profile_type type, struct notifier_block * n)
+int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
int err = -EINVAL;
-
+
switch (type) {
- case PROFILE_TASK_EXIT:
- err = blocking_notifier_chain_unregister(
- &task_exit_notifier, n);
- break;
- case PROFILE_MUNMAP:
- err = blocking_notifier_chain_unregister(
- &munmap_notifier, n);
- break;
+ case PROFILE_TASK_EXIT:
+ err = blocking_notifier_chain_unregister(
+ &task_exit_notifier, n);
+ break;
+ case PROFILE_MUNMAP:
+ err = blocking_notifier_chain_unregister(
+ &munmap_notifier, n);
+ break;
}
return err;
}
+EXPORT_SYMBOL_GPL(profile_event_unregister);
int register_timer_hook(int (*hook)(struct pt_regs *))
{
@@ -191,6 +194,7 @@ int register_timer_hook(int (*hook)(struct pt_regs *))
timer_hook = hook;
return 0;
}
+EXPORT_SYMBOL_GPL(register_timer_hook);
void unregister_timer_hook(int (*hook)(struct pt_regs *))
{
@@ -199,13 +203,7 @@ void unregister_timer_hook(int (*hook)(struct pt_regs *))
/* make sure all CPUs see the NULL hook */
synchronize_sched(); /* Allow ongoing interrupts to complete. */
}
-
-EXPORT_SYMBOL_GPL(register_timer_hook);
EXPORT_SYMBOL_GPL(unregister_timer_hook);
-EXPORT_SYMBOL_GPL(task_handoff_register);
-EXPORT_SYMBOL_GPL(task_handoff_unregister);
-EXPORT_SYMBOL_GPL(profile_event_register);
-EXPORT_SYMBOL_GPL(profile_event_unregister);
#endif /* CONFIG_PROFILING */
@@ -366,7 +364,7 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
}
break;
- out_free:
+out_free:
page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
per_cpu(cpu_profile_hits, cpu)[1] = NULL;
__free_page(page);
@@ -409,7 +407,6 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */
-
EXPORT_SYMBOL_GPL(profile_hits);
void profile_tick(int type)
@@ -427,7 +424,7 @@ void profile_tick(int type)
#include <asm/uaccess.h>
#include <asm/ptrace.h>
-static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
+static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
@@ -437,8 +434,8 @@ static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
return len;
}
-static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static int prof_cpu_mask_write_proc(struct file *file,
+ const char __user *buffer, unsigned long count, void *data)
{
cpumask_t *mask = (cpumask_t *)data;
unsigned long full_count = count, err;
@@ -457,7 +454,8 @@ void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
struct proc_dir_entry *entry;
/* create /proc/irq/prof_cpu_mask */
- if (!(entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir)))
+ entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
+ if (!entry)
return;
entry->data = (void *)&prof_cpu_mask;
entry->read_proc = prof_cpu_mask_read_proc;
@@ -475,7 +473,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
unsigned long p = *ppos;
ssize_t read;
- char * pnt;
+ char *pnt;
unsigned int sample_step = 1 << prof_shift;
profile_flip_buffers();
@@ -486,12 +484,12 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
read = 0;
while (p < sizeof(unsigned int) && count > 0) {
- if (put_user(*((char *)(&sample_step)+p),buf))
+ if (put_user(*((char *)(&sample_step)+p), buf))
return -EFAULT;
buf++; p++; count--; read++;
}
pnt = (char *)prof_buffer + p - sizeof(atomic_t);
- if (copy_to_user(buf,(void *)pnt,count))
+ if (copy_to_user(buf, (void *)pnt, count))
return -EFAULT;
read += count;
*ppos += read;
@@ -508,7 +506,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
- extern int setup_profiling_timer (unsigned int multiplier);
+ extern int setup_profiling_timer(unsigned int multiplier);
if (count == sizeof(int)) {
unsigned int multiplier;
@@ -591,7 +589,8 @@ static int __init create_proc_profile(void)
return 0;
if (create_hash_tables())
return -1;
- if (!(entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL)))
+ entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL);
+ if (!entry)
return 0;
entry->proc_fops = &proc_profile_operations;
entry->size = (1+prof_len) * sizeof(atomic_t);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 7c76f2ffaea..b0d4ab4dfd3 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -51,7 +51,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
void ptrace_untrace(struct task_struct *child)
{
spin_lock(&child->sighand->siglock);
- if (child->state == TASK_TRACED) {
+ if (task_is_traced(child)) {
if (child->signal->flags & SIGNAL_STOP_STOPPED) {
child->state = TASK_STOPPED;
} else {
@@ -79,7 +79,7 @@ void __ptrace_unlink(struct task_struct *child)
add_parent(child);
}
- if (child->state == TASK_TRACED)
+ if (task_is_traced(child))
ptrace_untrace(child);
}
@@ -103,9 +103,9 @@ int ptrace_check_attach(struct task_struct *child, int kill)
&& child->signal != NULL) {
ret = 0;
spin_lock_irq(&child->sighand->siglock);
- if (child->state == TASK_STOPPED) {
+ if (task_is_stopped(child)) {
child->state = TASK_TRACED;
- } else if (child->state != TASK_TRACED && !kill) {
+ } else if (!task_is_traced(child) && !kill) {
ret = -ESRCH;
}
spin_unlock_irq(&child->sighand->siglock);
@@ -120,7 +120,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
return ret;
}
-static int may_attach(struct task_struct *task)
+int __ptrace_may_attach(struct task_struct *task)
{
/* May we inspect the given task?
* This check is used both for attaching with ptrace
@@ -154,7 +154,7 @@ int ptrace_may_attach(struct task_struct *task)
{
int err;
task_lock(task);
- err = may_attach(task);
+ err = __ptrace_may_attach(task);
task_unlock(task);
return !err;
}
@@ -196,7 +196,7 @@ repeat:
/* the same process cannot be attached many times */
if (task->ptrace & PT_PTRACED)
goto bad;
- retval = may_attach(task);
+ retval = __ptrace_may_attach(task);
if (retval)
goto bad;
@@ -366,12 +366,73 @@ static int ptrace_setsiginfo(struct task_struct *child, siginfo_t __user * data)
return error;
}
+
+#ifdef PTRACE_SINGLESTEP
+#define is_singlestep(request) ((request) == PTRACE_SINGLESTEP)
+#else
+#define is_singlestep(request) 0
+#endif
+
+#ifdef PTRACE_SINGLEBLOCK
+#define is_singleblock(request) ((request) == PTRACE_SINGLEBLOCK)
+#else
+#define is_singleblock(request) 0
+#endif
+
+#ifdef PTRACE_SYSEMU
+#define is_sysemu_singlestep(request) ((request) == PTRACE_SYSEMU_SINGLESTEP)
+#else
+#define is_sysemu_singlestep(request) 0
+#endif
+
+static int ptrace_resume(struct task_struct *child, long request, long data)
+{
+ if (!valid_signal(data))
+ return -EIO;
+
+ if (request == PTRACE_SYSCALL)
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ else
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+
+#ifdef TIF_SYSCALL_EMU
+ if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
+ set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
+ else
+ clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
+#endif
+
+ if (is_singleblock(request)) {
+ if (unlikely(!arch_has_block_step()))
+ return -EIO;
+ user_enable_block_step(child);
+ } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
+ if (unlikely(!arch_has_single_step()))
+ return -EIO;
+ user_enable_single_step(child);
+ } else
+ user_disable_single_step(child);
+
+ child->exit_code = data;
+ wake_up_process(child);
+
+ return 0;
+}
+
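For context, a hedged user-space sketch of the requests that now funnel into ptrace_resume(); the data argument carries the signal to deliver on resume, so 0 resumes without delivering one.

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Resume a stopped tracee without delivering a signal.  Using
 * PTRACE_SYSCALL instead would stop it again at the next syscall
 * entry/exit, and PTRACE_SINGLESTEP would step one instruction on
 * architectures where arch_has_single_step() is true. */
static int resume_tracee(pid_t pid)
{
	if (ptrace(PTRACE_CONT, pid, 0, 0) == -1) {
		perror("PTRACE_CONT");
		return -1;
	}
	return 0;
}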
int ptrace_request(struct task_struct *child, long request,
long addr, long data)
{
int ret = -EIO;
switch (request) {
+ case PTRACE_PEEKTEXT:
+ case PTRACE_PEEKDATA:
+ return generic_ptrace_peekdata(child, addr, data);
+ case PTRACE_POKETEXT:
+ case PTRACE_POKEDATA:
+ return generic_ptrace_pokedata(child, addr, data);
+
#ifdef PTRACE_OLDSETOPTIONS
case PTRACE_OLDSETOPTIONS:
#endif
@@ -390,6 +451,26 @@ int ptrace_request(struct task_struct *child, long request,
case PTRACE_DETACH: /* detach a process that was attached. */
ret = ptrace_detach(child, data);
break;
+
+#ifdef PTRACE_SINGLESTEP
+ case PTRACE_SINGLESTEP:
+#endif
+#ifdef PTRACE_SINGLEBLOCK
+ case PTRACE_SINGLEBLOCK:
+#endif
+#ifdef PTRACE_SYSEMU
+ case PTRACE_SYSEMU:
+ case PTRACE_SYSEMU_SINGLESTEP:
+#endif
+ case PTRACE_SYSCALL:
+ case PTRACE_CONT:
+ return ptrace_resume(child, request, data);
+
+ case PTRACE_KILL:
+ if (child->exit_state) /* already dead */
+ return 0;
+ return ptrace_resume(child, request, SIGKILL);
+
default:
break;
}
@@ -470,6 +551,8 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
lock_kernel();
if (request == PTRACE_TRACEME) {
ret = ptrace_traceme();
+ if (!ret)
+ arch_ptrace_attach(current);
goto out;
}
@@ -524,3 +607,87 @@ int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
return (copied == sizeof(data)) ? 0 : -EIO;
}
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+
+int compat_ptrace_request(struct task_struct *child, compat_long_t request,
+ compat_ulong_t addr, compat_ulong_t data)
+{
+ compat_ulong_t __user *datap = compat_ptr(data);
+ compat_ulong_t word;
+ int ret;
+
+ switch (request) {
+ case PTRACE_PEEKTEXT:
+ case PTRACE_PEEKDATA:
+ ret = access_process_vm(child, addr, &word, sizeof(word), 0);
+ if (ret != sizeof(word))
+ ret = -EIO;
+ else
+ ret = put_user(word, datap);
+ break;
+
+ case PTRACE_POKETEXT:
+ case PTRACE_POKEDATA:
+ ret = access_process_vm(child, addr, &data, sizeof(data), 1);
+ ret = (ret != sizeof(data) ? -EIO : 0);
+ break;
+
+ case PTRACE_GETEVENTMSG:
+ ret = put_user((compat_ulong_t) child->ptrace_message, datap);
+ break;
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ }
+
+ return ret;
+}
+
+#ifdef __ARCH_WANT_COMPAT_SYS_PTRACE
+asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+ compat_long_t addr, compat_long_t data)
+{
+ struct task_struct *child;
+ long ret;
+
+ /*
+ * This lock_kernel fixes a subtle race with suid exec
+ */
+ lock_kernel();
+ if (request == PTRACE_TRACEME) {
+ ret = ptrace_traceme();
+ goto out;
+ }
+
+ child = ptrace_get_task_struct(pid);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
+ goto out;
+ }
+
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ /*
+ * Some architectures need to do book-keeping after
+ * a ptrace attach.
+ */
+ if (!ret)
+ arch_ptrace_attach(child);
+ goto out_put_task_struct;
+ }
+
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ if (!ret)
+ ret = compat_arch_ptrace(child, request, addr, data);
+
+ out_put_task_struct:
+ put_task_struct(child);
+ out:
+ unlock_kernel();
+ return ret;
+}
+#endif /* __ARCH_WANT_COMPAT_SYS_PTRACE */
+
+#endif /* CONFIG_COMPAT */
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
new file mode 100644
index 00000000000..f4ffbd0f306
--- /dev/null
+++ b/kernel/rcuclassic.c
@@ -0,0 +1,575 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2001
+ *
+ * Authors: Dipankar Sarma <dipankar@in.ibm.com>
+ * Manfred Spraul <manfred@colorfullife.com>
+ *
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ * Papers:
+ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
+ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/mutex.h>
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+ STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
+
+
+/* Definition for rcupdate control block. */
+static struct rcu_ctrlblk rcu_ctrlblk = {
+ .cur = -300,
+ .completed = -300,
+ .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
+ .cpumask = CPU_MASK_NONE,
+};
+static struct rcu_ctrlblk rcu_bh_ctrlblk = {
+ .cur = -300,
+ .completed = -300,
+ .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
+ .cpumask = CPU_MASK_NONE,
+};
+
+DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
+DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
+
+static int blimit = 10;
+static int qhimark = 10000;
+static int qlowmark = 100;
+
+#ifdef CONFIG_SMP
+static void force_quiescent_state(struct rcu_data *rdp,
+ struct rcu_ctrlblk *rcp)
+{
+ int cpu;
+ cpumask_t cpumask;
+ set_need_resched();
+ if (unlikely(!rcp->signaled)) {
+ rcp->signaled = 1;
+ /*
+ * Don't send IPI to itself. With irqs disabled,
+ * rdp->cpu is the current cpu.
+ */
+ cpumask = rcp->cpumask;
+ cpu_clear(rdp->cpu, cpumask);
+ for_each_cpu_mask(cpu, cpumask)
+ smp_send_reschedule(cpu);
+ }
+}
+#else
+static inline void force_quiescent_state(struct rcu_data *rdp,
+ struct rcu_ctrlblk *rcp)
+{
+ set_need_resched();
+}
+#endif
+
+/**
+ * call_rcu - Queue an RCU callback for invocation after a grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ */
+void call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu))
+{
+ unsigned long flags;
+ struct rcu_data *rdp;
+
+ head->func = func;
+ head->next = NULL;
+ local_irq_save(flags);
+ rdp = &__get_cpu_var(rcu_data);
+ *rdp->nxttail = head;
+ rdp->nxttail = &head->next;
+ if (unlikely(++rdp->qlen > qhimark)) {
+ rdp->blimit = INT_MAX;
+ force_quiescent_state(rdp, &rcu_ctrlblk);
+ }
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
+
+/**
+ * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_bh() assumes
+ * that the read-side critical sections end on completion of a softirq
+ * handler. This means that read-side critical sections in process
+ * context must not be interrupted by softirqs. This interface is to be
+ * used when most of the read-side critical sections are in softirq context.
+ * RCU read-side critical sections are delimited by rcu_read_lock() and
+ * rcu_read_unlock(), if in interrupt context, or by rcu_read_lock_bh()
+ * and rcu_read_unlock_bh(), if in process context. These may be nested.
+ */
+void call_rcu_bh(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu))
+{
+ unsigned long flags;
+ struct rcu_data *rdp;
+
+ head->func = func;
+ head->next = NULL;
+ local_irq_save(flags);
+ rdp = &__get_cpu_var(rcu_bh_data);
+ *rdp->nxttail = head;
+ rdp->nxttail = &head->next;
+
+ if (unlikely(++rdp->qlen > qhimark)) {
+ rdp->blimit = INT_MAX;
+ force_quiescent_state(rdp, &rcu_bh_ctrlblk);
+ }
+
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(call_rcu_bh);
+
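A minimal sketch of the usage pattern the call_rcu() kernel-doc above describes; the struct and function names are made up for illustration, and updaters are assumed to be serialized by a lock that is not shown.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;
};

/* Runs after a grace period: no reader can still hold a reference. */
static void foo_reclaim(struct rcu_head *rcu)
{
	struct foo *fp = container_of(rcu, struct foo, rcu);

	kfree(fp);
}

/* Publish a new version and defer freeing of the old one until all
 * pre-existing rcu_read_lock() sections have completed. */
static void foo_update(struct foo **slot, struct foo *new_fp)
{
	struct foo *old_fp = *slot;

	rcu_assign_pointer(*slot, new_fp);
	call_rcu(&old_fp->rcu, foo_reclaim);
}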
+/*
+ * Return the number of RCU batches processed thus far. Useful
+ * for debug and statistics.
+ */
+long rcu_batches_completed(void)
+{
+ return rcu_ctrlblk.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+
+/*
+ * Return the number of RCU batches processed thus far. Useful
+ * for debug and statistics.
+ */
+long rcu_batches_completed_bh(void)
+{
+ return rcu_bh_ctrlblk.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
+
+/* Raises the softirq for processing rcu_callbacks. */
+static inline void raise_rcu_softirq(void)
+{
+ raise_softirq(RCU_SOFTIRQ);
+ /*
+ * The smp_mb() here is required to ensure that this cpu's
+ * __rcu_process_callbacks() reads the most recently updated
+ * value of rcu->cur.
+ */
+ smp_mb();
+}
+
+/*
+ * Invoke the completed RCU callbacks. They are expected to be in
+ * a per-cpu list.
+ */
+static void rcu_do_batch(struct rcu_data *rdp)
+{
+ struct rcu_head *next, *list;
+ int count = 0;
+
+ list = rdp->donelist;
+ while (list) {
+ next = list->next;
+ prefetch(next);
+ list->func(list);
+ list = next;
+ if (++count >= rdp->blimit)
+ break;
+ }
+ rdp->donelist = list;
+
+ local_irq_disable();
+ rdp->qlen -= count;
+ local_irq_enable();
+ if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
+ rdp->blimit = blimit;
+
+ if (!rdp->donelist)
+ rdp->donetail = &rdp->donelist;
+ else
+ raise_rcu_softirq();
+}
+
+/*
+ * Grace period handling:
+ * The grace period handling consists of two steps:
+ * - A new grace period is started.
+ * This is done by rcu_start_batch. The start is not broadcasted to
+ * all cpus, they must pick this up by comparing rcp->cur with
+ * rdp->quiescbatch. All cpus are recorded in the
+ * rcu_ctrlblk.cpumask bitmap.
+ * - All cpus must go through a quiescent state.
+ * Since the start of the grace period is not broadcasted, at least two
+ * calls to rcu_check_quiescent_state are required:
+ * The first call just notices that a new grace period is running. The
+ * following calls check if there was a quiescent state since the beginning
+ * of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
+ * the bitmap is empty, then the grace period is completed.
+ * rcu_check_quiescent_state calls rcu_start_batch() to start the next grace
+ * period (if necessary).
+ */
+/*
+ * Register a new batch of callbacks, and start it up if there is currently no
+ * active batch and the batch to be registered has not already occurred.
+ * Caller must hold rcu_ctrlblk.lock.
+ */
+static void rcu_start_batch(struct rcu_ctrlblk *rcp)
+{
+ if (rcp->next_pending &&
+ rcp->completed == rcp->cur) {
+ rcp->next_pending = 0;
+ /*
+ * next_pending == 0 must be visible in
+ * __rcu_process_callbacks() before it can see new value of cur.
+ */
+ smp_wmb();
+ rcp->cur++;
+
+ /*
+ * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
+ * barrier. Otherwise it can cause tickless idle CPUs to be
+ * included in rcp->cpumask, which will extend grace periods
+ * unnecessarily.
+ */
+ smp_mb();
+ cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
+
+ rcp->signaled = 0;
+ }
+}
+
+/*
+ * cpu went through a quiescent state since the beginning of the grace period.
+ * Clear it from the cpu mask and complete the grace period if it was the last
+ * cpu. Start another grace period if someone has further entries pending
+ */
+static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
+{
+ cpu_clear(cpu, rcp->cpumask);
+ if (cpus_empty(rcp->cpumask)) {
+ /* batch completed ! */
+ rcp->completed = rcp->cur;
+ rcu_start_batch(rcp);
+ }
+}
+
+/*
+ * Check if the cpu has gone through a quiescent state (say context
+ * switch). If so and if it already hasn't done so in this RCU
+ * quiescent cycle, then indicate that it has done so.
+ */
+static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
+ struct rcu_data *rdp)
+{
+ if (rdp->quiescbatch != rcp->cur) {
+ /* start new grace period: */
+ rdp->qs_pending = 1;
+ rdp->passed_quiesc = 0;
+ rdp->quiescbatch = rcp->cur;
+ return;
+ }
+
+ /* Grace period already completed for this cpu?
+ * qs_pending is checked instead of the actual bitmap to avoid
+ * cacheline thrashing.
+ */
+ if (!rdp->qs_pending)
+ return;
+
+ /*
+ * Was there a quiescent state since the beginning of the grace
+ * period? If no, then exit and wait for the next call.
+ */
+ if (!rdp->passed_quiesc)
+ return;
+ rdp->qs_pending = 0;
+
+ spin_lock(&rcp->lock);
+ /*
+ * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
+ * during cpu startup. Ignore the quiescent state.
+ */
+ if (likely(rdp->quiescbatch == rcp->cur))
+ cpu_quiet(rdp->cpu, rcp);
+
+ spin_unlock(&rcp->lock);
+}
+
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* Warning! Helper for rcu_offline_cpu(); do not use elsewhere without reviewing
+ * the locking requirements: the list it is pulling from has to belong to a cpu
+ * which is dead and hence not processing interrupts.
+ */
+static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
+ struct rcu_head **tail)
+{
+ local_irq_disable();
+ *this_rdp->nxttail = list;
+ if (list)
+ this_rdp->nxttail = tail;
+ local_irq_enable();
+}
+
+static void __rcu_offline_cpu(struct rcu_data *this_rdp,
+ struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
+{
+ /* If the cpu going offline owns the grace period, we can
+ * block indefinitely waiting for it, so flush it here.
+ */
+ spin_lock_bh(&rcp->lock);
+ if (rcp->cur != rcp->completed)
+ cpu_quiet(rdp->cpu, rcp);
+ spin_unlock_bh(&rcp->lock);
+ rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
+ rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
+ rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
+}
+
+static void rcu_offline_cpu(int cpu)
+{
+ struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
+ struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);
+
+ __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
+ &per_cpu(rcu_data, cpu));
+ __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
+ &per_cpu(rcu_bh_data, cpu));
+ put_cpu_var(rcu_data);
+ put_cpu_var(rcu_bh_data);
+}
+
+#else
+
+static void rcu_offline_cpu(int cpu)
+{
+}
+
+#endif
+
+/*
+ * This does the RCU processing work from softirq context.
+ */
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
+ struct rcu_data *rdp)
+{
+ if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
+ *rdp->donetail = rdp->curlist;
+ rdp->donetail = rdp->curtail;
+ rdp->curlist = NULL;
+ rdp->curtail = &rdp->curlist;
+ }
+
+ if (rdp->nxtlist && !rdp->curlist) {
+ local_irq_disable();
+ rdp->curlist = rdp->nxtlist;
+ rdp->curtail = rdp->nxttail;
+ rdp->nxtlist = NULL;
+ rdp->nxttail = &rdp->nxtlist;
+ local_irq_enable();
+
+ /*
+ * start the next batch of callbacks
+ */
+
+ /* determine batch number */
+ rdp->batch = rcp->cur + 1;
+ /* see the comment and corresponding wmb() in
+ * the rcu_start_batch()
+ */
+ smp_rmb();
+
+ if (!rcp->next_pending) {
+ /* and start it/schedule start if it's a new batch */
+ spin_lock(&rcp->lock);
+ rcp->next_pending = 1;
+ rcu_start_batch(rcp);
+ spin_unlock(&rcp->lock);
+ }
+ }
+
+ rcu_check_quiescent_state(rcp, rdp);
+ if (rdp->donelist)
+ rcu_do_batch(rdp);
+}
+
+static void rcu_process_callbacks(struct softirq_action *unused)
+{
+ __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
+ __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
+}
+
+static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
+{
+ /* This cpu has pending rcu entries and the grace period
+ * for them has completed.
+ */
+ if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
+ return 1;
+
+ /* This cpu has no pending entries, but there are new entries */
+ if (!rdp->curlist && rdp->nxtlist)
+ return 1;
+
+ /* This cpu has finished callbacks to invoke */
+ if (rdp->donelist)
+ return 1;
+
+ /* The rcu core waits for a quiescent state from the cpu */
+ if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
+ return 1;
+
+ /* nothing to do */
+ return 0;
+}
+
+/*
+ * Check to see if there is any immediate RCU-related work to be done
+ * by the current CPU, returning 1 if so. This function is part of the
+ * RCU implementation; it is -not- an exported member of the RCU API.
+ */
+int rcu_pending(int cpu)
+{
+ return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
+ __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
+}
+
+/*
+ * Check to see if any future RCU-related work will need to be done
+ * by the current CPU, even if none need be done immediately, returning
+ * 1 if so. This function is part of the RCU implementation; it is -not-
+ * an exported member of the RCU API.
+ */
+int rcu_needs_cpu(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
+
+ return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
+}
+
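+/*
+ * Called from the scheduling-clock interrupt.  If the interrupt arrived
+ * from user mode, or from the idle loop with at most the interrupt
+ * itself on the hardirq stack and no softirq active, this CPU is in a
+ * quiescent state for both rcu and rcu_bh.  Otherwise, merely not being
+ * in a softirq still counts as an rcu_bh quiescent state.  In all cases
+ * the RCU softirq is raised so that any ready callbacks get processed.
+ */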
+void rcu_check_callbacks(int cpu, int user)
+{
+ if (user ||
+ (idle_cpu(cpu) && !in_softirq() &&
+ hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+ rcu_qsctr_inc(cpu);
+ rcu_bh_qsctr_inc(cpu);
+ } else if (!in_softirq())
+ rcu_bh_qsctr_inc(cpu);
+ raise_rcu_softirq();
+}
+
+static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
+ struct rcu_data *rdp)
+{
+ memset(rdp, 0, sizeof(*rdp));
+ rdp->curtail = &rdp->curlist;
+ rdp->nxttail = &rdp->nxtlist;
+ rdp->donetail = &rdp->donelist;
+ rdp->quiescbatch = rcp->completed;
+ rdp->qs_pending = 0;
+ rdp->cpu = cpu;
+ rdp->blimit = blimit;
+}
+
+static void __cpuinit rcu_online_cpu(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
+
+ rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
+ rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
+ open_softirq(RCU_SOFTIRQ, rcu_process_callbacks, NULL);
+}
+
+static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ rcu_online_cpu(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ rcu_offline_cpu(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata rcu_nb = {
+ .notifier_call = rcu_cpu_notify,
+};
+
+/*
+ * Initializes rcu mechanism. Assumed to be called early.
+ * That is, before the local timer (SMP) or jiffie timer (uniprocessor) is set up.
+ * Note that rcu_qsctr and friends are implicitly
+ * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
+ */
+void __init __rcu_init(void)
+{
+ rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
+ (void *)(long)smp_processor_id());
+ /* Register notifier for non-boot CPUs */
+ register_cpu_notifier(&rcu_nb);
+}
+
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a66d4d1615f..760dfc233a0 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -15,7 +15,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
- * Copyright (C) IBM Corporation, 2001
+ * Copyright IBM Corporation, 2001
*
* Authors: Dipankar Sarma <dipankar@in.ibm.com>
* Manfred Spraul <manfred@colorfullife.com>
@@ -35,165 +35,57 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
-#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
-#include <linux/module.h>
#include <linux/completion.h>
-#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
+#include <linux/module.h>
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static struct lock_class_key rcu_lock_key;
-struct lockdep_map rcu_lock_map =
- STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
-
-EXPORT_SYMBOL_GPL(rcu_lock_map);
-#endif
-
-/* Definition for rcupdate control block. */
-static struct rcu_ctrlblk rcu_ctrlblk = {
- .cur = -300,
- .completed = -300,
- .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
- .cpumask = CPU_MASK_NONE,
-};
-static struct rcu_ctrlblk rcu_bh_ctrlblk = {
- .cur = -300,
- .completed = -300,
- .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
- .cpumask = CPU_MASK_NONE,
+struct rcu_synchronize {
+ struct rcu_head head;
+ struct completion completion;
};
-DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
-DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
-
-/* Fake initialization required by compiler */
-static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
-static int blimit = 10;
-static int qhimark = 10000;
-static int qlowmark = 100;
-
+static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;
-#ifdef CONFIG_SMP
-static void force_quiescent_state(struct rcu_data *rdp,
- struct rcu_ctrlblk *rcp)
-{
- int cpu;
- cpumask_t cpumask;
- set_need_resched();
- if (unlikely(!rcp->signaled)) {
- rcp->signaled = 1;
- /*
- * Don't send IPI to itself. With irqs disabled,
- * rdp->cpu is the current cpu.
- */
- cpumask = rcp->cpumask;
- cpu_clear(rdp->cpu, cpumask);
- for_each_cpu_mask(cpu, cpumask)
- smp_send_reschedule(cpu);
- }
-}
-#else
-static inline void force_quiescent_state(struct rcu_data *rdp,
- struct rcu_ctrlblk *rcp)
+/* Because of the FASTCALL declaration of complete(), we use this wrapper. */
+static void wakeme_after_rcu(struct rcu_head *head)
{
- set_need_resched();
+ struct rcu_synchronize *rcu;
+
+ rcu = container_of(head, struct rcu_synchronize, head);
+ complete(&rcu->completion);
}
-#endif
/**
- * call_rcu - Queue an RCU callback for invocation after a grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
+ * synchronize_rcu - wait until a grace period has elapsed.
*
- * The update function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
+ * Control will return to the caller some time after a full grace
+ * period has elapsed, in other words after all currently executing RCU
* read-side critical sections have completed. RCU read-side critical
* sections are delimited by rcu_read_lock() and rcu_read_unlock(),
* and may be nested.
*/
-void fastcall call_rcu(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu))
-{
- unsigned long flags;
- struct rcu_data *rdp;
-
- head->func = func;
- head->next = NULL;
- local_irq_save(flags);
- rdp = &__get_cpu_var(rcu_data);
- *rdp->nxttail = head;
- rdp->nxttail = &head->next;
- if (unlikely(++rdp->qlen > qhimark)) {
- rdp->blimit = INT_MAX;
- force_quiescent_state(rdp, &rcu_ctrlblk);
- }
- local_irq_restore(flags);
-}
-
-/**
- * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
- *
- * The update function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_bh() assumes
- * that the read-side critical sections end on completion of a softirq
- * handler. This means that read-side critical sections in process
- * context must not be interrupted by softirqs. This interface is to be
- * used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by rcu_read_lock() and
- * rcu_read_unlock(), * if in interrupt context or rcu_read_lock_bh()
- * and rcu_read_unlock_bh(), if in process context. These may be nested.
- */
-void fastcall call_rcu_bh(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu))
+void synchronize_rcu(void)
{
- unsigned long flags;
- struct rcu_data *rdp;
-
- head->func = func;
- head->next = NULL;
- local_irq_save(flags);
- rdp = &__get_cpu_var(rcu_bh_data);
- *rdp->nxttail = head;
- rdp->nxttail = &head->next;
-
- if (unlikely(++rdp->qlen > qhimark)) {
- rdp->blimit = INT_MAX;
- force_quiescent_state(rdp, &rcu_bh_ctrlblk);
- }
-
- local_irq_restore(flags);
-}
+ struct rcu_synchronize rcu;
-/*
- * Return the number of RCU batches processed thus far. Useful
- * for debug and statistics.
- */
-long rcu_batches_completed(void)
-{
- return rcu_ctrlblk.completed;
-}
+ init_completion(&rcu.completion);
+ /* Will wake me after RCU finished */
+ call_rcu(&rcu.head, wakeme_after_rcu);
-/*
- * Return the number of RCU batches processed thus far. Useful
- * for debug and statistics.
- */
-long rcu_batches_completed_bh(void)
-{
- return rcu_bh_ctrlblk.completed;
+ /* Wait for it */
+ wait_for_completion(&rcu.completion);
}
+EXPORT_SYMBOL_GPL(synchronize_rcu);
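+
+/*
+ * A typical update-side pattern (purely illustrative; mylist_lock and
+ * the element "p" are invented for the example) unlinks an element and
+ * waits for a grace period before freeing it:
+ *
+ *	spin_lock(&mylist_lock);
+ *	list_del_rcu(&p->list);
+ *	spin_unlock(&mylist_lock);
+ *	synchronize_rcu();
+ *	kfree(p);
+ */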
static void rcu_barrier_callback(struct rcu_head *notused)
{
@@ -207,10 +99,8 @@ static void rcu_barrier_callback(struct rcu_head *notused)
static void rcu_barrier_func(void *notused)
{
int cpu = smp_processor_id();
- struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
- struct rcu_head *head;
+ struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
- head = &rdp->barrier;
atomic_inc(&rcu_barrier_cpu_count);
call_rcu(head, rcu_barrier_callback);
}
@@ -225,420 +115,24 @@ void rcu_barrier(void)
mutex_lock(&rcu_barrier_mutex);
init_completion(&rcu_barrier_completion);
atomic_set(&rcu_barrier_cpu_count, 0);
+ /*
+ * The queueing of callbacks in all CPUs must be atomic with
+ * respect to RCU, otherwise one CPU may queue a callback,
+ * wait for a grace period, decrement barrier count and call
+ * complete(), while other CPUs have not yet queued anything.
+ * So, we need to make sure that grace periods cannot complete
+ * until all the callbacks are queued.
+ */
+ rcu_read_lock();
on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+ rcu_read_unlock();
wait_for_completion(&rcu_barrier_completion);
mutex_unlock(&rcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
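+
+/*
+ * Illustrative use only: a module that passes its own memory to
+ * call_rcu() would typically invoke rcu_barrier() from a (hypothetical)
+ * exit handler, so that all callbacks it queued have run before the
+ * module is unloaded:
+ *
+ *	static void __exit mymodule_exit(void)
+ *	{
+ *		rcu_barrier();
+ *		kmem_cache_destroy(mymodule_cache);
+ *	}
+ */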
-/*
- * Invoke the completed RCU callbacks. They are expected to be in
- * a per-cpu list.
- */
-static void rcu_do_batch(struct rcu_data *rdp)
-{
- struct rcu_head *next, *list;
- int count = 0;
-
- list = rdp->donelist;
- while (list) {
- next = list->next;
- prefetch(next);
- list->func(list);
- list = next;
- if (++count >= rdp->blimit)
- break;
- }
- rdp->donelist = list;
-
- local_irq_disable();
- rdp->qlen -= count;
- local_irq_enable();
- if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
- rdp->blimit = blimit;
-
- if (!rdp->donelist)
- rdp->donetail = &rdp->donelist;
- else
- tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu));
-}
-
-/*
- * Grace period handling:
- * The grace period handling consists out of two steps:
- * - A new grace period is started.
- * This is done by rcu_start_batch. The start is not broadcasted to
- * all cpus, they must pick this up by comparing rcp->cur with
- * rdp->quiescbatch. All cpus are recorded in the
- * rcu_ctrlblk.cpumask bitmap.
- * - All cpus must go through a quiescent state.
- * Since the start of the grace period is not broadcasted, at least two
- * calls to rcu_check_quiescent_state are required:
- * The first call just notices that a new grace period is running. The
- * following calls check if there was a quiescent state since the beginning
- * of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
- * the bitmap is empty, then the grace period is completed.
- * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
- * period (if necessary).
- */
-/*
- * Register a new batch of callbacks, and start it up if there is currently no
- * active batch and the batch to be registered has not already occurred.
- * Caller must hold rcu_ctrlblk.lock.
- */
-static void rcu_start_batch(struct rcu_ctrlblk *rcp)
-{
- if (rcp->next_pending &&
- rcp->completed == rcp->cur) {
- rcp->next_pending = 0;
- /*
- * next_pending == 0 must be visible in
- * __rcu_process_callbacks() before it can see new value of cur.
- */
- smp_wmb();
- rcp->cur++;
-
- /*
- * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
- * Barrier Otherwise it can cause tickless idle CPUs to be
- * included in rcp->cpumask, which will extend graceperiods
- * unnecessarily.
- */
- smp_mb();
- cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
-
- rcp->signaled = 0;
- }
-}
-
-/*
- * cpu went through a quiescent state since the beginning of the grace period.
- * Clear it from the cpu mask and complete the grace period if it was the last
- * cpu. Start another grace period if someone has further entries pending
- */
-static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
-{
- cpu_clear(cpu, rcp->cpumask);
- if (cpus_empty(rcp->cpumask)) {
- /* batch completed ! */
- rcp->completed = rcp->cur;
- rcu_start_batch(rcp);
- }
-}
-
-/*
- * Check if the cpu has gone through a quiescent state (say context
- * switch). If so and if it already hasn't done so in this RCU
- * quiescent cycle, then indicate that it has done so.
- */
-static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
- struct rcu_data *rdp)
-{
- if (rdp->quiescbatch != rcp->cur) {
- /* start new grace period: */
- rdp->qs_pending = 1;
- rdp->passed_quiesc = 0;
- rdp->quiescbatch = rcp->cur;
- return;
- }
-
- /* Grace period already completed for this cpu?
- * qs_pending is checked instead of the actual bitmap to avoid
- * cacheline trashing.
- */
- if (!rdp->qs_pending)
- return;
-
- /*
- * Was there a quiescent state since the beginning of the grace
- * period? If no, then exit and wait for the next call.
- */
- if (!rdp->passed_quiesc)
- return;
- rdp->qs_pending = 0;
-
- spin_lock(&rcp->lock);
- /*
- * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
- * during cpu startup. Ignore the quiescent state.
- */
- if (likely(rdp->quiescbatch == rcp->cur))
- cpu_quiet(rdp->cpu, rcp);
-
- spin_unlock(&rcp->lock);
-}
-
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing
- * locking requirements, the list it's pulling from has to belong to a cpu
- * which is dead and hence not processing interrupts.
- */
-static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
- struct rcu_head **tail)
-{
- local_irq_disable();
- *this_rdp->nxttail = list;
- if (list)
- this_rdp->nxttail = tail;
- local_irq_enable();
-}
-
-static void __rcu_offline_cpu(struct rcu_data *this_rdp,
- struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
-{
- /* if the cpu going offline owns the grace period
- * we can block indefinitely waiting for it, so flush
- * it here
- */
- spin_lock_bh(&rcp->lock);
- if (rcp->cur != rcp->completed)
- cpu_quiet(rdp->cpu, rcp);
- spin_unlock_bh(&rcp->lock);
- rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
- rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
- rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
-}
-
-static void rcu_offline_cpu(int cpu)
-{
- struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
- struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);
-
- __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
- &per_cpu(rcu_data, cpu));
- __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
- &per_cpu(rcu_bh_data, cpu));
- put_cpu_var(rcu_data);
- put_cpu_var(rcu_bh_data);
- tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu);
-}
-
-#else
-
-static void rcu_offline_cpu(int cpu)
-{
-}
-
-#endif
-
-/*
- * This does the RCU processing work from tasklet context.
- */
-static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
- struct rcu_data *rdp)
-{
- if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
- *rdp->donetail = rdp->curlist;
- rdp->donetail = rdp->curtail;
- rdp->curlist = NULL;
- rdp->curtail = &rdp->curlist;
- }
-
- if (rdp->nxtlist && !rdp->curlist) {
- local_irq_disable();
- rdp->curlist = rdp->nxtlist;
- rdp->curtail = rdp->nxttail;
- rdp->nxtlist = NULL;
- rdp->nxttail = &rdp->nxtlist;
- local_irq_enable();
-
- /*
- * start the next batch of callbacks
- */
-
- /* determine batch number */
- rdp->batch = rcp->cur + 1;
- /* see the comment and corresponding wmb() in
- * the rcu_start_batch()
- */
- smp_rmb();
-
- if (!rcp->next_pending) {
- /* and start it/schedule start if it's a new batch */
- spin_lock(&rcp->lock);
- rcp->next_pending = 1;
- rcu_start_batch(rcp);
- spin_unlock(&rcp->lock);
- }
- }
-
- rcu_check_quiescent_state(rcp, rdp);
- if (rdp->donelist)
- rcu_do_batch(rdp);
-}
-
-static void rcu_process_callbacks(unsigned long unused)
-{
- __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
- __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
-}
-
-static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
-{
- /* This cpu has pending rcu entries and the grace period
- * for them has completed.
- */
- if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
- return 1;
-
- /* This cpu has no pending entries, but there are new entries */
- if (!rdp->curlist && rdp->nxtlist)
- return 1;
-
- /* This cpu has finished callbacks to invoke */
- if (rdp->donelist)
- return 1;
-
- /* The rcu core waits for a quiescent state from the cpu */
- if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
- return 1;
-
- /* nothing to do */
- return 0;
-}
-
-/*
- * Check to see if there is any immediate RCU-related work to be done
- * by the current CPU, returning 1 if so. This function is part of the
- * RCU implementation; it is -not- an exported member of the RCU API.
- */
-int rcu_pending(int cpu)
-{
- return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
- __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
-}
-
-/*
- * Check to see if any future RCU-related work will need to be done
- * by the current CPU, even if none need be done immediately, returning
- * 1 if so. This function is part of the RCU implementation; it is -not-
- * an exported member of the RCU API.
- */
-int rcu_needs_cpu(int cpu)
-{
- struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
- struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
-
- return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
-}
-
-void rcu_check_callbacks(int cpu, int user)
-{
- if (user ||
- (idle_cpu(cpu) && !in_softirq() &&
- hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
- rcu_qsctr_inc(cpu);
- rcu_bh_qsctr_inc(cpu);
- } else if (!in_softirq())
- rcu_bh_qsctr_inc(cpu);
- tasklet_schedule(&per_cpu(rcu_tasklet, cpu));
-}
-
-static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
- struct rcu_data *rdp)
-{
- memset(rdp, 0, sizeof(*rdp));
- rdp->curtail = &rdp->curlist;
- rdp->nxttail = &rdp->nxtlist;
- rdp->donetail = &rdp->donelist;
- rdp->quiescbatch = rcp->completed;
- rdp->qs_pending = 0;
- rdp->cpu = cpu;
- rdp->blimit = blimit;
-}
-
-static void __devinit rcu_online_cpu(int cpu)
-{
- struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
- struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
-
- rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
- rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
- tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
-}
-
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- long cpu = (long)hcpu;
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- rcu_online_cpu(cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- rcu_offline_cpu(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata rcu_nb = {
- .notifier_call = rcu_cpu_notify,
-};
-
-/*
- * Initializes rcu mechanism. Assumed to be called early.
- * That is before local timer(SMP) or jiffie timer (uniproc) is setup.
- * Note that rcu_qsctr and friends are implicitly
- * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
- */
void __init rcu_init(void)
{
- rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
- (void *)(long)smp_processor_id());
- /* Register notifier for non-boot CPUs */
- register_cpu_notifier(&rcu_nb);
-}
-
-struct rcu_synchronize {
- struct rcu_head head;
- struct completion completion;
-};
-
-/* Because of FASTCALL declaration of complete, we use this wrapper */
-static void wakeme_after_rcu(struct rcu_head *head)
-{
- struct rcu_synchronize *rcu;
-
- rcu = container_of(head, struct rcu_synchronize, head);
- complete(&rcu->completion);
+ __rcu_init();
}
-/**
- * synchronize_rcu - wait until a grace period has elapsed.
- *
- * Control will return to the caller some time after a full grace
- * period has elapsed, in other words after all currently executing RCU
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- *
- * If your read-side code is not protected by rcu_read_lock(), do -not-
- * use synchronize_rcu().
- */
-void synchronize_rcu(void)
-{
- struct rcu_synchronize rcu;
-
- init_completion(&rcu.completion);
- /* Will wake me after RCU finished */
- call_rcu(&rcu.head, wakeme_after_rcu);
-
- /* Wait for it */
- wait_for_completion(&rcu.completion);
-}
-
-module_param(blimit, int, 0);
-module_param(qhimark, int, 0);
-module_param(qlowmark, int, 0);
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
-EXPORT_SYMBOL_GPL(call_rcu);
-EXPORT_SYMBOL_GPL(call_rcu_bh);
-EXPORT_SYMBOL_GPL(synchronize_rcu);
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
new file mode 100644
index 00000000000..987cfb7ade8
--- /dev/null
+++ b/kernel/rcupreempt.c
@@ -0,0 +1,953 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion, realtime implementation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2006
+ *
+ * Authors: Paul E. McKenney <paulmck@us.ibm.com>
+ * With thanks to Esben Nielsen, Bill Huey, and Ingo Molnar
+ * for pushing me away from locks and towards counters, and
+ * to Suparna Bhattacharya for pushing me completely away
+ * from atomic instructions on the read side.
+ *
+ * Papers: http://www.rdrop.com/users/paulmck/RCU
+ *
+ * Design Document: http://lwn.net/Articles/253651/
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU/ *.txt
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/random.h>
+#include <linux/delay.h>
+#include <linux/byteorder/swabb.h>
+#include <linux/cpumask.h>
+#include <linux/rcupreempt_trace.h>
+
+/*
+ * Macro that prevents the compiler from reordering accesses, but does
+ * absolutely -nothing- to prevent CPUs from reordering. This is used
+ * only to mediate communication between mainline code and hardware
+ * interrupt and NMI handlers.
+ */
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
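+/*
+ * For example, a (hypothetical) polling loop such as
+ *
+ *	while (!ACCESS_ONCE(flag))
+ *		cpu_relax();
+ *
+ * forces the compiler to reload "flag" on each pass rather than
+ * hoisting the load out of the loop.
+ */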
+
+/*
+ * PREEMPT_RCU data structures.
+ */
+
+/*
+ * GP_STAGES specifies the number of times the state machine has
+ * to go through the all the rcu_try_flip_states (see below)
+ * in a single Grace Period.
+ *
+ * GP in GP_STAGES stands for Grace Period ;)
+ */
+#define GP_STAGES 2
+struct rcu_data {
+ spinlock_t lock; /* Protect rcu_data fields. */
+ long completed; /* Number of last completed batch. */
+ int waitlistcount;
+ struct tasklet_struct rcu_tasklet;
+ struct rcu_head *nextlist;
+ struct rcu_head **nexttail;
+ struct rcu_head *waitlist[GP_STAGES];
+ struct rcu_head **waittail[GP_STAGES];
+ struct rcu_head *donelist;
+ struct rcu_head **donetail;
+ long rcu_flipctr[2];
+#ifdef CONFIG_RCU_TRACE
+ struct rcupreempt_trace trace;
+#endif /* #ifdef CONFIG_RCU_TRACE */
+};
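+
+/*
+ * Callbacks queued by call_rcu() flow through the lists above in
+ * stages: nextlist -> waitlist[0] -> ... -> waitlist[GP_STAGES - 1] ->
+ * donelist.  They advance one stage each time __rcu_advance_callbacks()
+ * observes a counter flip, and are finally invoked off donelist by
+ * rcu_process_callbacks().
+ */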
+
+/*
+ * States for rcu_try_flip() and friends.
+ */
+
+enum rcu_try_flip_states {
+
+ /*
+	 * Stay here if nothing is happening. Flip the counter if something
+	 * starts happening. Denoted by "I".
+ */
+ rcu_try_flip_idle_state,
+
+ /*
+ * Wait here for all CPUs to notice that the counter has flipped. This
+ * prevents the old set of counters from ever being incremented once
+ * we leave this state, which in turn is necessary because we cannot
+ * test any individual counter for zero -- we can only check the sum.
+ * Denoted by "A".
+ */
+ rcu_try_flip_waitack_state,
+
+ /*
+ * Wait here for the sum of the old per-CPU counters to reach zero.
+ * Denoted by "Z".
+ */
+ rcu_try_flip_waitzero_state,
+
+ /*
+ * Wait here for each of the other CPUs to execute a memory barrier.
+ * This is necessary to ensure that these other CPUs really have
+ * completed executing their RCU read-side critical sections, despite
+ * their CPUs wildly reordering memory. Denoted by "M".
+ */
+ rcu_try_flip_waitmb_state,
+};
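+
+/*
+ * rcu_try_flip() therefore cycles the state machine I -> A -> Z -> M
+ * and back to I, advancing it by at most one state per invocation.
+ */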
+
+struct rcu_ctrlblk {
+ spinlock_t fliplock; /* Protect state-machine transitions. */
+ long completed; /* Number of last completed batch. */
+ enum rcu_try_flip_states rcu_try_flip_state; /* The current state of
+ the rcu state machine */
+};
+
+static DEFINE_PER_CPU(struct rcu_data, rcu_data);
+static struct rcu_ctrlblk rcu_ctrlblk = {
+ .fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
+ .completed = 0,
+ .rcu_try_flip_state = rcu_try_flip_idle_state,
+};
+
+
+#ifdef CONFIG_RCU_TRACE
+static char *rcu_try_flip_state_names[] =
+ { "idle", "waitack", "waitzero", "waitmb" };
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
+static cpumask_t rcu_cpu_online_map __read_mostly = CPU_MASK_NONE;
+
+/*
+ * Enum and per-CPU flag to determine when each CPU has seen
+ * the most recent counter flip.
+ */
+
+enum rcu_flip_flag_values {
+ rcu_flip_seen, /* Steady/initial state, last flip seen. */
+ /* Only GP detector can update. */
+ rcu_flipped /* Flip just completed, need confirmation. */
+ /* Only corresponding CPU can update. */
+};
+static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_flip_flag_values, rcu_flip_flag)
+ = rcu_flip_seen;
+
+/*
+ * Enum and per-CPU flag to determine when each CPU has executed the
+ * needed memory barrier to fence in memory references from its last RCU
+ * read-side critical section in the just-completed grace period.
+ */
+
+enum rcu_mb_flag_values {
+ rcu_mb_done, /* Steady/initial state, no mb()s required. */
+ /* Only GP detector can update. */
+ rcu_mb_needed /* Flip just completed, need an mb(). */
+ /* Only corresponding CPU can update. */
+};
+static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_mb_flag_values, rcu_mb_flag)
+ = rcu_mb_done;
+
+/*
+ * RCU_DATA_ME: find the current CPU's rcu_data structure.
+ * RCU_DATA_CPU: find the specified CPU's rcu_data structure.
+ */
+#define RCU_DATA_ME() (&__get_cpu_var(rcu_data))
+#define RCU_DATA_CPU(cpu) (&per_cpu(rcu_data, cpu))
+
+/*
+ * Helper macro for tracing when the appropriate rcu_data is not
+ * cached in a local variable, but where the CPU number is so cached.
+ */
+#define RCU_TRACE_CPU(f, cpu) RCU_TRACE(f, &(RCU_DATA_CPU(cpu)->trace));
+
+/*
+ * Helper macro for tracing when the appropriate rcu_data is not
+ * cached in a local variable.
+ */
+#define RCU_TRACE_ME(f) RCU_TRACE(f, &(RCU_DATA_ME()->trace));
+
+/*
+ * Helper macro for tracing when the appropriate rcu_data is pointed
+ * to by a local variable.
+ */
+#define RCU_TRACE_RDP(f, rdp) RCU_TRACE(f, &((rdp)->trace));
+
+/*
+ * Return the number of RCU batches processed thus far. Useful
+ * for debug and statistics.
+ */
+long rcu_batches_completed(void)
+{
+ return rcu_ctrlblk.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+
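+/*
+ * In this implementation rcu_batches_completed_bh is presumably just an
+ * alias (a #define in the corresponding header) for rcu_batches_completed,
+ * hence the bare export below.
+ */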
+EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
+
+void __rcu_read_lock(void)
+{
+ int idx;
+ struct task_struct *t = current;
+ int nesting;
+
+ nesting = ACCESS_ONCE(t->rcu_read_lock_nesting);
+ if (nesting != 0) {
+
+ /* An earlier rcu_read_lock() covers us, just count it. */
+
+ t->rcu_read_lock_nesting = nesting + 1;
+
+ } else {
+ unsigned long flags;
+
+ /*
+ * We disable interrupts for the following reasons:
+	 * - If we get a scheduling clock interrupt here, and we
+ * end up acking the counter flip, it's like a promise
+ * that we will never increment the old counter again.
+ * Thus we will break that promise if that
+ * scheduling clock interrupt happens between the time
+ * we pick the .completed field and the time that we
+ * increment our counter.
+ *
+ * - We don't want to be preempted out here.
+ *
+ * NMIs can still occur, of course, and might themselves
+ * contain rcu_read_lock().
+ */
+
+ local_irq_save(flags);
+
+ /*
+ * Outermost nesting of rcu_read_lock(), so increment
+ * the current counter for the current CPU. Use volatile
+ * casts to prevent the compiler from reordering.
+ */
+
+ idx = ACCESS_ONCE(rcu_ctrlblk.completed) & 0x1;
+ ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])++;
+
+ /*
+ * Now that the per-CPU counter has been incremented, we
+ * are protected from races with rcu_read_lock() invoked
+ * from NMI handlers on this CPU. We can therefore safely
+ * increment the nesting counter, relieving further NMIs
+ * of the need to increment the per-CPU counter.
+ */
+
+ ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting + 1;
+
+ /*
+	 * Now that we have prevented any NMIs from storing
+ * to the ->rcu_flipctr_idx, we can safely use it to
+ * remember which counter to decrement in the matching
+ * rcu_read_unlock().
+ */
+
+ ACCESS_ONCE(t->rcu_flipctr_idx) = idx;
+ local_irq_restore(flags);
+ }
+}
+EXPORT_SYMBOL_GPL(__rcu_read_lock);
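+
+/*
+ * Read-side code pairs rcu_read_lock()/rcu_read_unlock() -- which map
+ * onto the two functions above and below -- with rcu_dereference() on
+ * the pointers it follows.  A minimal, hypothetical reader (gp and
+ * do_something_with() are invented names):
+ *
+ *	rcu_read_lock();
+ *	p = rcu_dereference(gp);
+ *	if (p)
+ *		do_something_with(p->a);
+ *	rcu_read_unlock();
+ */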
+
+void __rcu_read_unlock(void)
+{
+ int idx;
+ struct task_struct *t = current;
+ int nesting;
+
+ nesting = ACCESS_ONCE(t->rcu_read_lock_nesting);
+ if (nesting > 1) {
+
+ /*
+ * We are still protected by the enclosing rcu_read_lock(),
+ * so simply decrement the counter.
+ */
+
+ t->rcu_read_lock_nesting = nesting - 1;
+
+ } else {
+ unsigned long flags;
+
+ /*
+ * Disable local interrupts to prevent the grace-period
+ * detection state machine from seeing us half-done.
+ * NMIs can still occur, of course, and might themselves
+ * contain rcu_read_lock() and rcu_read_unlock().
+ */
+
+ local_irq_save(flags);
+
+ /*
+ * Outermost nesting of rcu_read_unlock(), so we must
+ * decrement the current counter for the current CPU.
+ * This must be done carefully, because NMIs can
+ * occur at any point in this code, and any rcu_read_lock()
+ * and rcu_read_unlock() pairs in the NMI handlers
+ * must interact non-destructively with this code.
+ * Lots of volatile casts, and -very- careful ordering.
+ *
+ * Changes to this code, including this one, must be
+ * inspected, validated, and tested extremely carefully!!!
+ */
+
+ /*
+ * First, pick up the index.
+ */
+
+ idx = ACCESS_ONCE(t->rcu_flipctr_idx);
+
+ /*
+ * Now that we have fetched the counter index, it is
+ * safe to decrement the per-task RCU nesting counter.
+ * After this, any interrupts or NMIs will increment and
+ * decrement the per-CPU counters.
+ */
+ ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting - 1;
+
+ /*
+	 * The task's nesting count has now been decremented.
+ * NMIs that occur after this statement will route their
+ * rcu_read_lock() calls through this "else" clause, and
+ * will thus start incrementing the per-CPU counter on
+ * their own. They will also clobber ->rcu_flipctr_idx,
+ * but that is OK, since we have already fetched it.
+ */
+
+ ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])--;
+ local_irq_restore(flags);
+ }
+}
+EXPORT_SYMBOL_GPL(__rcu_read_unlock);
+
+/*
+ * If a global counter flip has occurred since the last time that we
+ * advanced callbacks, advance them. Hardware interrupts must be
+ * disabled when calling this function.
+ */
+static void __rcu_advance_callbacks(struct rcu_data *rdp)
+{
+ int cpu;
+ int i;
+ int wlc = 0;
+
+ if (rdp->completed != rcu_ctrlblk.completed) {
+ if (rdp->waitlist[GP_STAGES - 1] != NULL) {
+ *rdp->donetail = rdp->waitlist[GP_STAGES - 1];
+ rdp->donetail = rdp->waittail[GP_STAGES - 1];
+ RCU_TRACE_RDP(rcupreempt_trace_move2done, rdp);
+ }
+ for (i = GP_STAGES - 2; i >= 0; i--) {
+ if (rdp->waitlist[i] != NULL) {
+ rdp->waitlist[i + 1] = rdp->waitlist[i];
+ rdp->waittail[i + 1] = rdp->waittail[i];
+ wlc++;
+ } else {
+ rdp->waitlist[i + 1] = NULL;
+ rdp->waittail[i + 1] =
+ &rdp->waitlist[i + 1];
+ }
+ }
+ if (rdp->nextlist != NULL) {
+ rdp->waitlist[0] = rdp->nextlist;
+ rdp->waittail[0] = rdp->nexttail;
+ wlc++;
+ rdp->nextlist = NULL;
+ rdp->nexttail = &rdp->nextlist;
+ RCU_TRACE_RDP(rcupreempt_trace_move2wait, rdp);
+ } else {
+ rdp->waitlist[0] = NULL;
+ rdp->waittail[0] = &rdp->waitlist[0];
+ }
+ rdp->waitlistcount = wlc;
+ rdp->completed = rcu_ctrlblk.completed;
+ }
+
+ /*
+ * Check to see if this CPU needs to report that it has seen
+ * the most recent counter flip, thereby declaring that all
+ * subsequent rcu_read_lock() invocations will respect this flip.
+ */
+
+ cpu = raw_smp_processor_id();
+ if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
+ smp_mb(); /* Subsequent counter accesses must see new value */
+ per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
+ smp_mb(); /* Subsequent RCU read-side critical sections */
+ /* seen -after- acknowledgement. */
+ }
+}
+
+/*
+ * Get here when RCU is idle. Decide whether we need to
+ * move out of idle state, and return non-zero if so.
+ * "Straightforward" approach for the moment, might later
+ * use callback-list lengths, grace-period duration, or
+ * some such to determine when to exit idle state.
+ * Might also need a pre-idle test that does not acquire
+ * the lock, but let's get the simple case working first...
+ */
+
+static int
+rcu_try_flip_idle(void)
+{
+ int cpu;
+
+ RCU_TRACE_ME(rcupreempt_trace_try_flip_i1);
+ if (!rcu_pending(smp_processor_id())) {
+ RCU_TRACE_ME(rcupreempt_trace_try_flip_ie1);
+ return 0;
+ }
+
+ /*
+ * Do the flip.
+ */
+
+ RCU_TRACE_ME(rcupreempt_trace_try_flip_g1);
+ rcu_ctrlblk.completed++; /* stands in for rcu_try_flip_g2 */
+
+ /*
+ * Need a memory barrier so that other CPUs see the new
+ * counter value before they see the subsequent change of all
+ * the rcu_flip_flag instances to rcu_flipped.
+ */
+
+ smp_mb(); /* see above block comment. */
+
+ /* Now ask each CPU for acknowledgement of the flip. */
+
+ for_each_cpu_mask(cpu, rcu_cpu_online_map)
+ per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
+
+ return 1;
+}
+
+/*
+ * Wait for CPUs to acknowledge the flip.
+ */
+
+static int
+rcu_try_flip_waitack(void)
+{
+ int cpu;
+
+ RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
+ for_each_cpu_mask(cpu, rcu_cpu_online_map)
+ if (per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
+ RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
+ return 0;
+ }
+
+ /*
+ * Make sure our checks above don't bleed into subsequent
+ * waiting for the sum of the counters to reach zero.
+ */
+
+ smp_mb(); /* see above block comment. */
+ RCU_TRACE_ME(rcupreempt_trace_try_flip_a2);
+ return 1;
+}
+
+/*
+ * Wait for collective ``last'' counter to reach zero,
+ * then tell all CPUs to do an end-of-grace-period memory barrier.
+ */
+
+static int
+rcu_try_flip_waitzero(void)
+{
+ int cpu;
+ int lastidx = !(rcu_ctrlblk.completed & 0x1);
+ int sum = 0;
+
+ /* Check to see if the sum of the "last" counters is zero. */
+
+ RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
+ for_each_cpu_mask(cpu, rcu_cpu_online_map)
+ sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
+ if (sum != 0) {
+ RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
+ return 0;
+ }
+
+ /*
+ * This ensures that the other CPUs see the call for
+	 * memory barriers -after- the sum has been seen to
+	 * reach zero here.
+ */
+ smp_mb(); /* ^^^^^^^^^^^^ */
+
+ /* Call for a memory barrier from each CPU. */
+ for_each_cpu_mask(cpu, rcu_cpu_online_map)
+ per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
+
+ RCU_TRACE_ME(rcupreempt_trace_try_flip_z2);
+ return 1;
+}
+
+/*
+ * Wait for all CPUs to do their end-of-grace-period memory barrier.
+ * Return 1 once all CPUs have done so, 0 otherwise.
+ */
+
+static int
+rcu_try_flip_waitmb(void)
+{
+ int cpu;
+
+ RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
+ for_each_cpu_mask(cpu, rcu_cpu_online_map)
+ if (per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
+ RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
+ return 0;
+ }
+
+ smp_mb(); /* Ensure that the above checks precede any following flip. */
+ RCU_TRACE_ME(rcupreempt_trace_try_flip_m2);
+ return 1;
+}
+
+/*
+ * Attempt a single flip of the counters. Remember, a single flip does
+ * -not- constitute a grace period. Instead, the interval between
+ * at least GP_STAGES consecutive flips is a grace period.
+ *
+ * If anyone is nuts enough to run this CONFIG_PREEMPT_RCU implementation
+ * on a large SMP, they might want to use a hierarchical organization of
+ * the per-CPU-counter pairs.
+ */
+static void rcu_try_flip(void)
+{
+ unsigned long flags;
+
+ RCU_TRACE_ME(rcupreempt_trace_try_flip_1);
+ if (unlikely(!spin_trylock_irqsave(&rcu_ctrlblk.fliplock, flags))) {
+ RCU_TRACE_ME(rcupreempt_trace_try_flip_e1);
+ return;
+ }
+
+ /*
+ * Take the next transition(s) through the RCU grace-period
+ * flip-counter state machine.
+ */
+
+ switch (rcu_ctrlblk.rcu_try_flip_state) {
+ case rcu_try_flip_idle_state:
+ if (rcu_try_flip_idle())
+ rcu_ctrlblk.rcu_try_flip_state =
+ rcu_try_flip_waitack_state;
+ break;
+ case rcu_try_flip_waitack_state:
+ if (rcu_try_flip_waitack())
+ rcu_ctrlblk.rcu_try_flip_state =
+ rcu_try_flip_waitzero_state;
+ break;
+ case rcu_try_flip_waitzero_state:
+ if (rcu_try_flip_waitzero())
+ rcu_ctrlblk.rcu_try_flip_state =
+ rcu_try_flip_waitmb_state;
+ break;
+ case rcu_try_flip_waitmb_state:
+ if (rcu_try_flip_waitmb())
+ rcu_ctrlblk.rcu_try_flip_state =
+ rcu_try_flip_idle_state;
+ }
+ spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
+}
+
+/*
+ * Check to see if this CPU needs to do a memory barrier in order to
+ * ensure that any prior RCU read-side critical sections have committed
+ * their counter manipulations and critical-section memory references
+ * before declaring the grace period to be completed.
+ */
+static void rcu_check_mb(int cpu)
+{
+ if (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed) {
+ smp_mb(); /* Ensure RCU read-side accesses are visible. */
+ per_cpu(rcu_mb_flag, cpu) = rcu_mb_done;
+ }
+}
+
+void rcu_check_callbacks(int cpu, int user)
+{
+ unsigned long flags;
+ struct rcu_data *rdp = RCU_DATA_CPU(cpu);
+
+ rcu_check_mb(cpu);
+ if (rcu_ctrlblk.completed == rdp->completed)
+ rcu_try_flip();
+ spin_lock_irqsave(&rdp->lock, flags);
+ RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
+ __rcu_advance_callbacks(rdp);
+ if (rdp->donelist == NULL) {
+ spin_unlock_irqrestore(&rdp->lock, flags);
+ } else {
+ spin_unlock_irqrestore(&rdp->lock, flags);
+ raise_softirq(RCU_SOFTIRQ);
+ }
+}
+
+/*
+ * Needed by dynticks, to make sure all RCU processing has finished
+ * when we go idle:
+ */
+void rcu_advance_callbacks(int cpu, int user)
+{
+ unsigned long flags;
+ struct rcu_data *rdp = RCU_DATA_CPU(cpu);
+
+ if (rcu_ctrlblk.completed == rdp->completed) {
+ rcu_try_flip();
+ if (rcu_ctrlblk.completed == rdp->completed)
+ return;
+ }
+ spin_lock_irqsave(&rdp->lock, flags);
+ RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
+ __rcu_advance_callbacks(rdp);
+ spin_unlock_irqrestore(&rdp->lock, flags);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
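+
+/*
+ * Splice the source list onto the tail of the destination list,
+ * preserving callback order, then reset the source list to empty.
+ * Callers hold the relevant rcu_data lock with interrupts disabled.
+ */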
+#define rcu_offline_cpu_enqueue(srclist, srctail, dstlist, dsttail) do { \
+ *dsttail = srclist; \
+ if (srclist != NULL) { \
+ dsttail = srctail; \
+ srclist = NULL; \
+ srctail = &srclist;\
+ } \
+ } while (0)
+
+void rcu_offline_cpu(int cpu)
+{
+ int i;
+ struct rcu_head *list = NULL;
+ unsigned long flags;
+ struct rcu_data *rdp = RCU_DATA_CPU(cpu);
+ struct rcu_head **tail = &list;
+
+ /*
+ * Remove all callbacks from the newly dead CPU, retaining order.
+	 * Otherwise rcu_barrier() will fail.
+ */
+
+ spin_lock_irqsave(&rdp->lock, flags);
+ rcu_offline_cpu_enqueue(rdp->donelist, rdp->donetail, list, tail);
+ for (i = GP_STAGES - 1; i >= 0; i--)
+ rcu_offline_cpu_enqueue(rdp->waitlist[i], rdp->waittail[i],
+ list, tail);
+ rcu_offline_cpu_enqueue(rdp->nextlist, rdp->nexttail, list, tail);
+ spin_unlock_irqrestore(&rdp->lock, flags);
+ rdp->waitlistcount = 0;
+
+ /* Disengage the newly dead CPU from the grace-period computation. */
+
+ spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
+ rcu_check_mb(cpu);
+ if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
+ smp_mb(); /* Subsequent counter accesses must see new value */
+ per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
+ smp_mb(); /* Subsequent RCU read-side critical sections */
+ /* seen -after- acknowledgement. */
+ }
+
+ RCU_DATA_ME()->rcu_flipctr[0] += RCU_DATA_CPU(cpu)->rcu_flipctr[0];
+ RCU_DATA_ME()->rcu_flipctr[1] += RCU_DATA_CPU(cpu)->rcu_flipctr[1];
+
+ RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0;
+ RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0;
+
+ cpu_clear(cpu, rcu_cpu_online_map);
+
+ spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
+
+ /*
+ * Place the removed callbacks on the current CPU's queue.
+ * Make them all start a new grace period: simple approach,
+ * in theory could starve a given set of callbacks, but
+ * you would need to be doing some serious CPU hotplugging
+ * to make this happen. If this becomes a problem, adding
+ * a synchronize_rcu() to the hotplug path would be a simple
+ * fix.
+ */
+
+ rdp = RCU_DATA_ME();
+ spin_lock_irqsave(&rdp->lock, flags);
+ *rdp->nexttail = list;
+ if (list)
+ rdp->nexttail = tail;
+ spin_unlock_irqrestore(&rdp->lock, flags);
+}
+
+void __devinit rcu_online_cpu(int cpu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
+ cpu_set(cpu, rcu_cpu_online_map);
+ spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
+}
+
+#else /* #ifdef CONFIG_HOTPLUG_CPU */
+
+void rcu_offline_cpu(int cpu)
+{
+}
+
+void __devinit rcu_online_cpu(int cpu)
+{
+}
+
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
+
+static void rcu_process_callbacks(struct softirq_action *unused)
+{
+ unsigned long flags;
+ struct rcu_head *next, *list;
+ struct rcu_data *rdp = RCU_DATA_ME();
+
+ spin_lock_irqsave(&rdp->lock, flags);
+ list = rdp->donelist;
+ if (list == NULL) {
+ spin_unlock_irqrestore(&rdp->lock, flags);
+ return;
+ }
+ rdp->donelist = NULL;
+ rdp->donetail = &rdp->donelist;
+ RCU_TRACE_RDP(rcupreempt_trace_done_remove, rdp);
+ spin_unlock_irqrestore(&rdp->lock, flags);
+ while (list) {
+ next = list->next;
+ list->func(list);
+ list = next;
+ RCU_TRACE_ME(rcupreempt_trace_invoke);
+ }
+}
+
+void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+ unsigned long flags;
+ struct rcu_data *rdp;
+
+ head->func = func;
+ head->next = NULL;
+ local_irq_save(flags);
+ rdp = RCU_DATA_ME();
+ spin_lock(&rdp->lock);
+ __rcu_advance_callbacks(rdp);
+ *rdp->nexttail = head;
+ rdp->nexttail = &head->next;
+ RCU_TRACE_RDP(rcupreempt_trace_next_add, rdp);
+ spin_unlock(&rdp->lock);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
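+
+/*
+ * A typical caller (illustrative only; struct foo and foo_reclaim() are
+ * invented for the example) embeds the rcu_head in the structure being
+ * protected and frees it from the callback:
+ *
+ *	struct foo {
+ *		struct list_head list;
+ *		struct rcu_head rcu;
+ *		int a;
+ *	};
+ *
+ *	static void foo_reclaim(struct rcu_head *rp)
+ *	{
+ *		kfree(container_of(rp, struct foo, rcu));
+ *	}
+ *
+ *	list_del_rcu(&p->list);
+ *	call_rcu(&p->rcu, foo_reclaim);
+ */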
+
+/*
+ * Wait until all currently running preempt_disable() code segments
+ * (including hardware-irq-disable segments) complete. Note that
+ * in -rt this does -not- necessarily result in all currently executing
+ * interrupt -handlers- having completed.
+ */
+void __synchronize_sched(void)
+{
+ cpumask_t oldmask;
+ int cpu;
+
+ if (sched_getaffinity(0, &oldmask) < 0)
+ oldmask = cpu_possible_map;
+ for_each_online_cpu(cpu) {
+ sched_setaffinity(0, cpumask_of_cpu(cpu));
+ schedule();
+ }
+ sched_setaffinity(0, oldmask);
+}
+EXPORT_SYMBOL_GPL(__synchronize_sched);
+
+/*
+ * Check to see if any future RCU-related work will need to be done
+ * by the current CPU, even if none need be done immediately, returning
+ * 1 if so. Assumes that notifiers would take care of handling any
+ * outstanding requests from the RCU core.
+ *
+ * This function is part of the RCU implementation; it is -not-
+ * an exported member of the RCU API.
+ */
+int rcu_needs_cpu(int cpu)
+{
+ struct rcu_data *rdp = RCU_DATA_CPU(cpu);
+
+ return (rdp->donelist != NULL ||
+ !!rdp->waitlistcount ||
+ rdp->nextlist != NULL);
+}
+
+int rcu_pending(int cpu)
+{
+ struct rcu_data *rdp = RCU_DATA_CPU(cpu);
+
+ /* The CPU has at least one callback queued somewhere. */
+
+ if (rdp->donelist != NULL ||
+ !!rdp->waitlistcount ||
+ rdp->nextlist != NULL)
+ return 1;
+
+ /* The RCU core needs an acknowledgement from this CPU. */
+
+ if ((per_cpu(rcu_flip_flag, cpu) == rcu_flipped) ||
+ (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed))
+ return 1;
+
+ /* This CPU has fallen behind the global grace-period number. */
+
+ if (rdp->completed != rcu_ctrlblk.completed)
+ return 1;
+
+ /* Nothing needed from this CPU. */
+
+ return 0;
+}
+
+static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ rcu_online_cpu(cpu);
+ break;
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ rcu_offline_cpu(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata rcu_nb = {
+ .notifier_call = rcu_cpu_notify,
+};
+
+void __init __rcu_init(void)
+{
+ int cpu;
+ int i;
+ struct rcu_data *rdp;
+
+ printk(KERN_NOTICE "Preemptible RCU implementation.\n");
+ for_each_possible_cpu(cpu) {
+ rdp = RCU_DATA_CPU(cpu);
+ spin_lock_init(&rdp->lock);
+ rdp->completed = 0;
+ rdp->waitlistcount = 0;
+ rdp->nextlist = NULL;
+ rdp->nexttail = &rdp->nextlist;
+ for (i = 0; i < GP_STAGES; i++) {
+ rdp->waitlist[i] = NULL;
+ rdp->waittail[i] = &rdp->waitlist[i];
+ }
+ rdp->donelist = NULL;
+ rdp->donetail = &rdp->donelist;
+ rdp->rcu_flipctr[0] = 0;
+ rdp->rcu_flipctr[1] = 0;
+ }
+ register_cpu_notifier(&rcu_nb);
+
+ /*
+	 * We don't need protection against CPU hotplug here, since:
+ * a) If a CPU comes online while we are iterating over the
+ * cpu_online_map below, we would only end up making a
+ * duplicate call to rcu_online_cpu() which sets the corresponding
+ * CPU's mask in the rcu_cpu_online_map.
+ *
+ * b) A CPU cannot go offline at this point in time since the user
+ * does not have access to the sysfs interface, nor do we
+ * suspend the system.
+ */
+ for_each_online_cpu(cpu)
+ rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long) cpu);
+
+ open_softirq(RCU_SOFTIRQ, rcu_process_callbacks, NULL);
+}
+
+/*
+ * Deprecated, use synchronize_rcu() or synchronize_sched() instead.
+ */
+void synchronize_kernel(void)
+{
+ synchronize_rcu();
+}
+
+#ifdef CONFIG_RCU_TRACE
+long *rcupreempt_flipctr(int cpu)
+{
+ return &RCU_DATA_CPU(cpu)->rcu_flipctr[0];
+}
+EXPORT_SYMBOL_GPL(rcupreempt_flipctr);
+
+int rcupreempt_flip_flag(int cpu)
+{
+ return per_cpu(rcu_flip_flag, cpu);
+}
+EXPORT_SYMBOL_GPL(rcupreempt_flip_flag);
+
+int rcupreempt_mb_flag(int cpu)
+{
+ return per_cpu(rcu_mb_flag, cpu);
+}
+EXPORT_SYMBOL_GPL(rcupreempt_mb_flag);
+
+char *rcupreempt_try_flip_state_name(void)
+{
+ return rcu_try_flip_state_names[rcu_ctrlblk.rcu_try_flip_state];
+}
+EXPORT_SYMBOL_GPL(rcupreempt_try_flip_state_name);
+
+struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu)
+{
+ struct rcu_data *rdp = RCU_DATA_CPU(cpu);
+
+ return &rdp->trace;
+}
+EXPORT_SYMBOL_GPL(rcupreempt_trace_cpu);
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
diff --git a/kernel/rcupreempt_trace.c b/kernel/rcupreempt_trace.c
new file mode 100644
index 00000000000..49ac4947af2
--- /dev/null
+++ b/kernel/rcupreempt_trace.c
@@ -0,0 +1,330 @@
+/*
+ * Read-Copy Update tracing for realtime implementation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2006
+ *
+ * Papers: http://www.rdrop.com/users/paulmck/RCU
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU/ *.txt
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/mutex.h>
+#include <linux/rcupreempt_trace.h>
+#include <linux/debugfs.h>
+
+static struct mutex rcupreempt_trace_mutex;
+static char *rcupreempt_trace_buf;
+#define RCUPREEMPT_TRACE_BUF_SIZE 4096
+
+void rcupreempt_trace_move2done(struct rcupreempt_trace *trace)
+{
+ trace->done_length += trace->wait_length;
+ trace->done_add += trace->wait_length;
+ trace->wait_length = 0;
+}
+void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace)
+{
+ trace->wait_length += trace->next_length;
+ trace->wait_add += trace->next_length;
+ trace->next_length = 0;
+}
+void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace)
+{
+ atomic_inc(&trace->rcu_try_flip_1);
+}
+void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace)
+{
+ atomic_inc(&trace->rcu_try_flip_e1);
+}
+void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace)
+{
+ trace->rcu_try_flip_i1++;
+}
+void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace)
+{
+ trace->rcu_try_flip_ie1++;
+}
+void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace)
+{
+ trace->rcu_try_flip_g1++;
+}
+void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace)
+{
+ trace->rcu_try_flip_a1++;
+}
+void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace)
+{
+ trace->rcu_try_flip_ae1++;
+}
+void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace)
+{
+ trace->rcu_try_flip_a2++;
+}
+void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace)
+{
+ trace->rcu_try_flip_z1++;
+}
+void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace)
+{
+ trace->rcu_try_flip_ze1++;
+}
+void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace)
+{
+ trace->rcu_try_flip_z2++;
+}
+void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace)
+{
+ trace->rcu_try_flip_m1++;
+}
+void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace)
+{
+ trace->rcu_try_flip_me1++;
+}
+void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace)
+{
+ trace->rcu_try_flip_m2++;
+}
+void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace)
+{
+ trace->rcu_check_callbacks++;
+}
+void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace)
+{
+ trace->done_remove += trace->done_length;
+ trace->done_length = 0;
+}
+void rcupreempt_trace_invoke(struct rcupreempt_trace *trace)
+{
+ atomic_inc(&trace->done_invoked);
+}
+void rcupreempt_trace_next_add(struct rcupreempt_trace *trace)
+{
+ trace->next_add++;
+ trace->next_length++;
+}
+
+static void rcupreempt_trace_sum(struct rcupreempt_trace *sp)
+{
+ struct rcupreempt_trace *cp;
+ int cpu;
+
+ memset(sp, 0, sizeof(*sp));
+ for_each_possible_cpu(cpu) {
+ cp = rcupreempt_trace_cpu(cpu);
+ sp->next_length += cp->next_length;
+ sp->next_add += cp->next_add;
+ sp->wait_length += cp->wait_length;
+ sp->wait_add += cp->wait_add;
+ sp->done_length += cp->done_length;
+ sp->done_add += cp->done_add;
+ sp->done_remove += cp->done_remove;
+ atomic_set(&sp->done_invoked, atomic_read(&cp->done_invoked));
+ sp->rcu_check_callbacks += cp->rcu_check_callbacks;
+ atomic_set(&sp->rcu_try_flip_1,
+ atomic_read(&cp->rcu_try_flip_1));
+ atomic_set(&sp->rcu_try_flip_e1,
+ atomic_read(&cp->rcu_try_flip_e1));
+ sp->rcu_try_flip_i1 += cp->rcu_try_flip_i1;
+ sp->rcu_try_flip_ie1 += cp->rcu_try_flip_ie1;
+ sp->rcu_try_flip_g1 += cp->rcu_try_flip_g1;
+ sp->rcu_try_flip_a1 += cp->rcu_try_flip_a1;
+ sp->rcu_try_flip_ae1 += cp->rcu_try_flip_ae1;
+ sp->rcu_try_flip_a2 += cp->rcu_try_flip_a2;
+ sp->rcu_try_flip_z1 += cp->rcu_try_flip_z1;
+ sp->rcu_try_flip_ze1 += cp->rcu_try_flip_ze1;
+ sp->rcu_try_flip_z2 += cp->rcu_try_flip_z2;
+ sp->rcu_try_flip_m1 += cp->rcu_try_flip_m1;
+ sp->rcu_try_flip_me1 += cp->rcu_try_flip_me1;
+ sp->rcu_try_flip_m2 += cp->rcu_try_flip_m2;
+ }
+}
+
+static ssize_t rcustats_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rcupreempt_trace trace;
+ ssize_t bcount;
+ int cnt = 0;
+
+ rcupreempt_trace_sum(&trace);
+ mutex_lock(&rcupreempt_trace_mutex);
+	cnt += snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE - cnt,
+ "ggp=%ld rcc=%ld\n",
+ rcu_batches_completed(),
+ trace.rcu_check_callbacks);
+ snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE - cnt,
+ "na=%ld nl=%ld wa=%ld wl=%ld da=%ld dl=%ld dr=%ld di=%d\n"
+ "1=%d e1=%d i1=%ld ie1=%ld g1=%ld a1=%ld ae1=%ld a2=%ld\n"
+ "z1=%ld ze1=%ld z2=%ld m1=%ld me1=%ld m2=%ld\n",
+
+ trace.next_add, trace.next_length,
+ trace.wait_add, trace.wait_length,
+ trace.done_add, trace.done_length,
+ trace.done_remove, atomic_read(&trace.done_invoked),
+ atomic_read(&trace.rcu_try_flip_1),
+ atomic_read(&trace.rcu_try_flip_e1),
+ trace.rcu_try_flip_i1, trace.rcu_try_flip_ie1,
+ trace.rcu_try_flip_g1,
+ trace.rcu_try_flip_a1, trace.rcu_try_flip_ae1,
+ trace.rcu_try_flip_a2,
+ trace.rcu_try_flip_z1, trace.rcu_try_flip_ze1,
+ trace.rcu_try_flip_z2,
+ trace.rcu_try_flip_m1, trace.rcu_try_flip_me1,
+ trace.rcu_try_flip_m2);
+ bcount = simple_read_from_buffer(buffer, count, ppos,
+ rcupreempt_trace_buf, strlen(rcupreempt_trace_buf));
+ mutex_unlock(&rcupreempt_trace_mutex);
+ return bcount;
+}
+
+static ssize_t rcugp_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ long oldgp = rcu_batches_completed();
+ ssize_t bcount;
+
+ mutex_lock(&rcupreempt_trace_mutex);
+ synchronize_rcu();
+ snprintf(rcupreempt_trace_buf, RCUPREEMPT_TRACE_BUF_SIZE,
+ "oldggp=%ld newggp=%ld\n", oldgp, rcu_batches_completed());
+ bcount = simple_read_from_buffer(buffer, count, ppos,
+ rcupreempt_trace_buf, strlen(rcupreempt_trace_buf));
+ mutex_unlock(&rcupreempt_trace_mutex);
+ return bcount;
+}
+
+static ssize_t rcuctrs_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int cnt = 0;
+ int cpu;
+ int f = rcu_batches_completed() & 0x1;
+ ssize_t bcount;
+
+ mutex_lock(&rcupreempt_trace_mutex);
+
+ cnt += snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE,
+ "CPU last cur F M\n");
+ for_each_online_cpu(cpu) {
+ long *flipctr = rcupreempt_flipctr(cpu);
+ cnt += snprintf(&rcupreempt_trace_buf[cnt],
+ RCUPREEMPT_TRACE_BUF_SIZE - cnt,
+ "%3d %4ld %3ld %d %d\n",
+ cpu,
+ flipctr[!f],
+ flipctr[f],
+ rcupreempt_flip_flag(cpu),
+ rcupreempt_mb_flag(cpu));
+ }
+ cnt += snprintf(&rcupreempt_trace_buf[cnt],
+ RCUPREEMPT_TRACE_BUF_SIZE - cnt,
+ "ggp = %ld, state = %s\n",
+ rcu_batches_completed(),
+ rcupreempt_try_flip_state_name());
+ cnt += snprintf(&rcupreempt_trace_buf[cnt],
+ RCUPREEMPT_TRACE_BUF_SIZE - cnt,
+ "\n");
+ bcount = simple_read_from_buffer(buffer, count, ppos,
+ rcupreempt_trace_buf, strlen(rcupreempt_trace_buf));
+ mutex_unlock(&rcupreempt_trace_mutex);
+ return bcount;
+}
+
+static struct file_operations rcustats_fops = {
+ .owner = THIS_MODULE,
+ .read = rcustats_read,
+};
+
+static struct file_operations rcugp_fops = {
+ .owner = THIS_MODULE,
+ .read = rcugp_read,
+};
+
+static struct file_operations rcuctrs_fops = {
+ .owner = THIS_MODULE,
+ .read = rcuctrs_read,
+};
+
+static struct dentry *rcudir, *statdir, *ctrsdir, *gpdir;
+static int rcupreempt_debugfs_init(void)
+{
+ rcudir = debugfs_create_dir("rcu", NULL);
+ if (!rcudir)
+ goto out;
+ statdir = debugfs_create_file("rcustats", 0444, rcudir,
+ NULL, &rcustats_fops);
+ if (!statdir)
+ goto free_out;
+
+ gpdir = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops);
+ if (!gpdir)
+ goto free_out;
+
+ ctrsdir = debugfs_create_file("rcuctrs", 0444, rcudir,
+ NULL, &rcuctrs_fops);
+ if (!ctrsdir)
+ goto free_out;
+ return 0;
+free_out:
+ if (statdir)
+ debugfs_remove(statdir);
+ if (gpdir)
+ debugfs_remove(gpdir);
+ debugfs_remove(rcudir);
+out:
+ return 1;
+}
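+
+/*
+ * With debugfs mounted (commonly at /sys/kernel/debug, though the mount
+ * point is up to the administrator), the files created above appear as
+ * rcu/rcustats, rcu/rcugp and rcu/rcuctrs and can simply be read, e.g.
+ * "cat /sys/kernel/debug/rcu/rcustats".
+ */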
+
+static int __init rcupreempt_trace_init(void)
+{
+ mutex_init(&rcupreempt_trace_mutex);
+ rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL);
+ if (!rcupreempt_trace_buf)
+ return 1;
+ return rcupreempt_debugfs_init();
+}
+
+static void __exit rcupreempt_trace_cleanup(void)
+{
+ debugfs_remove(statdir);
+ debugfs_remove(gpdir);
+ debugfs_remove(ctrsdir);
+ debugfs_remove(rcudir);
+ kfree(rcupreempt_trace_buf);
+}
+
+
+module_init(rcupreempt_trace_init);
+module_exit(rcupreempt_trace_cleanup);
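A minimal standalone sketch of the parity-based "last"/"cur" selection that rcuctrs_read() above performs (plain userspace C, not kernel code; the counter and batch values are made up):

#include <stdio.h>

int main(void)
{
	/* two counters per CPU, indexed by grace-period parity (0 or 1) */
	long flipctr[2] = { 3, 7 };	/* hypothetical per-cpu counters */
	long completed = 42;		/* hypothetical batches-completed count */
	int f = completed & 0x1;	/* parity selects the "current" slot */

	/* same selection as the trace dump: [!f] is "last", [f] is "cur" */
	printf("last=%ld cur=%ld\n", flipctr[!f], flipctr[f]);
	return 0;
}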
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index c3e165c2318..fd599829e72 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -726,11 +726,11 @@ static void rcu_torture_shuffle_tasks(void)
cpumask_t tmp_mask = CPU_MASK_ALL;
int i;
- lock_cpu_hotplug();
+ get_online_cpus();
/* No point in shuffling if there is only one online CPU (ex: UP) */
if (num_online_cpus() == 1) {
- unlock_cpu_hotplug();
+ put_online_cpus();
return;
}
@@ -762,7 +762,7 @@ static void rcu_torture_shuffle_tasks(void)
else
rcu_idle_cpu--;
- unlock_cpu_hotplug();
+ put_online_cpus();
}
/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
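The rcutorture hunk above swaps lock_cpu_hotplug()/unlock_cpu_hotplug() for get_online_cpus()/put_online_cpus(); the usage pattern stays the same bracket around code that needs a stable set of online CPUs. A hedged kernel-context sketch of that bracket (the function is made up and purely illustrative):

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

/* illustrative only: walk the online CPUs with hotplug held off */
static void example_walk_online_cpus(void)
{
	int cpu;

	get_online_cpus();		/* block CPU hotplug */
	for_each_online_cpu(cpu)
		printk(KERN_INFO "cpu%d is online\n", cpu);
	put_online_cpus();		/* allow CPU hotplug again */
}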
diff --git a/kernel/resource.c b/kernel/resource.c
index a358142ff48..2eb553d9b51 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -277,7 +277,7 @@ walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
int ret = -1;
res.start = (u64) start_pfn << PAGE_SHIFT;
res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
- res.flags = IORESOURCE_MEM;
+ res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
orig_end = res.end;
while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
pfn = (unsigned long)(res.start >> PAGE_SHIFT);
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index e3055ba6915..092e4c620af 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -394,7 +394,7 @@ static SYSDEV_ATTR(status, 0600, sysfs_test_status, NULL);
static SYSDEV_ATTR(command, 0600, NULL, sysfs_test_command);
static struct sysdev_class rttest_sysclass = {
- set_kset_name("rttest"),
+ .name = "rttest",
};
static int init_test_thread(int id)
diff --git a/kernel/rwsem.c b/kernel/rwsem.c
index 1ec620c0306..cae050b05f5 100644
--- a/kernel/rwsem.c
+++ b/kernel/rwsem.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rwsem.h>
@@ -15,7 +16,7 @@
/*
* lock for reading
*/
-void down_read(struct rw_semaphore *sem)
+void __sched down_read(struct rw_semaphore *sem)
{
might_sleep();
rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
@@ -42,7 +43,7 @@ EXPORT_SYMBOL(down_read_trylock);
/*
* lock for writing
*/
-void down_write(struct rw_semaphore *sem)
+void __sched down_write(struct rw_semaphore *sem)
{
might_sleep();
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
diff --git a/kernel/sched.c b/kernel/sched.c
index b4fbbc44045..9474b23c28b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -22,6 +22,8 @@
* by Peter Williams
* 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
* 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
+ * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
+ * Thomas Gleixner, Mike Kravetz
*/
#include <linux/mm.h>
@@ -52,7 +54,6 @@
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
-#include <linux/cpu_acct.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
@@ -64,6 +65,7 @@
#include <linux/reciprocal_div.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
+#include <linux/hrtimer.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
@@ -75,7 +77,7 @@
*/
unsigned long long __attribute__((weak)) sched_clock(void)
{
- return (unsigned long long)jiffies * (1000000000 / HZ);
+ return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}
/*
@@ -97,10 +99,9 @@ unsigned long long __attribute__((weak)) sched_clock(void)
#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
/*
- * Some helpers for converting nanosecond timing to jiffy resolution
+ * Helpers for converting nanosecond timing to jiffy resolution
*/
-#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (1000000000 / HZ))
-#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
+#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
#define NICE_0_LOAD SCHED_LOAD_SCALE
#define NICE_0_SHIFT SCHED_LOAD_SHIFT
@@ -160,6 +161,8 @@ struct rt_prio_array {
struct cfs_rq;
+static LIST_HEAD(task_groups);
+
/* task group related information */
struct task_group {
#ifdef CONFIG_FAIR_CGROUP_SCHED
@@ -169,9 +172,50 @@ struct task_group {
struct sched_entity **se;
/* runqueue "owned" by this group on each cpu */
struct cfs_rq **cfs_rq;
+
+ struct sched_rt_entity **rt_se;
+ struct rt_rq **rt_rq;
+
+ unsigned int rt_ratio;
+
+ /*
+	 * The shares assigned to a task group govern how much cpu bandwidth
+	 * is allocated to the group. The more shares a group has, the more
+	 * cpu bandwidth is allocated to it.
+ *
+	 * For example, let's say that there are three task groups, A, B and C, which
+ * have been assigned shares 1000, 2000 and 3000 respectively. Then,
+ * cpu bandwidth allocated by the scheduler to task groups A, B and C
+ * should be:
+ *
+ * Bw(A) = 1000/(1000+2000+3000) * 100 = 16.66%
+ * Bw(B) = 2000/(1000+2000+3000) * 100 = 33.33%
+ * Bw(C) = 3000/(1000+2000+3000) * 100 = 50%
+ *
+ * The weight assigned to a task group's schedulable entities on every
+ * cpu (task_group.se[a_cpu]->load.weight) is derived from the task
+	 * group's shares. For example, let's say that task group A has been
+ * assigned shares of 1000 and there are two CPUs in a system. Then,
+ *
+ * tg_A->se[0]->load.weight = tg_A->se[1]->load.weight = 1000;
+ *
+	 * Note: It's not necessary that each of a task group's schedulable
+	 *	 entities has the same weight on all CPUs. If the group
+ * has 2 of its tasks on CPU0 and 1 task on CPU1, then a
+ * better distribution of weight could be:
+ *
+ * tg_A->se[0]->load.weight = 2/3 * 2000 = 1333
+	 *	tg_A->se[1]->load.weight = 1/3 * 2000 = 667
+ *
+	 * rebalance_shares() is responsible for distributing a task group's
+	 * shares like this among the group's schedulable entities across
+ * cpus.
+ *
+ */
unsigned long shares;
- /* spinlock to serialize modification to shares */
- spinlock_t lock;
+
+ struct rcu_head rcu;
+ struct list_head list;
};
/* Default task group's sched entity on each cpu */
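A standalone sketch of the shares-to-bandwidth arithmetic the comment above walks through, using its hypothetical 1000/2000/3000 share values (plain userspace C, not kernel code):

#include <stdio.h>

int main(void)
{
	/* shares for task groups A, B and C, as in the comment above */
	unsigned long shares[] = { 1000, 2000, 3000 };
	unsigned long total = 0;
	int i;

	for (i = 0; i < 3; i++)
		total += shares[i];

	/* Bw(group) = shares(group) / sum of all shares */
	for (i = 0; i < 3; i++)
		printf("group %c: %.2f%% of cpu bandwidth\n",
		       'A' + i, 100.0 * shares[i] / total);
	return 0;
}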
@@ -179,24 +223,51 @@ static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
/* Default task group's cfs_rq on each cpu */
static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
+static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
+
static struct sched_entity *init_sched_entity_p[NR_CPUS];
static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
+static struct sched_rt_entity *init_sched_rt_entity_p[NR_CPUS];
+static struct rt_rq *init_rt_rq_p[NR_CPUS];
+
+/* task_group_mutex serializes add/remove of task groups and also changes to
+ * a task group's cpu shares.
+ */
+static DEFINE_MUTEX(task_group_mutex);
+
+/* doms_cur_mutex serializes access to doms_cur[] array */
+static DEFINE_MUTEX(doms_cur_mutex);
+
+#ifdef CONFIG_SMP
+/* kernel thread that runs rebalance_shares() periodically */
+static struct task_struct *lb_monitor_task;
+static int load_balance_monitor(void *unused);
+#endif
+
+static void set_se_shares(struct sched_entity *se, unsigned long shares);
+
/* Default task group.
* Every task in system belong to this group at bootup.
*/
struct task_group init_task_group = {
- .se = init_sched_entity_p,
+ .se = init_sched_entity_p,
.cfs_rq = init_cfs_rq_p,
+
+ .rt_se = init_sched_rt_entity_p,
+ .rt_rq = init_rt_rq_p,
};
#ifdef CONFIG_FAIR_USER_SCHED
-# define INIT_TASK_GRP_LOAD 2*NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
#else
-# define INIT_TASK_GRP_LOAD NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
#endif
-static int init_task_group_load = INIT_TASK_GRP_LOAD;
+#define MIN_GROUP_SHARES 2
+
+static int init_task_group_load = INIT_TASK_GROUP_LOAD;
/* return group to which a task belongs */
static inline struct task_group *task_group(struct task_struct *p)
@@ -209,22 +280,48 @@ static inline struct task_group *task_group(struct task_struct *p)
tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
struct task_group, css);
#else
- tg = &init_task_group;
+ tg = &init_task_group;
#endif
-
return tg;
}
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
-static inline void set_task_cfs_rq(struct task_struct *p)
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
+{
+ p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
+ p->se.parent = task_group(p)->se[cpu];
+
+ p->rt.rt_rq = task_group(p)->rt_rq[cpu];
+ p->rt.parent = task_group(p)->rt_se[cpu];
+}
+
+static inline void lock_task_group_list(void)
{
- p->se.cfs_rq = task_group(p)->cfs_rq[task_cpu(p)];
- p->se.parent = task_group(p)->se[task_cpu(p)];
+ mutex_lock(&task_group_mutex);
+}
+
+static inline void unlock_task_group_list(void)
+{
+ mutex_unlock(&task_group_mutex);
+}
+
+static inline void lock_doms_cur(void)
+{
+ mutex_lock(&doms_cur_mutex);
+}
+
+static inline void unlock_doms_cur(void)
+{
+ mutex_unlock(&doms_cur_mutex);
}
#else
-static inline void set_task_cfs_rq(struct task_struct *p) { }
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
+static inline void lock_task_group_list(void) { }
+static inline void unlock_task_group_list(void) { }
+static inline void lock_doms_cur(void) { }
+static inline void unlock_doms_cur(void) { }
#endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -249,26 +346,72 @@ struct cfs_rq {
#ifdef CONFIG_FAIR_GROUP_SCHED
struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
- /* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
+ /*
+ * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
* a hierarchy). Non-leaf lrqs hold other higher schedulable entities
* (like users, containers etc.)
*
* leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
* list is used during load balance.
*/
- struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
- struct task_group *tg; /* group that "owns" this runqueue */
- struct rcu_head rcu;
+ struct list_head leaf_cfs_rq_list;
+ struct task_group *tg; /* group that "owns" this runqueue */
#endif
};
/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
struct rt_prio_array active;
- int rt_load_balance_idx;
- struct list_head *rt_load_balance_head, *rt_load_balance_curr;
+ unsigned long rt_nr_running;
+#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+ int highest_prio; /* highest queued rt task prio */
+#endif
+#ifdef CONFIG_SMP
+ unsigned long rt_nr_migratory;
+ int overloaded;
+#endif
+ int rt_throttled;
+ u64 rt_time;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ struct rq *rq;
+ struct list_head leaf_rt_rq_list;
+ struct task_group *tg;
+ struct sched_rt_entity *rt_se;
+#endif
};
+#ifdef CONFIG_SMP
+
+/*
+ * We add the notion of a root-domain which will be used to define per-domain
+ * variables. Each exclusive cpuset essentially defines an island domain by
+ * fully partitioning the member cpus from any other cpuset. Whenever a new
+ * exclusive cpuset is created, we also create and attach a new root-domain
+ * object.
+ *
+ */
+struct root_domain {
+ atomic_t refcount;
+ cpumask_t span;
+ cpumask_t online;
+
+ /*
+ * The "RT overload" flag: it gets set if a CPU has more than
+ * one runnable RT task.
+ */
+ cpumask_t rto_mask;
+ atomic_t rto_count;
+};
+
+/*
+ * By default the system creates a single root-domain with all cpus as
+ * members (mimicking the global state we have today).
+ */
+static struct root_domain def_root_domain;
+
+#endif
+
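The root_domain above pairs the "RT overload" cpumask (rto_mask) with an atomic count (rto_count) so the common case can check the cheap counter before scanning the mask. A simplified standalone sketch of that bookkeeping, with a plain bitmask standing in for cpumask_t (userspace C; the helpers are illustrative, no locking or atomics):

#include <stdio.h>

static unsigned long rto_mask;	/* one bit per overloaded CPU */
static int rto_count;		/* how many bits are currently set */

static void set_overload(int cpu)
{
	if (!(rto_mask & (1UL << cpu))) {
		rto_mask |= 1UL << cpu;
		rto_count++;
	}
}

static void clear_overload(int cpu)
{
	if (rto_mask & (1UL << cpu)) {
		rto_mask &= ~(1UL << cpu);
		rto_count--;
	}
}

int main(void)
{
	set_overload(2);
	set_overload(5);
	clear_overload(2);

	/* cheap counter check first, then the mask */
	if (rto_count)
		printf("overloaded cpus mask: 0x%lx\n", rto_mask);
	return 0;
}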
/*
* This is the main, per-CPU runqueue data structure.
*
@@ -297,11 +440,15 @@ struct rq {
u64 nr_switches;
struct cfs_rq cfs;
+ struct rt_rq rt;
+ u64 rt_period_expire;
+ int rt_throttled;
+
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this cpu: */
struct list_head leaf_cfs_rq_list;
+ struct list_head leaf_rt_rq_list;
#endif
- struct rt_rq rt;
/*
* This is part of a global counter where only the total sum
@@ -318,7 +465,7 @@ struct rq {
u64 clock, prev_clock_raw;
s64 clock_max_delta;
- unsigned int clock_warps, clock_overflows;
+ unsigned int clock_warps, clock_overflows, clock_underflows;
u64 idle_clock;
unsigned int clock_deep_idle_events;
u64 tick_timestamp;
@@ -326,6 +473,7 @@ struct rq {
atomic_t nr_iowait;
#ifdef CONFIG_SMP
+ struct root_domain *rd;
struct sched_domain *sd;
/* For active balancing */
@@ -338,6 +486,12 @@ struct rq {
struct list_head migration_queue;
#endif
+#ifdef CONFIG_SCHED_HRTICK
+ unsigned long hrtick_flags;
+ ktime_t hrtick_expire;
+ struct hrtimer hrtick_timer;
+#endif
+
#ifdef CONFIG_SCHEDSTATS
/* latency stats */
struct sched_info rq_sched_info;
@@ -364,7 +518,6 @@ struct rq {
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-static DEFINE_MUTEX(sched_hotcpu_mutex);
static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
{
@@ -442,6 +595,23 @@ static void update_rq_clock(struct rq *rq)
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
+unsigned long rt_needs_cpu(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ u64 delta;
+
+ if (!rq->rt_throttled)
+ return 0;
+
+ if (rq->clock > rq->rt_period_expire)
+ return 1;
+
+ delta = rq->rt_period_expire - rq->clock;
+ do_div(delta, NSEC_PER_SEC / HZ);
+
+ return (unsigned long)delta;
+}
+
/*
* Tunables that become constants when CONFIG_SCHED_DEBUG is off:
*/
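A standalone sketch of the nanoseconds-to-ticks conversion rt_needs_cpu() above performs (plain userspace C; the clock values are made up and HZ is assumed to be 1000):

#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define HZ		1000	/* assumed tick rate */

int main(void)
{
	/* hypothetical clock values, in nanoseconds */
	unsigned long long clock = 5000000000ULL;		/* now */
	unsigned long long rt_period_expire = 5004000000ULL;	/* now + 4ms */
	unsigned long long delta;

	if (clock > rt_period_expire) {
		printf("rt period already expired\n");
		return 0;
	}

	delta = rt_period_expire - clock;
	delta /= NSEC_PER_SEC / HZ;	/* nanoseconds -> ticks */
	printf("ticks until the rt period expires: %llu\n", delta);
	return 0;
}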
@@ -456,24 +626,47 @@ static void update_rq_clock(struct rq *rq)
*/
enum {
SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
- SCHED_FEAT_START_DEBIT = 2,
- SCHED_FEAT_TREE_AVG = 4,
- SCHED_FEAT_APPROX_AVG = 8,
- SCHED_FEAT_WAKEUP_PREEMPT = 16,
- SCHED_FEAT_PREEMPT_RESTRICT = 32,
+ SCHED_FEAT_WAKEUP_PREEMPT = 2,
+ SCHED_FEAT_START_DEBIT = 4,
+ SCHED_FEAT_TREE_AVG = 8,
+ SCHED_FEAT_APPROX_AVG = 16,
+ SCHED_FEAT_HRTICK = 32,
+ SCHED_FEAT_DOUBLE_TICK = 64,
};
const_debug unsigned int sysctl_sched_features =
SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
+ SCHED_FEAT_WAKEUP_PREEMPT * 1 |
SCHED_FEAT_START_DEBIT * 1 |
SCHED_FEAT_TREE_AVG * 0 |
SCHED_FEAT_APPROX_AVG * 0 |
- SCHED_FEAT_WAKEUP_PREEMPT * 1 |
- SCHED_FEAT_PREEMPT_RESTRICT * 1;
+ SCHED_FEAT_HRTICK * 1 |
+ SCHED_FEAT_DOUBLE_TICK * 0;
#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
/*
+ * Number of tasks to iterate in a single balance run.
+ * Limited because this is done with IRQs disabled.
+ */
+const_debug unsigned int sysctl_sched_nr_migrate = 32;
+
+/*
+ * period over which we measure -rt task cpu usage in ms.
+ * default: 1s
+ */
+const_debug unsigned int sysctl_sched_rt_period = 1000;
+
+#define SCHED_RT_FRAC_SHIFT 16
+#define SCHED_RT_FRAC (1UL << SCHED_RT_FRAC_SHIFT)
+
+/*
+ * ratio of time -rt tasks may consume.
+ * default: 95%
+ */
+const_debug unsigned int sysctl_sched_rt_ratio = 62259;
+
+/*
* For kernel-internal use: high-speed (but slightly incorrect) per-cpu
* clock constructed from sched_clock():
*/
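A standalone sketch of where the 62259 default for sysctl_sched_rt_ratio above comes from: 95% expressed as a fraction of SCHED_RT_FRAC, i.e. in 16-bit fixed point (plain userspace C):

#include <stdio.h>

#define SCHED_RT_FRAC_SHIFT	16
#define SCHED_RT_FRAC		(1UL << SCHED_RT_FRAC_SHIFT)

int main(void)
{
	/* 95% of the period, rounded down to an integer fraction */
	unsigned long ratio = 95 * SCHED_RT_FRAC / 100;

	printf("ratio = %lu (%.2f%% of %lu)\n",
	       ratio, 100.0 * ratio / SCHED_RT_FRAC, SCHED_RT_FRAC);
	return 0;
}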
@@ -485,7 +678,12 @@ unsigned long long cpu_clock(int cpu)
local_irq_save(flags);
rq = cpu_rq(cpu);
- update_rq_clock(rq);
+ /*
+ * Only call sched_clock() if the scheduler has already been
+ * initialized (some code might call cpu_clock() very early):
+ */
+ if (rq->idle)
+ update_rq_clock(rq);
now = rq->clock;
local_irq_restore(flags);
@@ -500,10 +698,15 @@ EXPORT_SYMBOL_GPL(cpu_clock);
# define finish_arch_switch(prev) do { } while (0)
#endif
+static inline int task_current(struct rq *rq, struct task_struct *p)
+{
+ return rq->curr == p;
+}
+
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline int task_running(struct rq *rq, struct task_struct *p)
{
- return rq->curr == p;
+ return task_current(rq, p);
}
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
@@ -532,7 +735,7 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
#ifdef CONFIG_SMP
return p->oncpu;
#else
- return rq->curr == p;
+ return task_current(rq, p);
#endif
}
@@ -588,7 +791,7 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
/*
* task_rq_lock - lock the runqueue a given task resides on and disable
- * interrupts. Note the ordering: we can safely lookup the task_rq without
+ * interrupts. Note the ordering: we can safely lookup the task_rq without
* explicitly disabling preemption.
*/
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
@@ -666,9 +869,177 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
rq->prev_clock_raw = now;
rq->clock += delta_ns;
spin_unlock(&rq->lock);
+ touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
+static void __resched_task(struct task_struct *p, int tif_bit);
+
+static inline void resched_task(struct task_struct *p)
+{
+ __resched_task(p, TIF_NEED_RESCHED);
+}
+
+#ifdef CONFIG_SCHED_HRTICK
+/*
+ * Use HR-timers to deliver accurate preemption points.
+ *
+ * It's all a bit involved, since we cannot program an hrtimer while holding
+ * the rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
+ * reschedule event.
+ *
+ * When we get rescheduled we reprogram the hrtick_timer outside of the
+ * rq->lock.
+ */
+static inline void resched_hrt(struct task_struct *p)
+{
+ __resched_task(p, TIF_HRTICK_RESCHED);
+}
+
+static inline void resched_rq(struct rq *rq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rq->lock, flags);
+ resched_task(rq->curr);
+ spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+enum {
+	HRTICK_SET,		/* re-program hrtick_timer */
+ HRTICK_RESET, /* not a new slice */
+};
+
+/*
+ * Use hrtick when:
+ * - enabled by features
+ * - hrtimer is actually high res
+ */
+static inline int hrtick_enabled(struct rq *rq)
+{
+ if (!sched_feat(HRTICK))
+ return 0;
+ return hrtimer_is_hres_active(&rq->hrtick_timer);
+}
+
+/*
+ * Called to set the hrtick timer state.
+ *
+ * called with rq->lock held and irqs disabled
+ */
+static void hrtick_start(struct rq *rq, u64 delay, int reset)
+{
+ assert_spin_locked(&rq->lock);
+
+ /*
+ * preempt at: now + delay
+ */
+ rq->hrtick_expire =
+ ktime_add_ns(rq->hrtick_timer.base->get_time(), delay);
+ /*
+ * indicate we need to program the timer
+ */
+ __set_bit(HRTICK_SET, &rq->hrtick_flags);
+ if (reset)
+ __set_bit(HRTICK_RESET, &rq->hrtick_flags);
+
+ /*
+ * New slices are called from the schedule path and don't need a
+ * forced reschedule.
+ */
+ if (reset)
+ resched_hrt(rq->curr);
+}
+
+static void hrtick_clear(struct rq *rq)
+{
+ if (hrtimer_active(&rq->hrtick_timer))
+ hrtimer_cancel(&rq->hrtick_timer);
+}
+
+/*
+ * Update the timer from the possible pending state.
+ */
+static void hrtick_set(struct rq *rq)
+{
+ ktime_t time;
+ int set, reset;
+ unsigned long flags;
+
+ WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
+
+ spin_lock_irqsave(&rq->lock, flags);
+ set = __test_and_clear_bit(HRTICK_SET, &rq->hrtick_flags);
+ reset = __test_and_clear_bit(HRTICK_RESET, &rq->hrtick_flags);
+ time = rq->hrtick_expire;
+ clear_thread_flag(TIF_HRTICK_RESCHED);
+ spin_unlock_irqrestore(&rq->lock, flags);
+
+ if (set) {
+ hrtimer_start(&rq->hrtick_timer, time, HRTIMER_MODE_ABS);
+ if (reset && !hrtimer_active(&rq->hrtick_timer))
+ resched_rq(rq);
+ } else
+ hrtick_clear(rq);
+}
+
+/*
+ * High-resolution timer tick.
+ * Runs from hardirq context with interrupts disabled.
+ */
+static enum hrtimer_restart hrtick(struct hrtimer *timer)
+{
+ struct rq *rq = container_of(timer, struct rq, hrtick_timer);
+
+ WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
+
+ spin_lock(&rq->lock);
+ __update_rq_clock(rq);
+ rq->curr->sched_class->task_tick(rq, rq->curr, 1);
+ spin_unlock(&rq->lock);
+
+ return HRTIMER_NORESTART;
+}
+
+static inline void init_rq_hrtick(struct rq *rq)
+{
+ rq->hrtick_flags = 0;
+ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rq->hrtick_timer.function = hrtick;
+ rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+}
+
+void hrtick_resched(void)
+{
+ struct rq *rq;
+ unsigned long flags;
+
+ if (!test_thread_flag(TIF_HRTICK_RESCHED))
+ return;
+
+ local_irq_save(flags);
+ rq = cpu_rq(smp_processor_id());
+ hrtick_set(rq);
+ local_irq_restore(flags);
+}
+#else
+static inline void hrtick_clear(struct rq *rq)
+{
+}
+
+static inline void hrtick_set(struct rq *rq)
+{
+}
+
+static inline void init_rq_hrtick(struct rq *rq)
+{
+}
+
+void hrtick_resched(void)
+{
+}
+#endif
+
/*
* resched_task - mark a task 'to be rescheduled now'.
*
@@ -682,16 +1053,16 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif
-static void resched_task(struct task_struct *p)
+static void __resched_task(struct task_struct *p, int tif_bit)
{
int cpu;
assert_spin_locked(&task_rq(p)->lock);
- if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+ if (unlikely(test_tsk_thread_flag(p, tif_bit)))
return;
- set_tsk_thread_flag(p, TIF_NEED_RESCHED);
+ set_tsk_thread_flag(p, tif_bit);
cpu = task_cpu(p);
if (cpu == smp_processor_id())
@@ -714,10 +1085,10 @@ static void resched_cpu(int cpu)
spin_unlock_irqrestore(&rq->lock, flags);
}
#else
-static inline void resched_task(struct task_struct *p)
+static void __resched_task(struct task_struct *p, int tif_bit)
{
assert_spin_locked(&task_rq(p)->lock);
- set_tsk_need_resched(p);
+ set_tsk_thread_flag(p, tif_bit);
}
#endif
@@ -776,7 +1147,7 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
* To aid in avoiding the subversion of "niceness" due to uneven distribution
* of tasks with abnormal "nice" values across CPUs the contribution that
* each task makes to its run queue's load is weighted according to its
- * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
+ * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
* scaled version of the new time slice allocation that they receive on time
* slice expiry etc.
*/
@@ -851,6 +1222,29 @@ iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
struct rq_iterator *iterator);
#endif
+#ifdef CONFIG_CGROUP_CPUACCT
+static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
+#else
+static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
+#endif
+
+static inline void inc_cpu_load(struct rq *rq, unsigned long load)
+{
+ update_load_add(&rq->load, load);
+}
+
+static inline void dec_cpu_load(struct rq *rq, unsigned long load)
+{
+ update_load_sub(&rq->load, load);
+}
+
+#ifdef CONFIG_SMP
+static unsigned long source_load(int cpu, int type);
+static unsigned long target_load(int cpu, int type);
+static unsigned long cpu_avg_load_per_task(int cpu);
+static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
+#endif /* CONFIG_SMP */
+
#include "sched_stats.h"
#include "sched_idletask.c"
#include "sched_fair.c"
@@ -861,41 +1255,14 @@ iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
#define sched_class_highest (&rt_sched_class)
-/*
- * Update delta_exec, delta_fair fields for rq.
- *
- * delta_fair clock advances at a rate inversely proportional to
- * total load (rq->load.weight) on the runqueue, while
- * delta_exec advances at the same rate as wall-clock (provided
- * cpu is not idle).
- *
- * delta_exec / delta_fair is a measure of the (smoothened) load on this
- * runqueue over any given interval. This (smoothened) load is used
- * during load balance.
- *
- * This function is called /before/ updating rq->load
- * and when switching tasks.
- */
-static inline void inc_load(struct rq *rq, const struct task_struct *p)
-{
- update_load_add(&rq->load, p->se.load.weight);
-}
-
-static inline void dec_load(struct rq *rq, const struct task_struct *p)
-{
- update_load_sub(&rq->load, p->se.load.weight);
-}
-
-static void inc_nr_running(struct task_struct *p, struct rq *rq)
+static void inc_nr_running(struct rq *rq)
{
rq->nr_running++;
- inc_load(rq, p);
}
-static void dec_nr_running(struct task_struct *p, struct rq *rq)
+static void dec_nr_running(struct rq *rq)
{
rq->nr_running--;
- dec_load(rq, p);
}
static void set_load_weight(struct task_struct *p)
@@ -983,11 +1350,11 @@ static int effective_prio(struct task_struct *p)
*/
static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
{
- if (p->state == TASK_UNINTERRUPTIBLE)
+ if (task_contributes_to_load(p))
rq->nr_uninterruptible--;
enqueue_task(rq, p, wakeup);
- inc_nr_running(p, rq);
+ inc_nr_running(rq);
}
/*
@@ -995,11 +1362,11 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
*/
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
{
- if (p->state == TASK_UNINTERRUPTIBLE)
+ if (task_contributes_to_load(p))
rq->nr_uninterruptible++;
dequeue_task(rq, p, sleep);
- dec_nr_running(p, rq);
+ dec_nr_running(rq);
}
/**
@@ -1019,10 +1386,28 @@ unsigned long weighted_cpuload(const int cpu)
static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
+ set_task_rq(p, cpu);
#ifdef CONFIG_SMP
+ /*
+ * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+ * successfuly executed on another CPU. We must ensure that updates of
+ * per-task data have been completed by this moment.
+ */
+ smp_wmb();
task_thread_info(p)->cpu = cpu;
#endif
- set_task_cfs_rq(p);
+}
+
+static inline void check_class_changed(struct rq *rq, struct task_struct *p,
+ const struct sched_class *prev_class,
+ int oldprio, int running)
+{
+ if (prev_class != p->sched_class) {
+ if (prev_class->switched_from)
+ prev_class->switched_from(rq, p, running);
+ p->sched_class->switched_to(rq, p, running);
+ } else
+ p->sched_class->prio_changed(rq, p, oldprio, running);
}
#ifdef CONFIG_SMP
@@ -1030,7 +1415,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
/*
* Is this task likely cache-hot:
*/
-static inline int
+static int
task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
{
s64 delta;
@@ -1255,7 +1640,7 @@ static unsigned long target_load(int cpu, int type)
/*
* Return the average load per task on the cpu's run queue
*/
-static inline unsigned long cpu_avg_load_per_task(int cpu)
+static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long total = weighted_cpuload(cpu);
@@ -1412,58 +1797,6 @@ static int sched_balance_self(int cpu, int flag)
#endif /* CONFIG_SMP */
-/*
- * wake_idle() will wake a task on an idle cpu if task->cpu is
- * not idle and an idle cpu is available. The span of cpus to
- * search starts with cpus closest then further out as needed,
- * so we always favor a closer, idle cpu.
- *
- * Returns the CPU we should wake onto.
- */
-#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
-static int wake_idle(int cpu, struct task_struct *p)
-{
- cpumask_t tmp;
- struct sched_domain *sd;
- int i;
-
- /*
- * If it is idle, then it is the best cpu to run this task.
- *
- * This cpu is also the best, if it has more than one task already.
- * Siblings must be also busy(in most cases) as they didn't already
- * pickup the extra load from this cpu and hence we need not check
- * sibling runqueue info. This will avoid the checks and cache miss
- * penalities associated with that.
- */
- if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1)
- return cpu;
-
- for_each_domain(cpu, sd) {
- if (sd->flags & SD_WAKE_IDLE) {
- cpus_and(tmp, sd->span, p->cpus_allowed);
- for_each_cpu_mask(i, tmp) {
- if (idle_cpu(i)) {
- if (i != task_cpu(p)) {
- schedstat_inc(p,
- se.nr_wakeups_idle);
- }
- return i;
- }
- }
- } else {
- break;
- }
- }
- return cpu;
-}
-#else
-static inline int wake_idle(int cpu, struct task_struct *p)
-{
- return cpu;
-}
-#endif
-
/***
* try_to_wake_up - wake up a thread
* @p: the to-be-woken-up thread
@@ -1484,11 +1817,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
unsigned long flags;
long old_state;
struct rq *rq;
-#ifdef CONFIG_SMP
- struct sched_domain *sd, *this_sd = NULL;
- unsigned long load, this_load;
- int new_cpu;
-#endif
rq = task_rq_lock(p, &flags);
old_state = p->state;
@@ -1506,92 +1834,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
if (unlikely(task_running(rq, p)))
goto out_activate;
- new_cpu = cpu;
-
- schedstat_inc(rq, ttwu_count);
- if (cpu == this_cpu) {
- schedstat_inc(rq, ttwu_local);
- goto out_set_cpu;
- }
-
- for_each_domain(this_cpu, sd) {
- if (cpu_isset(cpu, sd->span)) {
- schedstat_inc(sd, ttwu_wake_remote);
- this_sd = sd;
- break;
- }
- }
-
- if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
- goto out_set_cpu;
-
- /*
- * Check for affine wakeup and passive balancing possibilities.
- */
- if (this_sd) {
- int idx = this_sd->wake_idx;
- unsigned int imbalance;
-
- imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
-
- load = source_load(cpu, idx);
- this_load = target_load(this_cpu, idx);
-
- new_cpu = this_cpu; /* Wake to this CPU if we can */
-
- if (this_sd->flags & SD_WAKE_AFFINE) {
- unsigned long tl = this_load;
- unsigned long tl_per_task;
-
- /*
- * Attract cache-cold tasks on sync wakeups:
- */
- if (sync && !task_hot(p, rq->clock, this_sd))
- goto out_set_cpu;
-
- schedstat_inc(p, se.nr_wakeups_affine_attempts);
- tl_per_task = cpu_avg_load_per_task(this_cpu);
-
- /*
- * If sync wakeup then subtract the (maximum possible)
- * effect of the currently running task from the load
- * of the current CPU:
- */
- if (sync)
- tl -= current->se.load.weight;
-
- if ((tl <= load &&
- tl + target_load(cpu, idx) <= tl_per_task) ||
- 100*(tl + p->se.load.weight) <= imbalance*load) {
- /*
- * This domain has SD_WAKE_AFFINE and
- * p is cache cold in this domain, and
- * there is no bad imbalance.
- */
- schedstat_inc(this_sd, ttwu_move_affine);
- schedstat_inc(p, se.nr_wakeups_affine);
- goto out_set_cpu;
- }
- }
-
- /*
- * Start passive balancing when half the imbalance_pct
- * limit is reached.
- */
- if (this_sd->flags & SD_WAKE_BALANCE) {
- if (imbalance*this_load <= 100*load) {
- schedstat_inc(this_sd, ttwu_move_balance);
- schedstat_inc(p, se.nr_wakeups_passive);
- goto out_set_cpu;
- }
- }
- }
-
- new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
-out_set_cpu:
- new_cpu = wake_idle(new_cpu, p);
- if (new_cpu != cpu) {
- set_task_cpu(p, new_cpu);
+ cpu = p->sched_class->select_task_rq(p, sync);
+ if (cpu != orig_cpu) {
+ set_task_cpu(p, cpu);
task_rq_unlock(rq, &flags);
/* might preempt at this point */
rq = task_rq_lock(p, &flags);
@@ -1605,6 +1850,21 @@ out_set_cpu:
cpu = task_cpu(p);
}
+#ifdef CONFIG_SCHEDSTATS
+ schedstat_inc(rq, ttwu_count);
+ if (cpu == this_cpu)
+ schedstat_inc(rq, ttwu_local);
+ else {
+ struct sched_domain *sd;
+ for_each_domain(this_cpu, sd) {
+ if (cpu_isset(cpu, sd->span)) {
+ schedstat_inc(sd, ttwu_wake_remote);
+ break;
+ }
+ }
+ }
+#endif
+
out_activate:
#endif /* CONFIG_SMP */
schedstat_inc(p, se.nr_wakeups);
@@ -1623,6 +1883,10 @@ out_activate:
out_running:
p->state = TASK_RUNNING;
+#ifdef CONFIG_SMP
+ if (p->sched_class->task_wake_up)
+ p->sched_class->task_wake_up(rq, p);
+#endif
out:
task_rq_unlock(rq, &flags);
@@ -1631,8 +1895,7 @@ out:
int fastcall wake_up_process(struct task_struct *p)
{
- return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
- TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+ return try_to_wake_up(p, TASK_ALL, 0);
}
EXPORT_SYMBOL(wake_up_process);
@@ -1665,7 +1928,7 @@ static void __sched_fork(struct task_struct *p)
p->se.wait_max = 0;
#endif
- INIT_LIST_HEAD(&p->run_list);
+ INIT_LIST_HEAD(&p->rt.run_list);
p->se.on_rq = 0;
#ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -1742,9 +2005,13 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
* management (if any):
*/
p->sched_class->task_new(rq, p);
- inc_nr_running(p, rq);
+ inc_nr_running(rq);
}
check_preempt_curr(rq, p);
+#ifdef CONFIG_SMP
+ if (p->sched_class->task_wake_up)
+ p->sched_class->task_wake_up(rq, p);
+#endif
task_rq_unlock(rq, &flags);
}
@@ -1839,7 +2106,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
* and do any other architecture-specific cleanup actions.
*
* Note that we may have delayed dropping an mm in context_switch(). If
- * so, we finish that here outside of the runqueue lock. (Doing it
+ * so, we finish that here outside of the runqueue lock. (Doing it
* with the lock held can cause deadlocks; see schedule() for
* details.)
*/
@@ -1865,6 +2132,11 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
prev_state = prev->state;
finish_arch_switch(prev);
finish_lock_switch(rq, prev);
+#ifdef CONFIG_SMP
+ if (current->sched_class->post_schedule)
+ current->sched_class->post_schedule(rq);
+#endif
+
fire_sched_in_preempt_notifiers(current);
if (mm)
mmdrop(mm);
@@ -2098,11 +2370,13 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
/*
* double_lock_balance - lock the busiest runqueue, this_rq is locked already.
*/
-static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
__releases(this_rq->lock)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
+ int ret = 0;
+
if (unlikely(!irqs_disabled())) {
/* printk() doesn't work good under rq->lock */
spin_unlock(&this_rq->lock);
@@ -2113,15 +2387,17 @@ static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
spin_unlock(&this_rq->lock);
spin_lock(&busiest->lock);
spin_lock(&this_rq->lock);
+ ret = 1;
} else
spin_lock(&busiest->lock);
}
+ return ret;
}
/*
* If dest_cpu is allowed for this process, migrate the task to it.
* This is accomplished by forcing the cpu_allowed mask to only
- * allow dest_cpu, which will force the cpu onto dest_cpu. Then
+ * allow dest_cpu, which will force the cpu onto dest_cpu. Then
* the cpu_allowed mask is restored.
*/
static void sched_migrate_task(struct task_struct *p, int dest_cpu)
@@ -2237,7 +2513,7 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
enum cpu_idle_type idle, int *all_pinned,
int *this_best_prio, struct rq_iterator *iterator)
{
- int pulled = 0, pinned = 0, skip_for_load;
+ int loops = 0, pulled = 0, pinned = 0, skip_for_load;
struct task_struct *p;
long rem_load_move = max_load_move;
@@ -2251,10 +2527,10 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
*/
p = iterator->start(iterator->arg);
next:
- if (!p)
+ if (!p || loops++ > sysctl_sched_nr_migrate)
goto out;
/*
- * To help distribute high priority tasks accross CPUs we don't
+ * To help distribute high priority tasks across CPUs we don't
* skip a task if it will be the highest priority task (i.e. smallest
* prio value) on its new queue regardless of its load weight
*/
@@ -2271,8 +2547,7 @@ next:
rem_load_move -= p->se.load.weight;
/*
- * We only want to steal up to the prescribed number of tasks
- * and the prescribed amount of weighted load.
+ * We only want to steal up to the prescribed amount of weighted load.
*/
if (rem_load_move > 0) {
if (p->prio < *this_best_prio)
@@ -2567,7 +2842,7 @@ group_next:
* tasks around. Thus we look for the minimum possible imbalance.
* Negative imbalances (*we* are more loaded than anyone else) will
* be counted as no imbalance for these purposes -- we can't fix that
- * by pulling tasks to us. Be careful of negative numbers as they'll
+ * by pulling tasks to us. Be careful of negative numbers as they'll
* appear as very large values with unsigned longs.
*/
if (max_load <= busiest_load_per_task)
@@ -3002,7 +3277,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
/*
* This condition is "impossible", if it occurs
- * we need to fix it. Originally reported by
+ * we need to fix it. Originally reported by
* Bjorn Helgaas on a 128-cpu setup.
*/
BUG_ON(busiest_rq == target_rq);
@@ -3034,7 +3309,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
#ifdef CONFIG_NO_HZ
static struct {
atomic_t load_balancer;
- cpumask_t cpu_mask;
+ cpumask_t cpu_mask;
} nohz ____cacheline_aligned = {
.load_balancer = ATOMIC_INIT(-1),
.cpu_mask = CPU_MASK_NONE,
@@ -3315,7 +3590,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
rq = task_rq_lock(p, &flags);
ns = p->se.sum_exec_runtime;
- if (rq->curr == p) {
+ if (task_current(rq, p)) {
update_rq_clock(rq);
delta_exec = rq->clock - p->se.exec_start;
if ((s64)delta_exec > 0)
@@ -3335,13 +3610,9 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t tmp;
- struct rq *rq = this_rq();
p->utime = cputime_add(p->utime, cputime);
- if (p != rq->idle)
- cpuacct_charge(p, cputime);
-
/* Add user time to cpustat. */
tmp = cputime_to_cputime64(cputime);
if (TASK_NICE(p) > 0)
@@ -3355,7 +3626,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in virtual machine since the last update
*/
-void account_guest_time(struct task_struct *p, cputime_t cputime)
+static void account_guest_time(struct task_struct *p, cputime_t cputime)
{
cputime64_t tmp;
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
@@ -3392,10 +3663,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
struct rq *rq = this_rq();
cputime64_t tmp;
- if (p->flags & PF_VCPU) {
- account_guest_time(p, cputime);
- return;
- }
+ if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
+ return account_guest_time(p, cputime);
p->stime = cputime_add(p->stime, cputime);
@@ -3405,10 +3674,9 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
cpustat->irq = cputime64_add(cpustat->irq, tmp);
else if (softirq_count())
cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
- else if (p != rq->idle) {
+ else if (p != rq->idle)
cpustat->system = cputime64_add(cpustat->system, tmp);
- cpuacct_charge(p, cputime);
- } else if (atomic_read(&rq->nr_iowait) > 0)
+ else if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
else
cpustat->idle = cputime64_add(cpustat->idle, tmp);
@@ -3444,10 +3712,8 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
else
cpustat->idle = cputime64_add(cpustat->idle, tmp);
- } else {
+ } else
cpustat->steal = cputime64_add(cpustat->steal, tmp);
- cpuacct_charge(p, -tmp);
- }
}
/*
@@ -3469,12 +3735,14 @@ void scheduler_tick(void)
/*
* Let rq->clock advance by at least TICK_NSEC:
*/
- if (unlikely(rq->clock < next_tick))
+ if (unlikely(rq->clock < next_tick)) {
rq->clock = next_tick;
+ rq->clock_underflows++;
+ }
rq->tick_timestamp = rq->clock;
update_cpu_load(rq);
- if (curr != rq->idle) /* FIXME: needed? */
- curr->sched_class->task_tick(rq, curr);
+ curr->sched_class->task_tick(rq, curr, 0);
+ update_sched_rt_period(rq);
spin_unlock(&rq->lock);
#ifdef CONFIG_SMP
@@ -3547,7 +3815,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
static inline void schedule_debug(struct task_struct *prev)
{
/*
- * Test if we are atomic. Since do_exit() needs to call into
+ * Test if we are atomic. Since do_exit() needs to call into
* schedule() atomically, we ignore that path for now.
* Otherwise, whine if we are scheduling when we should not be.
*/
@@ -3620,6 +3888,8 @@ need_resched_nonpreemptible:
schedule_debug(prev);
+ hrtick_clear(rq);
+
/*
* Do the rq-clock update outside the rq lock:
*/
@@ -3638,6 +3908,11 @@ need_resched_nonpreemptible:
switch_count = &prev->nvcsw;
}
+#ifdef CONFIG_SMP
+ if (prev->sched_class->pre_schedule)
+ prev->sched_class->pre_schedule(rq, prev);
+#endif
+
if (unlikely(!rq->nr_running))
idle_balance(cpu, rq);
@@ -3652,14 +3927,20 @@ need_resched_nonpreemptible:
++*switch_count;
context_switch(rq, prev, next); /* unlocks the rq */
+ /*
+ * the context switch might have flipped the stack from under
+ * us, hence refresh the local variables.
+ */
+ cpu = smp_processor_id();
+ rq = cpu_rq(cpu);
} else
spin_unlock_irq(&rq->lock);
- if (unlikely(reacquire_kernel_lock(current) < 0)) {
- cpu = smp_processor_id();
- rq = cpu_rq(cpu);
+ hrtick_set(rq);
+
+ if (unlikely(reacquire_kernel_lock(current) < 0))
goto need_resched_nonpreemptible;
- }
+
preempt_enable_no_resched();
if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
goto need_resched;
@@ -3669,19 +3950,18 @@ EXPORT_SYMBOL(schedule);
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
- * off of preempt_enable. Kernel preemptions off return from interrupt
+ * off of preempt_enable. Kernel preemptions off return from interrupt
* occur there and call schedule directly.
*/
asmlinkage void __sched preempt_schedule(void)
{
struct thread_info *ti = current_thread_info();
-#ifdef CONFIG_PREEMPT_BKL
struct task_struct *task = current;
int saved_lock_depth;
-#endif
+
/*
* If there is a non-zero preempt_count or interrupts are disabled,
- * we do not want to preempt the current task. Just return..
+ * we do not want to preempt the current task. Just return..
*/
if (likely(ti->preempt_count || irqs_disabled()))
return;
@@ -3694,14 +3974,10 @@ asmlinkage void __sched preempt_schedule(void)
* clear ->lock_depth so that schedule() doesnt
* auto-release the semaphore:
*/
-#ifdef CONFIG_PREEMPT_BKL
saved_lock_depth = task->lock_depth;
task->lock_depth = -1;
-#endif
schedule();
-#ifdef CONFIG_PREEMPT_BKL
task->lock_depth = saved_lock_depth;
-#endif
sub_preempt_count(PREEMPT_ACTIVE);
/*
@@ -3722,10 +3998,9 @@ EXPORT_SYMBOL(preempt_schedule);
asmlinkage void __sched preempt_schedule_irq(void)
{
struct thread_info *ti = current_thread_info();
-#ifdef CONFIG_PREEMPT_BKL
struct task_struct *task = current;
int saved_lock_depth;
-#endif
+
/* Catch callers which need to be fixed */
BUG_ON(ti->preempt_count || !irqs_disabled());
@@ -3737,16 +4012,12 @@ asmlinkage void __sched preempt_schedule_irq(void)
* clear ->lock_depth so that schedule() doesnt
* auto-release the semaphore:
*/
-#ifdef CONFIG_PREEMPT_BKL
saved_lock_depth = task->lock_depth;
task->lock_depth = -1;
-#endif
local_irq_enable();
schedule();
local_irq_disable();
-#ifdef CONFIG_PREEMPT_BKL
task->lock_depth = saved_lock_depth;
-#endif
sub_preempt_count(PREEMPT_ACTIVE);
/*
@@ -3767,12 +4038,12 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
EXPORT_SYMBOL(default_wake_function);
/*
- * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
- * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
+ * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
+ * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
* number) then we wake all the non-exclusive tasks and one exclusive task.
*
* There are circumstances in which we can try to wake a task which has already
- * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
+ * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
* zero in this (rare) case, and we handle it by continuing to scan the queue.
*/
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
@@ -3852,8 +4123,7 @@ void complete(struct completion *x)
spin_lock_irqsave(&x->wait.lock, flags);
x->done++;
- __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
- 1, 0, NULL);
+ __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);
@@ -3864,8 +4134,7 @@ void complete_all(struct completion *x)
spin_lock_irqsave(&x->wait.lock, flags);
x->done += UINT_MAX/2;
- __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
- 0, 0, NULL);
+ __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
@@ -3879,8 +4148,10 @@ do_wait_for_common(struct completion *x, long timeout, int state)
wait.flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue_tail(&x->wait, &wait);
do {
- if (state == TASK_INTERRUPTIBLE &&
- signal_pending(current)) {
+ if ((state == TASK_INTERRUPTIBLE &&
+ signal_pending(current)) ||
+ (state == TASK_KILLABLE &&
+ fatal_signal_pending(current))) {
__remove_wait_queue(&x->wait, &wait);
return -ERESTARTSYS;
}
@@ -3940,6 +4211,15 @@ wait_for_completion_interruptible_timeout(struct completion *x,
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
+int __sched wait_for_completion_killable(struct completion *x)
+{
+ long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
+ if (t == -ERESTARTSYS)
+ return t;
+ return 0;
+}
+EXPORT_SYMBOL(wait_for_completion_killable);
+
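wait_for_completion_killable() above returns -ERESTARTSYS when a fatal signal ends the wait early; a hedged kernel-context sketch of a caller (the function and the completion it waits on are made up):

#include <linux/completion.h>

/*
 * Illustrative only: wait for some asynchronous work, but let a fatal
 * signal (e.g. SIGKILL) abort the wait instead of leaving the task in
 * an unkillable sleep.
 */
static int example_wait_for_work(struct completion *work_done)
{
	int ret;

	ret = wait_for_completion_killable(work_done);
	if (ret)	/* -ERESTARTSYS: fatal signal while waiting */
		return ret;

	return 0;	/* the work completed normally */
}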
static long __sched
sleep_on_common(wait_queue_head_t *q, int state, long timeout)
{
@@ -4003,6 +4283,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
unsigned long flags;
int oldprio, on_rq, running;
struct rq *rq;
+ const struct sched_class *prev_class = p->sched_class;
BUG_ON(prio < 0 || prio > MAX_PRIO);
@@ -4011,7 +4292,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
oldprio = p->prio;
on_rq = p->se.on_rq;
- running = task_running(rq, p);
+ running = task_current(rq, p);
if (on_rq) {
dequeue_task(rq, p, 0);
if (running)
@@ -4028,18 +4309,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
if (on_rq) {
if (running)
p->sched_class->set_curr_task(rq);
+
enqueue_task(rq, p, 0);
- /*
- * Reschedule if we are currently running on this runqueue and
- * our priority decreased, or if we are not currently running on
- * this runqueue and our priority is higher than the current's
- */
- if (running) {
- if (p->prio > oldprio)
- resched_task(rq->curr);
- } else {
- check_preempt_curr(rq, p);
- }
+
+ check_class_changed(rq, p, prev_class, oldprio, running);
}
task_rq_unlock(rq, &flags);
}
@@ -4071,10 +4344,8 @@ void set_user_nice(struct task_struct *p, long nice)
goto out_unlock;
}
on_rq = p->se.on_rq;
- if (on_rq) {
+ if (on_rq)
dequeue_task(rq, p, 0);
- dec_load(rq, p);
- }
p->static_prio = NICE_TO_PRIO(nice);
set_load_weight(p);
@@ -4084,7 +4355,6 @@ void set_user_nice(struct task_struct *p, long nice)
if (on_rq) {
enqueue_task(rq, p, 0);
- inc_load(rq, p);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
@@ -4242,6 +4512,7 @@ int sched_setscheduler(struct task_struct *p, int policy,
{
int retval, oldprio, oldpolicy = -1, on_rq, running;
unsigned long flags;
+ const struct sched_class *prev_class = p->sched_class;
struct rq *rq;
/* may grab non-irq protected spin_locks */
@@ -4322,7 +4593,7 @@ recheck:
}
update_rq_clock(rq);
on_rq = p->se.on_rq;
- running = task_running(rq, p);
+ running = task_current(rq, p);
if (on_rq) {
deactivate_task(rq, p, 0);
if (running)
@@ -4335,18 +4606,10 @@ recheck:
if (on_rq) {
if (running)
p->sched_class->set_curr_task(rq);
+
activate_task(rq, p, 0);
- /*
- * Reschedule if we are currently running on this runqueue and
- * our priority decreased, or if we are not currently running on
- * this runqueue and our priority is higher than the current's
- */
- if (running) {
- if (p->prio > oldprio)
- resched_task(rq->curr);
- } else {
- check_preempt_curr(rq, p);
- }
+
+ check_class_changed(rq, p, prev_class, oldprio, running);
}
__task_rq_unlock(rq);
spin_unlock_irqrestore(&p->pi_lock, flags);
@@ -4385,8 +4648,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
* @policy: new policy.
* @param: structure containing the new RT priority.
*/
-asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
- struct sched_param __user *param)
+asmlinkage long
+sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
/* negative values for policy are not valid */
if (policy < 0)
@@ -4474,19 +4737,19 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
struct task_struct *p;
int retval;
- mutex_lock(&sched_hotcpu_mutex);
+ get_online_cpus();
read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
if (!p) {
read_unlock(&tasklist_lock);
- mutex_unlock(&sched_hotcpu_mutex);
+ put_online_cpus();
return -ESRCH;
}
/*
* It is not safe to call set_cpus_allowed with the
- * tasklist_lock held. We will bump the task_struct's
+ * tasklist_lock held. We will bump the task_struct's
* usage count and then drop tasklist_lock.
*/
get_task_struct(p);
@@ -4520,7 +4783,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
}
out_unlock:
put_task_struct(p);
- mutex_unlock(&sched_hotcpu_mutex);
+ put_online_cpus();
return retval;
}
@@ -4577,7 +4840,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
struct task_struct *p;
int retval;
- mutex_lock(&sched_hotcpu_mutex);
+ get_online_cpus();
read_lock(&tasklist_lock);
retval = -ESRCH;
@@ -4593,7 +4856,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
out_unlock:
read_unlock(&tasklist_lock);
- mutex_unlock(&sched_hotcpu_mutex);
+ put_online_cpus();
return retval;
}
@@ -4667,7 +4930,8 @@ static void __cond_resched(void)
} while (need_resched());
}
-int __sched cond_resched(void)
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
+int __sched _cond_resched(void)
{
if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
system_state == SYSTEM_RUNNING) {
@@ -4676,31 +4940,28 @@ int __sched cond_resched(void)
}
return 0;
}
-EXPORT_SYMBOL(cond_resched);
+EXPORT_SYMBOL(_cond_resched);
+#endif
/*
* cond_resched_lock() - if a reschedule is pending, drop the given lock,
* call schedule, and on return reacquire the lock.
*
- * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
+ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
* operations here to prevent schedule() from being called twice (once via
* spin_unlock(), once by hand).
*/
int cond_resched_lock(spinlock_t *lock)
{
+ int resched = need_resched() && system_state == SYSTEM_RUNNING;
int ret = 0;
- if (need_lockbreak(lock)) {
+ if (spin_needbreak(lock) || resched) {
spin_unlock(lock);
- cpu_relax();
- ret = 1;
- spin_lock(lock);
- }
- if (need_resched() && system_state == SYSTEM_RUNNING) {
- spin_release(&lock->dep_map, 1, _THIS_IP_);
- _raw_spin_unlock(lock);
- preempt_enable_no_resched();
- __cond_resched();
+ if (resched && need_resched())
+ __cond_resched();
+ else
+ cpu_relax();
ret = 1;
spin_lock(lock);
}
@@ -4736,7 +4997,7 @@ void __sched yield(void)
EXPORT_SYMBOL(yield);
/*
- * This task is about to go to sleep on IO. Increment rq->nr_iowait so
+ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
* that process accounting knows that this is a task in IO wait state.
*
* But don't do that if it is a deliberate, throttling IO wait (this task
@@ -4845,17 +5106,21 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
if (retval)
goto out_unlock;
- if (p->policy == SCHED_FIFO)
- time_slice = 0;
- else if (p->policy == SCHED_RR)
+ /*
+ * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
+ * tasks that are on an otherwise idle runqueue:
+ */
+ time_slice = 0;
+ if (p->policy == SCHED_RR) {
time_slice = DEF_TIMESLICE;
- else {
+ } else {
struct sched_entity *se = &p->se;
unsigned long flags;
struct rq *rq;
rq = task_rq_lock(p, &flags);
- time_slice = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
+ if (rq->cfs.load.weight)
+ time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
task_rq_unlock(rq, &flags);
}
read_unlock(&tasklist_lock);
@@ -4870,7 +5135,7 @@ out_unlock:
static const char stat_nam[] = "RSDTtZX";
-static void show_task(struct task_struct *p)
+void sched_show_task(struct task_struct *p)
{
unsigned long free = 0;
unsigned state;
@@ -4898,10 +5163,9 @@ static void show_task(struct task_struct *p)
}
#endif
printk(KERN_CONT "%5lu %5d %6d\n", free,
- task_pid_nr(p), task_pid_nr(p->parent));
+ task_pid_nr(p), task_pid_nr(p->real_parent));
- if (state != TASK_RUNNING)
- show_stack(p, NULL);
+ show_stack(p, NULL);
}
void show_state_filter(unsigned long state_filter)
@@ -4923,7 +5187,7 @@ void show_state_filter(unsigned long state_filter)
*/
touch_nmi_watchdog();
if (!state_filter || (p->state & state_filter))
- show_task(p);
+ sched_show_task(p);
} while_each_thread(g, p);
touch_all_softlockup_watchdogs();
@@ -4972,11 +5236,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
spin_unlock_irqrestore(&rq->lock, flags);
/* Set the preempt count _outside_ the spinlocks! */
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
- task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
-#else
task_thread_info(idle)->preempt_count = 0;
-#endif
+
/*
* The idle tasks have their own, simple scheduling class:
*/
@@ -4992,6 +5253,32 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
*/
cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
+/*
+ * Increase the granularity value when there are more CPUs,
+ * because with more CPUs the 'effective latency' as visible
+ * to users decreases. But the relationship is not linear,
+ * so pick a second-best guess by going with the log2 of the
+ * number of CPUs.
+ *
+ * This idea comes from the SD scheduler of Con Kolivas:
+ */
+static inline void sched_init_granularity(void)
+{
+ unsigned int factor = 1 + ilog2(num_online_cpus());
+ const unsigned long limit = 200000000;
+
+ sysctl_sched_min_granularity *= factor;
+ if (sysctl_sched_min_granularity > limit)
+ sysctl_sched_min_granularity = limit;
+
+ sysctl_sched_latency *= factor;
+ if (sysctl_sched_latency > limit)
+ sysctl_sched_latency = limit;
+
+ sysctl_sched_wakeup_granularity *= factor;
+ sysctl_sched_batch_wakeup_granularity *= factor;
+}
+
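A standalone sketch of the 1 + ilog2(num_online_cpus()) factor that sched_init_granularity() above applies (plain userspace C; ilog2() is a kernel helper, so the integer log2 is computed by hand here):

#include <stdio.h>

/* integer log2, matching what ilog2() returns for n >= 1 */
static unsigned int ilog2_approx(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	unsigned int cpus;

	for (cpus = 1; cpus <= 64; cpus *= 2)
		printf("%2u cpus -> factor %u\n", cpus, 1 + ilog2_approx(cpus));
	return 0;
}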
#ifdef CONFIG_SMP
/*
* This is how migration works:
@@ -5015,7 +5302,7 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
* is removed from the allowed bitmask.
*
* NOTE: the caller must have a valid reference to the task, the
- * task must not exit() & deallocate itself prematurely. The
+ * task must not exit() & deallocate itself prematurely. The
* call is not atomic; no spinlocks may be held.
*/
int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
@@ -5031,7 +5318,13 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
goto out;
}
- p->cpus_allowed = new_mask;
+ if (p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, &new_mask);
+ else {
+ p->cpus_allowed = new_mask;
+ p->rt.nr_cpus_allowed = cpus_weight(new_mask);
+ }
+
/* Can the task run on the task's current CPU? If so, we're done */
if (cpu_isset(task_cpu(p), new_mask))
goto out;
@@ -5052,7 +5345,7 @@ out:
EXPORT_SYMBOL_GPL(set_cpus_allowed);
/*
- * Move (not current) task off this cpu, onto dest cpu. We're doing
+ * Move (not current) task off this cpu, onto dest cpu. We're doing
* this because either it can't run here any more (set_cpus_allowed()
* away from this CPU, or CPU going down), or because we're
* attempting to rebalance this task on exec (sched_exec).
@@ -5197,7 +5490,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
* Try to stay on the same cpuset, where the
* current cpuset may be a subset of all cpus.
* The cpuset_cpus_allowed_locked() variant of
- * cpuset_cpus_allowed() will not block. It must be
+ * cpuset_cpus_allowed() will not block. It must be
* called within calls to cpuset_lock/cpuset_unlock.
*/
rq = task_rq_lock(p, &flags);
@@ -5210,10 +5503,11 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
* kernel threads (both mm NULL), since they never
* leave kernel.
*/
- if (p->mm && printk_ratelimit())
+ if (p->mm && printk_ratelimit()) {
printk(KERN_INFO "process %d (%s) no "
"longer affine to cpu%d\n",
- task_pid_nr(p), p->comm, dead_cpu);
+ task_pid_nr(p), p->comm, dead_cpu);
+ }
}
} while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
}
@@ -5257,23 +5551,9 @@ static void migrate_live_tasks(int src_cpu)
}
/*
- * activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static void activate_idle_task(struct task_struct *p, struct rq *rq)
-{
- update_rq_clock(rq);
-
- if (p->state == TASK_UNINTERRUPTIBLE)
- rq->nr_uninterruptible--;
-
- enqueue_task(rq, p, 0);
- inc_nr_running(p, rq);
-}
-
-/*
* Schedules idle task to be the next runnable task on current CPU.
- * It does so by boosting its priority to highest possible and adding it to
- * the _front_ of the runqueue. Used by CPU offline code.
+ * It does so by boosting its priority to highest possible.
+ * Used by CPU offline code.
*/
void sched_idle_next(void)
{
@@ -5293,8 +5573,8 @@ void sched_idle_next(void)
__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
- /* Add idle task to the _front_ of its priority queue: */
- activate_idle_task(p, rq);
+ update_rq_clock(rq);
+ activate_task(rq, p, 0);
spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -5329,7 +5609,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
/*
* Drop lock around migration; if someone else moves it,
- * that's OK. No task can be added to this CPU, so iteration is
+ * that's OK. No task can be added to this CPU, so iteration is
* fine.
*/
spin_unlock_irq(&rq->lock);
@@ -5365,7 +5645,7 @@ static struct ctl_table sd_ctl_dir[] = {
.procname = "sched_domain",
.mode = 0555,
},
- {0,},
+ {0, },
};
static struct ctl_table sd_ctl_root[] = {
@@ -5375,7 +5655,7 @@ static struct ctl_table sd_ctl_root[] = {
.mode = 0555,
.child = sd_ctl_dir,
},
- {0,},
+ {0, },
};
static struct ctl_table *sd_alloc_ctl_entry(int n)
@@ -5393,7 +5673,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
/*
* In the intermediate directories, both the child directory and
* procname are dynamically allocated and could fail but the mode
- * will always be set. In the lowest directory the names are
+ * will always be set. In the lowest directory the names are
* static strings and all have proc handlers.
*/
for (entry = *tablep; entry->mode; entry++) {
@@ -5455,7 +5735,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
return table;
}
-static ctl_table * sd_alloc_ctl_cpu_table(int cpu)
+static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
struct ctl_table *entry, *table;
struct sched_domain *sd;
@@ -5536,9 +5816,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
struct rq *rq;
switch (action) {
- case CPU_LOCK_ACQUIRE:
- mutex_lock(&sched_hotcpu_mutex);
- break;
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
@@ -5557,6 +5834,15 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_ONLINE_FROZEN:
/* Strictly unnecessary, as first user will wake it. */
wake_up_process(cpu_rq(cpu)->migration_thread);
+
+ /* Update our root-domain */
+ rq = cpu_rq(cpu);
+ spin_lock_irqsave(&rq->lock, flags);
+ if (rq->rd) {
+ BUG_ON(!cpu_isset(cpu, rq->rd->span));
+ cpu_set(cpu, rq->rd->online);
+ }
+ spin_unlock_irqrestore(&rq->lock, flags);
break;
#ifdef CONFIG_HOTPLUG_CPU
@@ -5564,7 +5850,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_UP_CANCELED_FROZEN:
if (!cpu_rq(cpu)->migration_thread)
break;
- /* Unbind it from offline cpu so it can run. Fall thru. */
+ /* Unbind it from offline cpu so it can run. Fall thru. */
kthread_bind(cpu_rq(cpu)->migration_thread,
any_online_cpu(cpu_online_map));
kthread_stop(cpu_rq(cpu)->migration_thread);
@@ -5591,9 +5877,11 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
- /* No need to migrate the tasks: it was best-effort if
- * they didn't take sched_hotcpu_mutex. Just wake up
- * the requestors. */
+ /*
+ * No need to migrate the tasks: it was best-effort if
+ * they didn't take sched_hotcpu_mutex. Just wake up
+ * the requestors.
+ */
spin_lock_irq(&rq->lock);
while (!list_empty(&rq->migration_queue)) {
struct migration_req *req;
@@ -5605,10 +5893,18 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
}
spin_unlock_irq(&rq->lock);
break;
-#endif
- case CPU_LOCK_RELEASE:
- mutex_unlock(&sched_hotcpu_mutex);
+
+ case CPU_DOWN_PREPARE:
+ /* Update our root-domain */
+ rq = cpu_rq(cpu);
+ spin_lock_irqsave(&rq->lock, flags);
+ if (rq->rd) {
+ BUG_ON(!cpu_isset(cpu, rq->rd->span));
+ cpu_clear(cpu, rq->rd->online);
+ }
+ spin_unlock_irqrestore(&rq->lock, flags);
break;
+#endif
}
return NOTIFY_OK;
}
@@ -5621,7 +5917,7 @@ static struct notifier_block __cpuinitdata migration_notifier = {
.priority = 10
};
-int __init migration_init(void)
+void __init migration_init(void)
{
void *cpu = (void *)(long)smp_processor_id();
int err;
@@ -5631,8 +5927,6 @@ int __init migration_init(void)
BUG_ON(err == NOTIFY_BAD);
migration_call(&migration_notifier, CPU_ONLINE, cpu);
register_cpu_notifier(&migration_notifier);
-
- return 0;
}
#endif
@@ -5798,11 +6092,76 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
return 1;
}
+static void rq_attach_root(struct rq *rq, struct root_domain *rd)
+{
+ unsigned long flags;
+ const struct sched_class *class;
+
+ spin_lock_irqsave(&rq->lock, flags);
+
+ if (rq->rd) {
+ struct root_domain *old_rd = rq->rd;
+
+ for (class = sched_class_highest; class; class = class->next) {
+ if (class->leave_domain)
+ class->leave_domain(rq);
+ }
+
+ cpu_clear(rq->cpu, old_rd->span);
+ cpu_clear(rq->cpu, old_rd->online);
+
+ if (atomic_dec_and_test(&old_rd->refcount))
+ kfree(old_rd);
+ }
+
+ atomic_inc(&rd->refcount);
+ rq->rd = rd;
+
+ cpu_set(rq->cpu, rd->span);
+ if (cpu_isset(rq->cpu, cpu_online_map))
+ cpu_set(rq->cpu, rd->online);
+
+ for (class = sched_class_highest; class; class = class->next) {
+ if (class->join_domain)
+ class->join_domain(rq);
+ }
+
+ spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+static void init_rootdomain(struct root_domain *rd)
+{
+ memset(rd, 0, sizeof(*rd));
+
+ cpus_clear(rd->span);
+ cpus_clear(rd->online);
+}
+
+static void init_defrootdomain(void)
+{
+ init_rootdomain(&def_root_domain);
+ atomic_set(&def_root_domain.refcount, 1);
+}
+
+static struct root_domain *alloc_rootdomain(void)
+{
+ struct root_domain *rd;
+
+ rd = kmalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return NULL;
+
+ init_rootdomain(rd);
+
+ return rd;
+}
+
/*
- * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
+ * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
* hold the hotplug lock.
*/
-static void cpu_attach_domain(struct sched_domain *sd, int cpu)
+static void
+cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct sched_domain *tmp;
@@ -5827,6 +6186,7 @@ static void cpu_attach_domain(struct sched_domain *sd, int cpu)
sched_domain_debug(sd, cpu);
+ rq_attach_root(rq, rd);
rcu_assign_pointer(rq->sd, sd);
}
@@ -5903,7 +6263,7 @@ init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
* @node: node whose sched_domain we're building
* @used_nodes: nodes already in the sched_domain
*
- * Find the next node to include in a given scheduling domain. Simply
+ * Find the next node to include in a given scheduling domain. Simply
* finds the closest node not already in the @used_nodes map.
*
* Should use nodemask_t.
@@ -5943,7 +6303,7 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
* @node: node whose cpumask we're constructing
* @size: number of nodes to include in this span
*
- * Given a node, construct a good cpumask for its sched_domain to span. It
+ * Given a node, construct a good cpumask for its sched_domain to span. It
* should be one that prevents unnecessary balancing, but also spreads tasks
* out optimally.
*/
@@ -5980,8 +6340,8 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
-static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
- struct sched_group **sg)
+static int
+cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
{
if (sg)
*sg = &per_cpu(sched_group_cpus, cpu);
@@ -5998,8 +6358,8 @@ static DEFINE_PER_CPU(struct sched_group, sched_group_core);
#endif
#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
- struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
{
int group;
cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
@@ -6010,8 +6370,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
return group;
}
#elif defined(CONFIG_SCHED_MC)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
- struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
{
if (sg)
*sg = &per_cpu(sched_group_core, cpu);
@@ -6022,8 +6382,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
-static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
- struct sched_group **sg)
+static int
+cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
{
int group;
#ifdef CONFIG_SCHED_MC
@@ -6195,6 +6555,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
static int build_sched_domains(const cpumask_t *cpu_map)
{
int i;
+ struct root_domain *rd;
#ifdef CONFIG_NUMA
struct sched_group **sched_group_nodes = NULL;
int sd_allnodes = 0;
@@ -6203,7 +6564,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
* Allocate the per-node list of sched groups
*/
sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!sched_group_nodes) {
printk(KERN_WARNING "Can not alloc sched group node list\n");
return -ENOMEM;
@@ -6211,6 +6572,12 @@ static int build_sched_domains(const cpumask_t *cpu_map)
sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
#endif
+ rd = alloc_rootdomain();
+ if (!rd) {
+ printk(KERN_WARNING "Cannot alloc root domain\n");
+ return -ENOMEM;
+ }
+
/*
* Set up domains for cpus specified by the cpu_map.
*/
@@ -6427,7 +6794,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
#else
sd = &per_cpu(phys_domains, i);
#endif
- cpu_attach_domain(sd, i);
+ cpu_attach_domain(sd, rd, i);
}
return 0;
@@ -6450,7 +6817,7 @@ static int ndoms_cur; /* number of sched domains in 'doms_cur' */
static cpumask_t fallback_doms;
/*
- * Set up scheduler domains and groups. Callers must hold the hotplug lock.
+ * Set up scheduler domains and groups. Callers must hold the hotplug lock.
* For now this just excludes isolated cpus, but could be used to
* exclude other special cases in the future.
*/
@@ -6485,26 +6852,26 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
unregister_sched_domain_sysctl();
for_each_cpu_mask(i, *cpu_map)
- cpu_attach_domain(NULL, i);
+ cpu_attach_domain(NULL, &def_root_domain, i);
synchronize_sched();
arch_destroy_sched_domains(cpu_map);
}
/*
* Partition sched domains as specified by the 'ndoms_new'
- * cpumasks in the array doms_new[] of cpumasks. This compares
+ * cpumasks in the array doms_new[] of cpumasks. This compares
* doms_new[] to the current sched domain partitioning, doms_cur[].
* It destroys each deleted domain and builds each new domain.
*
* 'doms_new' is an array of cpumask_t's of length 'ndoms_new'.
- * The masks don't intersect (don't overlap.) We should setup one
- * sched domain for each mask. CPUs not in any of the cpumasks will
- * not be load balanced. If the same cpumask appears both in the
+ * The masks don't intersect (don't overlap.) We should setup one
+ * sched domain for each mask. CPUs not in any of the cpumasks will
+ * not be load balanced. If the same cpumask appears both in the
* current 'doms_cur' domains and in the new 'doms_new', we can leave
* it as it is.
*
- * The passed in 'doms_new' should be kmalloc'd. This routine takes
- * ownership of it and will kfree it when done with it. If the caller
+ * The passed in 'doms_new' should be kmalloc'd. This routine takes
+ * ownership of it and will kfree it when done with it. If the caller
* failed the kmalloc call, then it can pass in doms_new == NULL,
* and partition_sched_domains() will fallback to the single partition
* 'fallback_doms'.
@@ -6515,6 +6882,8 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new)
{
int i, j;
+ lock_doms_cur();
+
/* always unregister in case we don't destroy any domains */
unregister_sched_domain_sysctl();
@@ -6555,6 +6924,8 @@ match2:
ndoms_cur = ndoms_new;
register_sched_domain_sysctl();
+
+ unlock_doms_cur();
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -6562,10 +6933,10 @@ static int arch_reinit_sched_domains(void)
{
int err;
- mutex_lock(&sched_hotcpu_mutex);
+ get_online_cpus();
detach_destroy_domains(&cpu_online_map);
err = arch_init_sched_domains(&cpu_online_map);
- mutex_unlock(&sched_hotcpu_mutex);
+ put_online_cpus();
return err;
}
@@ -6634,7 +7005,7 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
#endif
/*
- * Force a reinitialization of the sched domains hierarchy. The domains
+ * Force a reinitialization of the sched domains hierarchy. The domains
* and groups cannot be updated in place without racing with the balancing
* code, so we temporarily attach all running cpus to the NULL domain
* which will prevent rebalancing while the sched domains are recalculated.
@@ -6676,30 +7047,44 @@ void __init sched_init_smp(void)
{
cpumask_t non_isolated_cpus;
- mutex_lock(&sched_hotcpu_mutex);
+ get_online_cpus();
arch_init_sched_domains(&cpu_online_map);
cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
if (cpus_empty(non_isolated_cpus))
cpu_set(smp_processor_id(), non_isolated_cpus);
- mutex_unlock(&sched_hotcpu_mutex);
+ put_online_cpus();
/* XXX: Theoretical race here - CPU may be hotplugged now */
hotcpu_notifier(update_sched_domains, 0);
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed(current, non_isolated_cpus) < 0)
BUG();
+ sched_init_granularity();
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ if (nr_cpu_ids == 1)
+ return;
+
+ lb_monitor_task = kthread_create(load_balance_monitor, NULL,
+ "group_balance");
+ if (!IS_ERR(lb_monitor_task)) {
+ lb_monitor_task->flags |= PF_NOFREEZE;
+ wake_up_process(lb_monitor_task);
+ } else {
+ printk(KERN_ERR "Could not create load balance monitor thread"
+ "(error = %ld) \n", PTR_ERR(lb_monitor_task));
+ }
+#endif
}
#else
void __init sched_init_smp(void)
{
+ sched_init_granularity();
}
#endif /* CONFIG_SMP */
int in_sched_functions(unsigned long addr)
{
- /* Linker adds these: start and end of __sched functions */
- extern char __sched_text_start[], __sched_text_end[];
-
return in_lock_functions(addr) ||
(addr >= (unsigned long)__sched_text_start
&& addr < (unsigned long)__sched_text_end);
@@ -6714,13 +7099,87 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
cfs_rq->min_vruntime = (u64)(-(1LL << 20));
}
+static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
+{
+ struct rt_prio_array *array;
+ int i;
+
+ array = &rt_rq->active;
+ for (i = 0; i < MAX_RT_PRIO; i++) {
+ INIT_LIST_HEAD(array->queue + i);
+ __clear_bit(i, array->bitmap);
+ }
+ /* delimiter for bitsearch: */
+ __set_bit(MAX_RT_PRIO, array->bitmap);
+
+#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+ rt_rq->highest_prio = MAX_RT_PRIO;
+#endif
+#ifdef CONFIG_SMP
+ rt_rq->rt_nr_migratory = 0;
+ rt_rq->overloaded = 0;
+#endif
+
+ rt_rq->rt_time = 0;
+ rt_rq->rt_throttled = 0;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ rt_rq->rq = rq;
+#endif
+}
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void init_tg_cfs_entry(struct rq *rq, struct task_group *tg,
+ struct cfs_rq *cfs_rq, struct sched_entity *se,
+ int cpu, int add)
+{
+ tg->cfs_rq[cpu] = cfs_rq;
+ init_cfs_rq(cfs_rq, rq);
+ cfs_rq->tg = tg;
+ if (add)
+ list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
+
+ tg->se[cpu] = se;
+ se->cfs_rq = &rq->cfs;
+ se->my_q = cfs_rq;
+ se->load.weight = tg->shares;
+ se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
+ se->parent = NULL;
+}
+
+static void init_tg_rt_entry(struct rq *rq, struct task_group *tg,
+ struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
+ int cpu, int add)
+{
+ tg->rt_rq[cpu] = rt_rq;
+ init_rt_rq(rt_rq, rq);
+ rt_rq->tg = tg;
+ rt_rq->rt_se = rt_se;
+ if (add)
+ list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
+
+ tg->rt_se[cpu] = rt_se;
+ rt_se->rt_rq = &rq->rt;
+ rt_se->my_q = rt_rq;
+ rt_se->parent = NULL;
+ INIT_LIST_HEAD(&rt_se->run_list);
+}
+#endif
+
void __init sched_init(void)
{
int highest_cpu = 0;
int i, j;
+#ifdef CONFIG_SMP
+ init_defrootdomain();
+#endif
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ list_add(&init_task_group.list, &task_groups);
+#endif
+
for_each_possible_cpu(i) {
- struct rt_prio_array *array;
struct rq *rq;
rq = cpu_rq(i);
@@ -6729,52 +7188,39 @@ void __init sched_init(void)
rq->nr_running = 0;
rq->clock = 1;
init_cfs_rq(&rq->cfs, rq);
+ init_rt_rq(&rq->rt, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
- INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
- {
- struct cfs_rq *cfs_rq = &per_cpu(init_cfs_rq, i);
- struct sched_entity *se =
- &per_cpu(init_sched_entity, i);
-
- init_cfs_rq_p[i] = cfs_rq;
- init_cfs_rq(cfs_rq, rq);
- cfs_rq->tg = &init_task_group;
- list_add(&cfs_rq->leaf_cfs_rq_list,
- &rq->leaf_cfs_rq_list);
-
- init_sched_entity_p[i] = se;
- se->cfs_rq = &rq->cfs;
- se->my_q = cfs_rq;
- se->load.weight = init_task_group_load;
- se->load.inv_weight =
- div64_64(1ULL<<32, init_task_group_load);
- se->parent = NULL;
- }
init_task_group.shares = init_task_group_load;
- spin_lock_init(&init_task_group.lock);
+ INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
+ init_tg_cfs_entry(rq, &init_task_group,
+ &per_cpu(init_cfs_rq, i),
+ &per_cpu(init_sched_entity, i), i, 1);
+
+ init_task_group.rt_ratio = sysctl_sched_rt_ratio; /* XXX */
+ INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
+ init_tg_rt_entry(rq, &init_task_group,
+ &per_cpu(init_rt_rq, i),
+ &per_cpu(init_sched_rt_entity, i), i, 1);
#endif
+ rq->rt_period_expire = 0;
+ rq->rt_throttled = 0;
for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
rq->cpu_load[j] = 0;
#ifdef CONFIG_SMP
rq->sd = NULL;
+ rq->rd = NULL;
rq->active_balance = 0;
rq->next_balance = jiffies;
rq->push_cpu = 0;
rq->cpu = i;
rq->migration_thread = NULL;
INIT_LIST_HEAD(&rq->migration_queue);
+ rq_attach_root(rq, &def_root_domain);
#endif
+ init_rq_hrtick(rq);
atomic_set(&rq->nr_iowait, 0);
-
- array = &rq->rt.active;
- for (j = 0; j < MAX_RT_PRIO; j++) {
- INIT_LIST_HEAD(array->queue + j);
- __clear_bit(j, array->bitmap);
- }
highest_cpu = i;
- /* delimiter for bitsearch: */
- __set_bit(MAX_RT_PRIO, array->bitmap);
}
set_load_weight(&init_task);
@@ -6925,8 +7371,8 @@ struct task_struct *curr_task(int cpu)
* @p: the task pointer to set.
*
* Description: This function must only be used when non-maskable interrupts
- * are serviced on a separate stack. It allows the architecture to switch the
- * notion of the current task on a cpu in a non-blocking manner. This function
+ * are serviced on a separate stack. It allows the architecture to switch the
+ * notion of the current task on a cpu in a non-blocking manner. This function
* must be called with all CPUs synchronized, and interrupts disabled; the
* caller must save the original value of the current task (see
* curr_task() above) and restore that value before reenabling interrupts and
@@ -6943,12 +7389,187 @@ void set_curr_task(int cpu, struct task_struct *p)
#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_SMP
+/*
+ * distribute shares of all task groups among their schedulable entities,
+ * to reflect load distribution across cpus.
+ */
+static int rebalance_shares(struct sched_domain *sd, int this_cpu)
+{
+ struct cfs_rq *cfs_rq;
+ struct rq *rq = cpu_rq(this_cpu);
+ cpumask_t sdspan = sd->span;
+ int balanced = 1;
+
+ /* Walk through all the task groups that we have */
+ for_each_leaf_cfs_rq(rq, cfs_rq) {
+ int i;
+ unsigned long total_load = 0, total_shares;
+ struct task_group *tg = cfs_rq->tg;
+
+ /* Gather total task load of this group across cpus */
+ for_each_cpu_mask(i, sdspan)
+ total_load += tg->cfs_rq[i]->load.weight;
+
+ /* Nothing to do if this group has no load */
+ if (!total_load)
+ continue;
+
+ /*
+ * tg->shares represents the number of cpu shares the task group
+ * is eligible to hold on a single cpu. On N cpus, it is
+ * eligible to hold (N * tg->shares) number of cpu shares.
+ */
+ total_shares = tg->shares * cpus_weight(sdspan);
+
+ /*
+ * redistribute total_shares across cpus as per the task load
+ * distribution.
+ */
+ for_each_cpu_mask(i, sdspan) {
+ unsigned long local_load, local_shares;
+
+ local_load = tg->cfs_rq[i]->load.weight;
+ local_shares = (local_load * total_shares) / total_load;
+ if (!local_shares)
+ local_shares = MIN_GROUP_SHARES;
+ if (local_shares == tg->se[i]->load.weight)
+ continue;
+
+ spin_lock_irq(&cpu_rq(i)->lock);
+ set_se_shares(tg->se[i], local_shares);
+ spin_unlock_irq(&cpu_rq(i)->lock);
+ balanced = 0;
+ }
+ }
+
+ return balanced;
+}
+
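The comments above give the redistribution rule: a group may hold tg->shares per cpu, so total_shares = tg->shares * nr_cpus_in_span, split across cpus in proportion to the group's per-cpu task load. A standalone sketch of that arithmetic with made-up loads; MIN_SHARES below stands in for MIN_GROUP_SHARES, whose definition is not part of this hunk:

#include <stdio.h>

#define MIN_SHARES 2UL  /* placeholder for MIN_GROUP_SHARES (assumed value) */

int main(void)
{
        unsigned long tg_shares = 1024;                   /* tg->shares          */
        unsigned long load[] = { 3072, 1024, 0, 1024 };   /* per-cpu cfs_rq load */
        int ncpus = 4, i;
        unsigned long total_load = 0, total_shares;

        for (i = 0; i < ncpus; i++)
                total_load += load[i];

        /* on N cpus the group is eligible for N * tg->shares in total */
        total_shares = tg_shares * ncpus;

        for (i = 0; i < ncpus; i++) {
                unsigned long local_shares = total_load ?
                        load[i] * total_shares / total_load : 0;

                if (!local_shares)
                        local_shares = MIN_SHARES;
                printf("cpu%d: load %lu -> shares %lu\n", i, load[i], local_shares);
        }
        return 0;
}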
+/*
+ * How frequently should we rebalance_shares() across cpus?
+ *
+ * The more frequently we rebalance shares, the more accurate is the fairness
+ * of cpu bandwidth distribution between task groups. However higher frequency
+ * also implies increased scheduling overhead.
+ *
+ * sysctl_sched_min_bal_int_shares represents the minimum interval between
+ * consecutive calls to rebalance_shares() in the same sched domain.
+ *
+ * sysctl_sched_max_bal_int_shares represents the maximum interval between
+ * consecutive calls to rebalance_shares() in the same sched domain.
+ *
+ * These settings allow for the appropriate trade-off between accuracy of
+ * fairness and the associated overhead.
+ *
+ */
+
+/* default: 8ms, units: milliseconds */
+const_debug unsigned int sysctl_sched_min_bal_int_shares = 8;
+
+/* default: 128ms, units: milliseconds */
+const_debug unsigned int sysctl_sched_max_bal_int_shares = 128;
+
+/* kernel thread that runs rebalance_shares() periodically */
+static int load_balance_monitor(void *unused)
+{
+ unsigned int timeout = sysctl_sched_min_bal_int_shares;
+ struct sched_param schedparm;
+ int ret;
+
+ /*
+ * We don't want this thread's execution to be limited by the shares
+ * assigned to default group (init_task_group). Hence make it run
+ * as a SCHED_RR RT task at the lowest priority.
+ */
+ schedparm.sched_priority = 1;
+ ret = sched_setscheduler(current, SCHED_RR, &schedparm);
+ if (ret)
+ printk(KERN_ERR "Couldn't set SCHED_RR policy for load balance"
+ " monitor thread (error = %d) \n", ret);
+
+ while (!kthread_should_stop()) {
+ int i, cpu, balanced = 1;
+
+ /* Prevent cpus going down or coming up */
+ get_online_cpus();
+ /* lock out changes to doms_cur[] array */
+ lock_doms_cur();
+ /*
+ * Enter a rcu read-side critical section to safely walk rq->sd
+ * chain on various cpus and to walk task group list
+ * (rq->leaf_cfs_rq_list) in rebalance_shares().
+ */
+ rcu_read_lock();
+
+ for (i = 0; i < ndoms_cur; i++) {
+ cpumask_t cpumap = doms_cur[i];
+ struct sched_domain *sd = NULL, *sd_prev = NULL;
+
+ cpu = first_cpu(cpumap);
+
+ /* Find the highest domain at which to balance shares */
+ for_each_domain(cpu, sd) {
+ if (!(sd->flags & SD_LOAD_BALANCE))
+ continue;
+ sd_prev = sd;
+ }
+
+ sd = sd_prev;
+ /* sd == NULL? No load balance reqd in this domain */
+ if (!sd)
+ continue;
+
+ balanced &= rebalance_shares(sd, cpu);
+ }
+
+ rcu_read_unlock();
+
+ unlock_doms_cur();
+ put_online_cpus();
+
+ if (!balanced)
+ timeout = sysctl_sched_min_bal_int_shares;
+ else if (timeout < sysctl_sched_max_bal_int_shares)
+ timeout *= 2;
+
+ msleep_interruptible(timeout);
+ }
+
+ return 0;
+}
+#endif /* CONFIG_SMP */
+
+static void free_sched_group(struct task_group *tg)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ if (tg->cfs_rq)
+ kfree(tg->cfs_rq[i]);
+ if (tg->se)
+ kfree(tg->se[i]);
+ if (tg->rt_rq)
+ kfree(tg->rt_rq[i]);
+ if (tg->rt_se)
+ kfree(tg->rt_se[i]);
+ }
+
+ kfree(tg->cfs_rq);
+ kfree(tg->se);
+ kfree(tg->rt_rq);
+ kfree(tg->rt_se);
+ kfree(tg);
+}
+
/* allocate runqueue etc for a new task group */
struct task_group *sched_create_group(void)
{
struct task_group *tg;
struct cfs_rq *cfs_rq;
struct sched_entity *se;
+ struct rt_rq *rt_rq;
+ struct sched_rt_entity *rt_se;
struct rq *rq;
int i;
@@ -6962,97 +7583,89 @@ struct task_group *sched_create_group(void)
tg->se = kzalloc(sizeof(se) * NR_CPUS, GFP_KERNEL);
if (!tg->se)
goto err;
+ tg->rt_rq = kzalloc(sizeof(rt_rq) * NR_CPUS, GFP_KERNEL);
+ if (!tg->rt_rq)
+ goto err;
+ tg->rt_se = kzalloc(sizeof(rt_se) * NR_CPUS, GFP_KERNEL);
+ if (!tg->rt_se)
+ goto err;
+
+ tg->shares = NICE_0_LOAD;
+ tg->rt_ratio = 0; /* XXX */
for_each_possible_cpu(i) {
rq = cpu_rq(i);
- cfs_rq = kmalloc_node(sizeof(struct cfs_rq), GFP_KERNEL,
- cpu_to_node(i));
+ cfs_rq = kmalloc_node(sizeof(struct cfs_rq),
+ GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
if (!cfs_rq)
goto err;
- se = kmalloc_node(sizeof(struct sched_entity), GFP_KERNEL,
- cpu_to_node(i));
+ se = kmalloc_node(sizeof(struct sched_entity),
+ GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
if (!se)
goto err;
- memset(cfs_rq, 0, sizeof(struct cfs_rq));
- memset(se, 0, sizeof(struct sched_entity));
+ rt_rq = kmalloc_node(sizeof(struct rt_rq),
+ GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+ if (!rt_rq)
+ goto err;
- tg->cfs_rq[i] = cfs_rq;
- init_cfs_rq(cfs_rq, rq);
- cfs_rq->tg = tg;
+ rt_se = kmalloc_node(sizeof(struct sched_rt_entity),
+ GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+ if (!rt_se)
+ goto err;
- tg->se[i] = se;
- se->cfs_rq = &rq->cfs;
- se->my_q = cfs_rq;
- se->load.weight = NICE_0_LOAD;
- se->load.inv_weight = div64_64(1ULL<<32, NICE_0_LOAD);
- se->parent = NULL;
+ init_tg_cfs_entry(rq, tg, cfs_rq, se, i, 0);
+ init_tg_rt_entry(rq, tg, rt_rq, rt_se, i, 0);
}
+ lock_task_group_list();
for_each_possible_cpu(i) {
rq = cpu_rq(i);
cfs_rq = tg->cfs_rq[i];
list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
+ rt_rq = tg->rt_rq[i];
+ list_add_rcu(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
}
-
- tg->shares = NICE_0_LOAD;
- spin_lock_init(&tg->lock);
+ list_add_rcu(&tg->list, &task_groups);
+ unlock_task_group_list();
return tg;
err:
- for_each_possible_cpu(i) {
- if (tg->cfs_rq)
- kfree(tg->cfs_rq[i]);
- if (tg->se)
- kfree(tg->se[i]);
- }
- kfree(tg->cfs_rq);
- kfree(tg->se);
- kfree(tg);
-
+ free_sched_group(tg);
return ERR_PTR(-ENOMEM);
}
/* rcu callback to free various structures associated with a task group */
-static void free_sched_group(struct rcu_head *rhp)
+static void free_sched_group_rcu(struct rcu_head *rhp)
{
- struct cfs_rq *cfs_rq = container_of(rhp, struct cfs_rq, rcu);
- struct task_group *tg = cfs_rq->tg;
- struct sched_entity *se;
- int i;
-
/* now it should be safe to free those cfs_rqs */
- for_each_possible_cpu(i) {
- cfs_rq = tg->cfs_rq[i];
- kfree(cfs_rq);
-
- se = tg->se[i];
- kfree(se);
- }
-
- kfree(tg->cfs_rq);
- kfree(tg->se);
- kfree(tg);
+ free_sched_group(container_of(rhp, struct task_group, rcu));
}
/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
- struct cfs_rq *cfs_rq;
+ struct cfs_rq *cfs_rq = NULL;
+ struct rt_rq *rt_rq = NULL;
int i;
+ lock_task_group_list();
for_each_possible_cpu(i) {
cfs_rq = tg->cfs_rq[i];
list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
+ rt_rq = tg->rt_rq[i];
+ list_del_rcu(&rt_rq->leaf_rt_rq_list);
}
+ list_del_rcu(&tg->list);
+ unlock_task_group_list();
- cfs_rq = tg->cfs_rq[0];
+ BUG_ON(!cfs_rq);
/* wait for possible concurrent references to cfs_rqs complete */
- call_rcu(&cfs_rq->rcu, free_sched_group);
+ call_rcu(&tg->rcu, free_sched_group_rcu);
}
/* change task's runqueue when it moves between groups.
@@ -7068,12 +7681,9 @@ void sched_move_task(struct task_struct *tsk)
rq = task_rq_lock(tsk, &flags);
- if (tsk->sched_class != &fair_sched_class)
- goto done;
-
update_rq_clock(rq);
- running = task_running(rq, tsk);
+ running = task_current(rq, tsk);
on_rq = tsk->se.on_rq;
if (on_rq) {
@@ -7082,7 +7692,7 @@ void sched_move_task(struct task_struct *tsk)
tsk->sched_class->put_prev_task(rq, tsk);
}
- set_task_cfs_rq(tsk);
+ set_task_rq(tsk, task_cpu(tsk));
if (on_rq) {
if (unlikely(running))
@@ -7090,45 +7700,82 @@ void sched_move_task(struct task_struct *tsk)
enqueue_task(rq, tsk, 0);
}
-done:
task_rq_unlock(rq, &flags);
}
+/* rq->lock to be locked by caller */
static void set_se_shares(struct sched_entity *se, unsigned long shares)
{
struct cfs_rq *cfs_rq = se->cfs_rq;
struct rq *rq = cfs_rq->rq;
int on_rq;
- spin_lock_irq(&rq->lock);
+ if (!shares)
+ shares = MIN_GROUP_SHARES;
on_rq = se->on_rq;
- if (on_rq)
+ if (on_rq) {
dequeue_entity(cfs_rq, se, 0);
+ dec_cpu_load(rq, se->load.weight);
+ }
se->load.weight = shares;
se->load.inv_weight = div64_64((1ULL<<32), shares);
- if (on_rq)
+ if (on_rq) {
enqueue_entity(cfs_rq, se, 0);
-
- spin_unlock_irq(&rq->lock);
+ inc_cpu_load(rq, se->load.weight);
+ }
}
int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
int i;
+ struct cfs_rq *cfs_rq;
+ struct rq *rq;
- spin_lock(&tg->lock);
+ lock_task_group_list();
if (tg->shares == shares)
goto done;
+ if (shares < MIN_GROUP_SHARES)
+ shares = MIN_GROUP_SHARES;
+
+ /*
+ * Prevent any load balance activity (rebalance_shares,
+ * load_balance_fair) from referring to this group first,
+ * by taking it off the rq->leaf_cfs_rq_list on each cpu.
+ */
+ for_each_possible_cpu(i) {
+ cfs_rq = tg->cfs_rq[i];
+ list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
+ }
+
+ /* wait for any ongoing reference to this group to finish */
+ synchronize_sched();
+
+ /*
+ * Now we are free to modify the group's share on each cpu
+ * w/o tripping rebalance_shares or load_balance_fair.
+ */
tg->shares = shares;
- for_each_possible_cpu(i)
+ for_each_possible_cpu(i) {
+ spin_lock_irq(&cpu_rq(i)->lock);
set_se_shares(tg->se[i], shares);
+ spin_unlock_irq(&cpu_rq(i)->lock);
+ }
+ /*
+ * Enable load balance activity on this group, by inserting it back on
+ * each cpu's rq->leaf_cfs_rq_list.
+ */
+ for_each_possible_cpu(i) {
+ rq = cpu_rq(i);
+ cfs_rq = tg->cfs_rq[i];
+ list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
+ }
done:
- spin_unlock(&tg->lock);
+ unlock_task_group_list();
return 0;
}
@@ -7137,6 +7784,31 @@ unsigned long sched_group_shares(struct task_group *tg)
return tg->shares;
}
+/*
+ * Ensure the total rt_ratio <= sysctl_sched_rt_ratio
+ */
+int sched_group_set_rt_ratio(struct task_group *tg, unsigned long rt_ratio)
+{
+ struct task_group *tgi;
+ unsigned long total = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tgi, &task_groups, list)
+ total += tgi->rt_ratio;
+ rcu_read_unlock();
+
+ if (total + rt_ratio - tg->rt_ratio > sysctl_sched_rt_ratio)
+ return -EINVAL;
+
+ tg->rt_ratio = rt_ratio;
+ return 0;
+}
+
+unsigned long sched_group_rt_ratio(struct task_group *tg)
+{
+ return tg->rt_ratio;
+}
+
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_FAIR_CGROUP_SCHED
@@ -7173,16 +7845,17 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
return &tg->css;
}
-static void cpu_cgroup_destroy(struct cgroup_subsys *ss,
- struct cgroup *cgrp)
+static void
+cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
struct task_group *tg = cgroup_tg(cgrp);
sched_destroy_group(tg);
}
-static int cpu_cgroup_can_attach(struct cgroup_subsys *ss,
- struct cgroup *cgrp, struct task_struct *tsk)
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct task_struct *tsk)
{
/* We don't support RT-tasks being in separate groups */
if (tsk->sched_class != &fair_sched_class)
@@ -7211,26 +7884,169 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
return (u64) tg->shares;
}
-static struct cftype cpu_shares = {
- .name = "shares",
- .read_uint = cpu_shares_read_uint,
- .write_uint = cpu_shares_write_uint,
+static int cpu_rt_ratio_write_uint(struct cgroup *cgrp, struct cftype *cftype,
+ u64 rt_ratio_val)
+{
+ return sched_group_set_rt_ratio(cgroup_tg(cgrp), rt_ratio_val);
+}
+
+static u64 cpu_rt_ratio_read_uint(struct cgroup *cgrp, struct cftype *cft)
+{
+ struct task_group *tg = cgroup_tg(cgrp);
+
+ return (u64) tg->rt_ratio;
+}
+
+static struct cftype cpu_files[] = {
+ {
+ .name = "shares",
+ .read_uint = cpu_shares_read_uint,
+ .write_uint = cpu_shares_write_uint,
+ },
+ {
+ .name = "rt_ratio",
+ .read_uint = cpu_rt_ratio_read_uint,
+ .write_uint = cpu_rt_ratio_write_uint,
+ },
};
static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
- return cgroup_add_file(cont, ss, &cpu_shares);
+ return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
}
struct cgroup_subsys cpu_cgroup_subsys = {
- .name = "cpu",
- .create = cpu_cgroup_create,
- .destroy = cpu_cgroup_destroy,
- .can_attach = cpu_cgroup_can_attach,
- .attach = cpu_cgroup_attach,
- .populate = cpu_cgroup_populate,
- .subsys_id = cpu_cgroup_subsys_id,
+ .name = "cpu",
+ .create = cpu_cgroup_create,
+ .destroy = cpu_cgroup_destroy,
+ .can_attach = cpu_cgroup_can_attach,
+ .attach = cpu_cgroup_attach,
+ .populate = cpu_cgroup_populate,
+ .subsys_id = cpu_cgroup_subsys_id,
.early_init = 1,
};
#endif /* CONFIG_FAIR_CGROUP_SCHED */
+
+#ifdef CONFIG_CGROUP_CPUACCT
+
+/*
+ * CPU accounting code for task groups.
+ *
+ * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
+ * (balbir@in.ibm.com).
+ */
+
+/* track cpu usage of a group of tasks */
+struct cpuacct {
+ struct cgroup_subsys_state css;
+ /* cpuusage holds pointer to a u64-type object on every cpu */
+ u64 *cpuusage;
+};
+
+struct cgroup_subsys cpuacct_subsys;
+
+/* return cpu accounting group corresponding to this container */
+static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
+{
+ return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
+ struct cpuacct, css);
+}
+
+/* return cpu accounting group to which this task belongs */
+static inline struct cpuacct *task_ca(struct task_struct *tsk)
+{
+ return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
+ struct cpuacct, css);
+}
+
+/* create a new cpu accounting group */
+static struct cgroup_subsys_state *cpuacct_create(
+ struct cgroup_subsys *ss, struct cgroup *cont)
+{
+ struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+
+ if (!ca)
+ return ERR_PTR(-ENOMEM);
+
+ ca->cpuusage = alloc_percpu(u64);
+ if (!ca->cpuusage) {
+ kfree(ca);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return &ca->css;
+}
+
+/* destroy an existing cpu accounting group */
+static void
+cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+ struct cpuacct *ca = cgroup_ca(cont);
+
+ free_percpu(ca->cpuusage);
+ kfree(ca);
+}
+
+/* return total cpu usage (in nanoseconds) of a group */
+static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
+{
+ struct cpuacct *ca = cgroup_ca(cont);
+ u64 totalcpuusage = 0;
+ int i;
+
+ for_each_possible_cpu(i) {
+ u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
+
+ /*
+ * Take rq->lock to make 64-bit addition safe on 32-bit
+ * platforms.
+ */
+ spin_lock_irq(&cpu_rq(i)->lock);
+ totalcpuusage += *cpuusage;
+ spin_unlock_irq(&cpu_rq(i)->lock);
+ }
+
+ return totalcpuusage;
+}
+
+static struct cftype files[] = {
+ {
+ .name = "usage",
+ .read_uint = cpuusage_read,
+ },
+};
+
+static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+ return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
+}
+
+/*
+ * charge this task's execution time to its accounting group.
+ *
+ * called with rq->lock held.
+ */
+static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
+{
+ struct cpuacct *ca;
+
+ if (!cpuacct_subsys.active)
+ return;
+
+ ca = task_ca(tsk);
+ if (ca) {
+ u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk));
+
+ *cpuusage += cputime;
+ }
+}
+
+struct cgroup_subsys cpuacct_subsys = {
+ .name = "cpuacct",
+ .create = cpuacct_create,
+ .destroy = cpuacct_destroy,
+ .populate = cpuacct_populate,
+ .subsys_id = cpuacct_subsys_id,
+};
+#endif /* CONFIG_CGROUP_CPUACCT */
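The cpu cgroup files ("shares", "rt_ratio") and the cpuacct "usage" file added in this patch are consumed from user space through the cgroup filesystem. A hedged sketch of reading them; the /cgroup mount point and the <subsystem>.<file> naming reflect how such a hierarchy is typically mounted and are assumptions, not something this patch defines:

#include <stdio.h>

/* print a single numeric cgroup control file */
static void show(const char *path)
{
        FILE *f = fopen(path, "r");
        unsigned long long val;

        if (!f) {
                perror(path);
                return;
        }
        if (fscanf(f, "%llu", &val) == 1)
                printf("%-28s %llu\n", path, val);
        fclose(f);
}

int main(void)
{
        show("/cgroup/cpu.shares");    /* group weight, NICE_0_LOAD by default */
        show("/cgroup/cpu.rt_ratio");  /* group's slice of the global RT ratio */
        show("/cgroup/cpuacct.usage"); /* accumulated cpu time, in nanoseconds */
        return 0;
}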
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index e6fb392e516..4b5e24cf2f4 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -31,9 +31,9 @@
/*
* Ease the printing of nsec fields:
*/
-static long long nsec_high(long long nsec)
+static long long nsec_high(unsigned long long nsec)
{
- if (nsec < 0) {
+ if ((long long)nsec < 0) {
nsec = -nsec;
do_div(nsec, 1000000);
return -nsec;
@@ -43,9 +43,9 @@ static long long nsec_high(long long nsec)
return nsec;
}
-static unsigned long nsec_low(long long nsec)
+static unsigned long nsec_low(unsigned long long nsec)
{
- if (nsec < 0)
+ if ((long long)nsec < 0)
nsec = -nsec;
return do_div(nsec, 1000000);
@@ -80,6 +80,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
struct task_struct *g, *p;
+ unsigned long flags;
SEQ_printf(m,
"\nrunnable tasks:\n"
@@ -88,7 +89,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
"------------------------------------------------------"
"----------------------------------------------------\n");
- read_lock_irq(&tasklist_lock);
+ read_lock_irqsave(&tasklist_lock, flags);
do_each_thread(g, p) {
if (!p->se.on_rq || task_cpu(p) != rq_cpu)
@@ -97,7 +98,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
print_task(m, rq, p);
} while_each_thread(g, p);
- read_unlock_irq(&tasklist_lock);
+ read_unlock_irqrestore(&tasklist_lock, flags);
}
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
@@ -178,6 +179,7 @@ static void print_cpu(struct seq_file *m, int cpu)
PN(prev_clock_raw);
P(clock_warps);
P(clock_overflows);
+ P(clock_underflows);
P(clock_deep_idle_events);
PN(clock_max_delta);
P(cpu_load[0]);
@@ -198,7 +200,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
u64 now = ktime_to_ns(ktime_get());
int cpu;
- SEQ_printf(m, "Sched Debug Version: v0.06-v22, %s %.*s\n",
+ SEQ_printf(m, "Sched Debug Version: v0.07, %s %.*s\n",
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
@@ -210,7 +212,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
#define PN(x) \
SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
PN(sysctl_sched_latency);
- PN(sysctl_sched_nr_latency);
+ PN(sysctl_sched_min_granularity);
PN(sysctl_sched_wakeup_granularity);
PN(sysctl_sched_batch_wakeup_granularity);
PN(sysctl_sched_child_runs_first);
@@ -298,6 +300,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
PN(se.exec_max);
PN(se.slice_max);
PN(se.wait_max);
+ PN(se.wait_sum);
+ P(se.wait_count);
P(sched_info.bkl_count);
P(se.nr_migrations);
P(se.nr_migrations_cold);
@@ -326,10 +330,12 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
avg_atom = -1LL;
avg_per_cpu = p->se.sum_exec_runtime;
- if (p->se.nr_migrations)
- avg_per_cpu = div64_64(avg_per_cpu, p->se.nr_migrations);
- else
+ if (p->se.nr_migrations) {
+ avg_per_cpu = div64_64(avg_per_cpu,
+ p->se.nr_migrations);
+ } else {
avg_per_cpu = -1LL;
+ }
__PN(avg_atom);
__PN(avg_per_cpu);
@@ -363,6 +369,8 @@ void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
p->se.wait_max = 0;
+ p->se.wait_sum = 0;
+ p->se.wait_count = 0;
p->se.sleep_max = 0;
p->se.sum_sleep_runtime = 0;
p->se.block_max = 0;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9971831b560..6c091d6e159 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -20,9 +20,11 @@
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*/
+#include <linux/latencytop.h>
+
/*
* Targeted preemption latency for CPU-bound tasks:
- * (default: 20ms, units: nanoseconds)
+ * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
*
* NOTE: this latency value is not the same as the concept of
* 'timeslice length' - timeslices in CFS are of variable length
@@ -32,19 +34,24 @@
* (to see the precise effective timeslice length of your workload,
* run vmstat and monitor the context-switches (cs) field)
*/
-const_debug unsigned int sysctl_sched_latency = 20000000ULL;
+unsigned int sysctl_sched_latency = 20000000ULL;
/*
- * After fork, child runs first. (default) If set to 0 then
- * parent will (try to) run first.
+ * Minimal preemption granularity for CPU-bound tasks:
+ * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
-const_debug unsigned int sysctl_sched_child_runs_first = 1;
+unsigned int sysctl_sched_min_granularity = 4000000ULL;
/*
- * Minimal preemption granularity for CPU-bound tasks:
- * (default: 2 msec, units: nanoseconds)
+ * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
*/
-const_debug unsigned int sysctl_sched_nr_latency = 20;
+static unsigned int sched_nr_latency = 5;
+
+/*
+ * After fork, child runs first. (default) If set to 0 then
+ * parent will (try to) run first.
+ */
+const_debug unsigned int sysctl_sched_child_runs_first = 1;
/*
* sys_sched_yield() compat mode
@@ -56,23 +63,23 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
/*
* SCHED_BATCH wake-up granularity.
- * (default: 10 msec, units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
*
* This option delays the preemption effects of decoupled workloads
* and reduces their over-scheduling. Synchronous workloads will still
* have immediate wakeup/sleep latencies.
*/
-const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
+unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
/*
* SCHED_OTHER wake-up granularity.
- * (default: 10 msec, units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
*
* This option delays the preemption effects of decoupled workloads
* and reduces their over-scheduling. Synchronous workloads will still
* have immediate wakeup/sleep latencies.
*/
-const_debug unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
@@ -212,6 +219,22 @@ static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
* Scheduling class statistics methods:
*/
+#ifdef CONFIG_SCHED_DEBUG
+int sched_nr_latency_handler(struct ctl_table *table, int write,
+ struct file *filp, void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+
+ if (ret || !write)
+ return ret;
+
+ sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
+ sysctl_sched_min_granularity);
+
+ return 0;
+}
+#endif
/*
* The idea is to set a period in which each task runs once.
@@ -224,11 +247,11 @@ static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
static u64 __sched_period(unsigned long nr_running)
{
u64 period = sysctl_sched_latency;
- unsigned long nr_latency = sysctl_sched_nr_latency;
+ unsigned long nr_latency = sched_nr_latency;
if (unlikely(nr_running > nr_latency)) {
+ period = sysctl_sched_min_granularity;
period *= nr_running;
- do_div(period, nr_latency);
}
return period;
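With the hunk above, the scheduling period stays at sysctl_sched_latency until more than sched_nr_latency tasks are runnable, after which it grows as sysctl_sched_min_granularity * nr_running (sched_nr_latency itself being DIV_ROUND_UP(latency, min_granularity), per sched_nr_latency_handler() above). A standalone sketch using the unscaled 20 ms / 4 ms defaults quoted in this file:

#include <stdio.h>

int main(void)
{
        unsigned long long latency = 20000000ULL;    /* sysctl_sched_latency         */
        unsigned long long min_gran = 4000000ULL;    /* sysctl_sched_min_granularity */
        /* DIV_ROUND_UP(latency, min_gran), as in sched_nr_latency_handler() */
        unsigned long nr_latency = (latency + min_gran - 1) / min_gran;
        unsigned long nr_running;

        for (nr_running = 1; nr_running <= 10; nr_running++) {
                unsigned long long period = latency;

                if (nr_running > nr_latency)
                        period = min_gran * nr_running;
                printf("nr_running=%2lu -> period %llu ns\n", nr_running, period);
        }
        return 0;
}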
@@ -259,6 +282,7 @@ static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
{
u64 vslice = __sched_period(nr_running);
+ vslice *= NICE_0_LOAD;
do_div(vslice, rq_weight);
return vslice;
@@ -329,6 +353,12 @@ static void update_curr(struct cfs_rq *cfs_rq)
__update_curr(cfs_rq, curr, delta_exec);
curr->exec_start = now;
+
+ if (entity_is_task(curr)) {
+ struct task_struct *curtask = task_of(curr);
+
+ cpuacct_charge(curtask, delta_exec);
+ }
}
static inline void
@@ -355,6 +385,9 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
schedstat_set(se->wait_max, max(se->wait_max,
rq_of(cfs_rq)->clock - se->wait_start));
+ schedstat_set(se->wait_count, se->wait_count + 1);
+ schedstat_set(se->wait_sum, se->wait_sum +
+ rq_of(cfs_rq)->clock - se->wait_start);
schedstat_set(se->wait_start, 0);
}
@@ -406,6 +439,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
#ifdef CONFIG_SCHEDSTATS
if (se->sleep_start) {
u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
+ struct task_struct *tsk = task_of(se);
if ((s64)delta < 0)
delta = 0;
@@ -415,9 +449,12 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->sleep_start = 0;
se->sum_sleep_runtime += delta;
+
+ account_scheduler_latency(tsk, delta >> 10, 1);
}
if (se->block_start) {
u64 delta = rq_of(cfs_rq)->clock - se->block_start;
+ struct task_struct *tsk = task_of(se);
if ((s64)delta < 0)
delta = 0;
@@ -434,11 +471,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
* time that the task spent sleeping:
*/
if (unlikely(prof_on == SLEEP_PROFILING)) {
- struct task_struct *tsk = task_of(se);
profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
delta >> 20);
}
+ account_scheduler_latency(tsk, delta >> 10, 0);
}
#endif
}
@@ -472,19 +509,25 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
vruntime += sched_vslice(cfs_rq)/2;
+ /*
+ * The 'current' period is already promised to the current tasks;
+ * however, the extra weight of the new task will slow them down a
+ * little. Place the new task so that it fits in the slot that
+ * stays open at the end.
+ */
if (initial && sched_feat(START_DEBIT))
vruntime += sched_vslice_add(cfs_rq, se);
if (!initial) {
- if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) &&
- task_of(se)->policy != SCHED_BATCH)
+ /* sleeps up to a single latency don't count. */
+ if (sched_feat(NEW_FAIR_SLEEPERS))
vruntime -= sysctl_sched_latency;
- vruntime = max_t(s64, vruntime, se->vruntime);
+ /* ensure we never gain time by being placed backwards. */
+ vruntime = max_vruntime(se->vruntime, vruntime);
}
se->vruntime = vruntime;
-
}
static void
@@ -517,7 +560,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
update_stats_dequeue(cfs_rq, se);
if (sleep) {
- se->peer_preempt = 0;
#ifdef CONFIG_SCHEDSTATS
if (entity_is_task(se)) {
struct task_struct *tsk = task_of(se);
@@ -545,10 +587,8 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
- if (delta_exec > ideal_runtime ||
- (sched_feat(PREEMPT_RESTRICT) && curr->peer_preempt))
+ if (delta_exec > ideal_runtime)
resched_task(rq_of(cfs_rq)->curr);
- curr->peer_preempt = 0;
}
static void
@@ -611,13 +651,29 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
cfs_rq->curr = NULL;
}
-static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+static void
+entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
/*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
+#ifdef CONFIG_SCHED_HRTICK
+ /*
+ * queued ticks are scheduled to match the slice, so don't bother
+ * validating it and just reschedule.
+ */
+ if (queued)
+ return resched_task(rq_of(cfs_rq)->curr);
+ /*
+ * don't let the period tick interfere with the hrtick preemption
+ */
+ if (!sched_feat(DOUBLE_TICK) &&
+ hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
+ return;
+#endif
+
if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
check_preempt_tick(cfs_rq, curr);
}
@@ -659,7 +715,7 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
/* Iterate thr' all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
- list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+ list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
/* Do the two (enqueued) entities belong to the same group ? */
static inline int
@@ -676,6 +732,8 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
return se->parent;
}
+#define GROUP_IMBALANCE_PCT 20
+
#else /* CONFIG_FAIR_GROUP_SCHED */
#define for_each_sched_entity(se) \
@@ -721,6 +779,43 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
#endif /* CONFIG_FAIR_GROUP_SCHED */
+#ifdef CONFIG_SCHED_HRTICK
+static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
+{
+ int requeue = rq->curr == p;
+ struct sched_entity *se = &p->se;
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ WARN_ON(task_rq(p) != rq);
+
+ if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
+ u64 slice = sched_slice(cfs_rq, se);
+ u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
+ s64 delta = slice - ran;
+
+ if (delta < 0) {
+ if (rq->curr == p)
+ resched_task(p);
+ return;
+ }
+
+ /*
+ * Don't schedule slices shorter than 10000ns, that just
+ * doesn't make sense. Rely on vruntime for fairness.
+ */
+ if (!requeue)
+ delta = max(10000LL, delta);
+
+ hrtick_start(rq, delta, requeue);
+ }
+}
+#else
+static inline void
+hrtick_start_fair(struct rq *rq, struct task_struct *p)
+{
+}
+#endif
+
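hrtick_start_fair() above programs a high-resolution timer for whatever is left of the current entity's slice, rescheduling immediately if the slice is already exhausted and never arming a timer shorter than 10000 ns. A standalone sketch of that remainder computation, with illustrative numbers:

#include <stdio.h>

int main(void)
{
        long long slice = 6000000;   /* sched_slice() for the entity, in ns          */
        long long ran = 5995000;     /* runtime consumed since prev_sum_exec_runtime */
        long long delta = slice - ran;
        int requeue = 0;             /* nonzero when the task is already rq->curr    */

        if (delta < 0) {
                printf("slice exhausted: reschedule now\n");
                return 0;
        }
        /* don't arm timers shorter than 10000 ns; vruntime keeps things fair */
        if (!requeue && delta < 10000)
                delta = 10000;
        printf("arm the hrtick to fire in %lld ns\n", delta);
        return 0;
}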
/*
* The enqueue_task method is called before nr_running is
* increased. Here we update the fair scheduling stats and
@@ -729,15 +824,28 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
+ struct sched_entity *se = &p->se,
+ *topse = NULL; /* Highest schedulable entity */
+ int incload = 1;
for_each_sched_entity(se) {
- if (se->on_rq)
+ topse = se;
+ if (se->on_rq) {
+ incload = 0;
break;
+ }
cfs_rq = cfs_rq_of(se);
enqueue_entity(cfs_rq, se, wakeup);
wakeup = 1;
}
+ /* Increment cpu load if we just enqueued the first task of a group on
+ * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
+ * at the highest grouping level.
+ */
+ if (incload)
+ inc_cpu_load(rq, topse->load.weight);
+
+ hrtick_start_fair(rq, rq->curr);
}
/*
@@ -748,16 +856,30 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
+ struct sched_entity *se = &p->se,
+ *topse = NULL; /* Highest schedulable entity */
+ int decload = 1;
for_each_sched_entity(se) {
+ topse = se;
cfs_rq = cfs_rq_of(se);
dequeue_entity(cfs_rq, se, sleep);
/* Don't dequeue parent if it has other entities besides us */
- if (cfs_rq->load.weight)
+ if (cfs_rq->load.weight) {
+ if (parent_entity(se))
+ decload = 0;
break;
+ }
sleep = 1;
}
+ /* Decrement cpu load if we just dequeued the last task of a group on
+ * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
+ * at the highest grouping level.
+ */
+ if (decload)
+ dec_cpu_load(rq, topse->load.weight);
+
+ hrtick_start_fair(rq, rq->curr);
}
/*
@@ -767,8 +889,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
*/
static void yield_task_fair(struct rq *rq)
{
- struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
- struct sched_entity *rightmost, *se = &rq->curr->se;
+ struct task_struct *curr = rq->curr;
+ struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+ struct sched_entity *rightmost, *se = &curr->se;
/*
* Are we the only task in the tree?
@@ -776,7 +899,7 @@ static void yield_task_fair(struct rq *rq)
if (unlikely(cfs_rq->nr_running == 1))
return;
- if (likely(!sysctl_sched_compat_yield)) {
+ if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
__update_rq_clock(rq);
/*
* Update run-time statistics of the 'current'.
@@ -804,6 +927,154 @@ static void yield_task_fair(struct rq *rq)
}
/*
+ * wake_idle() will wake a task on an idle cpu if task->cpu is
+ * not idle and an idle cpu is available. The span of cpus to
+ * search starts with cpus closest then further out as needed,
+ * so we always favor a closer, idle cpu.
+ *
+ * Returns the CPU we should wake onto.
+ */
+#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
+static int wake_idle(int cpu, struct task_struct *p)
+{
+ cpumask_t tmp;
+ struct sched_domain *sd;
+ int i;
+
+ /*
+ * If it is idle, then it is the best cpu to run this task.
+ *
+ * This cpu is also the best if it has more than one task already.
+ * Siblings must also be busy (in most cases), as they didn't already
+ * pick up the extra load from this cpu and hence we need not check
+ * sibling runqueue info. This will avoid the checks and cache-miss
+ * penalties associated with that.
+ */
+ if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1)
+ return cpu;
+
+ for_each_domain(cpu, sd) {
+ if (sd->flags & SD_WAKE_IDLE) {
+ cpus_and(tmp, sd->span, p->cpus_allowed);
+ for_each_cpu_mask(i, tmp) {
+ if (idle_cpu(i)) {
+ if (i != task_cpu(p)) {
+ schedstat_inc(p,
+ se.nr_wakeups_idle);
+ }
+ return i;
+ }
+ }
+ } else {
+ break;
+ }
+ }
+ return cpu;
+}
+#else
+static inline int wake_idle(int cpu, struct task_struct *p)
+{
+ return cpu;
+}
+#endif
+
+#ifdef CONFIG_SMP
+static int select_task_rq_fair(struct task_struct *p, int sync)
+{
+ int cpu, this_cpu;
+ struct rq *rq;
+ struct sched_domain *sd, *this_sd = NULL;
+ int new_cpu;
+
+ cpu = task_cpu(p);
+ rq = task_rq(p);
+ this_cpu = smp_processor_id();
+ new_cpu = cpu;
+
+ if (cpu == this_cpu)
+ goto out_set_cpu;
+
+ for_each_domain(this_cpu, sd) {
+ if (cpu_isset(cpu, sd->span)) {
+ this_sd = sd;
+ break;
+ }
+ }
+
+ if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
+ goto out_set_cpu;
+
+ /*
+ * Check for affine wakeup and passive balancing possibilities.
+ */
+ if (this_sd) {
+ int idx = this_sd->wake_idx;
+ unsigned int imbalance;
+ unsigned long load, this_load;
+
+ imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
+
+ load = source_load(cpu, idx);
+ this_load = target_load(this_cpu, idx);
+
+ new_cpu = this_cpu; /* Wake to this CPU if we can */
+
+ if (this_sd->flags & SD_WAKE_AFFINE) {
+ unsigned long tl = this_load;
+ unsigned long tl_per_task;
+
+ /*
+ * Attract cache-cold tasks on sync wakeups:
+ */
+ if (sync && !task_hot(p, rq->clock, this_sd))
+ goto out_set_cpu;
+
+ schedstat_inc(p, se.nr_wakeups_affine_attempts);
+ tl_per_task = cpu_avg_load_per_task(this_cpu);
+
+ /*
+ * If sync wakeup then subtract the (maximum possible)
+ * effect of the currently running task from the load
+ * of the current CPU:
+ */
+ if (sync)
+ tl -= current->se.load.weight;
+
+ if ((tl <= load &&
+ tl + target_load(cpu, idx) <= tl_per_task) ||
+ 100*(tl + p->se.load.weight) <= imbalance*load) {
+ /*
+ * This domain has SD_WAKE_AFFINE and
+ * p is cache cold in this domain, and
+ * there is no bad imbalance.
+ */
+ schedstat_inc(this_sd, ttwu_move_affine);
+ schedstat_inc(p, se.nr_wakeups_affine);
+ goto out_set_cpu;
+ }
+ }
+
+ /*
+ * Start passive balancing when half the imbalance_pct
+ * limit is reached.
+ */
+ if (this_sd->flags & SD_WAKE_BALANCE) {
+ if (imbalance*this_load <= 100*load) {
+ schedstat_inc(this_sd, ttwu_move_balance);
+ schedstat_inc(p, se.nr_wakeups_passive);
+ goto out_set_cpu;
+ }
+ }
+ }
+
+ new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
+out_set_cpu:
+ return wake_idle(new_cpu, p);
+}
+#endif /* CONFIG_SMP */
+
+
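The affine-wakeup test above compares the waking cpu's load (minus the current task's weight for sync wakeups) against the previous cpu's load, using a softened version of the domain's imbalance_pct. A rough user-space sketch with made-up load figures; imbalance_pct = 125 is a typical sched-domain value and the prev-cpu load stands in for target_load(cpu, idx), both assumptions for illustration:

#include <stdio.h>

int main(void)
{
        unsigned int imbalance_pct = 125;                          /* assumed domain value */
        unsigned int imbalance = 100 + (imbalance_pct - 100) / 2;  /* 112 */
        unsigned long load = 2048;          /* source_load() of the task's prev cpu */
        unsigned long this_load = 1024;     /* target_load() of the waking cpu      */
        unsigned long curr_weight = 1024;   /* current->se.load.weight              */
        unsigned long p_weight = 1024;      /* p->se.load.weight                    */
        unsigned long tl_per_task = 1024;   /* cpu_avg_load_per_task(this_cpu)      */
        unsigned long tl = this_load;
        int sync = 1;

        /* a sync waker is about to sleep, so discount its own weight */
        if (sync)
                tl -= curr_weight;

        /* tl + load stands in for tl + target_load(prev_cpu, idx) */
        if ((tl <= load && tl + load <= tl_per_task) ||
            100 * (tl + p_weight) <= imbalance * load)
                printf("affine wakeup: pull the task to the waking cpu\n");
        else
                printf("leave the task on its previous cpu\n");
        return 0;
}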
+/*
* Preempt the current task with a newly woken task if needed:
*/
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
@@ -811,7 +1082,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
struct sched_entity *se = &curr->se, *pse = &p->se;
- s64 delta, gran;
+ unsigned long gran;
if (unlikely(rt_prio(p->prio))) {
update_rq_clock(rq);
@@ -826,28 +1097,29 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
if (unlikely(p->policy == SCHED_BATCH))
return;
- if (sched_feat(WAKEUP_PREEMPT)) {
- while (!is_same_group(se, pse)) {
- se = parent_entity(se);
- pse = parent_entity(pse);
- }
+ if (!sched_feat(WAKEUP_PREEMPT))
+ return;
- delta = se->vruntime - pse->vruntime;
- gran = sysctl_sched_wakeup_granularity;
- if (unlikely(se->load.weight != NICE_0_LOAD))
- gran = calc_delta_fair(gran, &se->load);
+ while (!is_same_group(se, pse)) {
+ se = parent_entity(se);
+ pse = parent_entity(pse);
+ }
- if (delta > gran) {
- int now = !sched_feat(PREEMPT_RESTRICT);
+ gran = sysctl_sched_wakeup_granularity;
+ /*
+ * More easily preempt - nice tasks, while not making
+ * it harder for + nice tasks.
+ */
+ if (unlikely(se->load.weight > NICE_0_LOAD))
+ gran = calc_delta_fair(gran, &se->load);
- if (now || p->prio < curr->prio || !se->peer_preempt++)
- resched_task(curr);
- }
- }
+ if (pse->vruntime + gran < se->vruntime)
+ resched_task(curr);
}
static struct task_struct *pick_next_task_fair(struct rq *rq)
{
+ struct task_struct *p;
struct cfs_rq *cfs_rq = &rq->cfs;
struct sched_entity *se;
@@ -859,7 +1131,10 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
cfs_rq = group_cfs_rq(se);
} while (cfs_rq);
- return task_of(se);
+ p = task_of(se);
+ hrtick_start_fair(rq, p);
+
+ return p;
}
/*
@@ -916,25 +1191,6 @@ static struct task_struct *load_balance_next_fair(void *arg)
return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
}
-#ifdef CONFIG_FAIR_GROUP_SCHED
-static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
-{
- struct sched_entity *curr;
- struct task_struct *p;
-
- if (!cfs_rq->nr_running)
- return MAX_PRIO;
-
- curr = cfs_rq->curr;
- if (!curr)
- curr = __pick_next_entity(cfs_rq);
-
- p = task_of(curr);
-
- return p->prio;
-}
-#endif
-
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move,
@@ -944,28 +1200,45 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
struct cfs_rq *busy_cfs_rq;
long rem_load_move = max_load_move;
struct rq_iterator cfs_rq_iterator;
+ unsigned long load_moved;
cfs_rq_iterator.start = load_balance_start_fair;
cfs_rq_iterator.next = load_balance_next_fair;
for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED
- struct cfs_rq *this_cfs_rq;
- long imbalance;
- unsigned long maxload;
+ struct cfs_rq *this_cfs_rq = busy_cfs_rq->tg->cfs_rq[this_cpu];
+ unsigned long maxload, task_load, group_weight;
+ unsigned long thisload, per_task_load;
+ struct sched_entity *se = busy_cfs_rq->tg->se[busiest->cpu];
- this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
+ task_load = busy_cfs_rq->load.weight;
+ group_weight = se->load.weight;
- imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
- /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
- if (imbalance <= 0)
+ /*
+ * 'group_weight' is contributed by tasks of total weight
+ * 'task_load'. To move 'rem_load_move' worth of weight only,
+ * we need to move a maximum task load of:
+ *
+ * maxload = (remload / group_weight) * task_load;
+ */
+ maxload = (rem_load_move * task_load) / group_weight;
+
+ if (!maxload || !task_load)
continue;
- /* Don't pull more than imbalance/2 */
- imbalance /= 2;
- maxload = min(rem_load_move, imbalance);
+ per_task_load = task_load / busy_cfs_rq->nr_running;
+ /*
+	 * balance_tasks will try to forcibly move at least one task if
+	 * possible (because of SCHED_LOAD_SCALE_FUZZ). Avoid that if
+	 * maxload is less than GROUP_IMBALANCE_PCT% of the per_task_load.
+ */
+ if (100 * maxload < GROUP_IMBALANCE_PCT * per_task_load)
+ continue;
- *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
+ /* Disable priority-based load balance */
+ *this_best_prio = 0;
+ thisload = this_cfs_rq->load.weight;
#else
# define maxload rem_load_move
#endif
@@ -974,11 +1247,33 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
* load_balance_[start|next]_fair iterators
*/
cfs_rq_iterator.arg = busy_cfs_rq;
- rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
+ load_moved = balance_tasks(this_rq, this_cpu, busiest,
maxload, sd, idle, all_pinned,
this_best_prio,
&cfs_rq_iterator);
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ /*
+ * load_moved holds the task load that was moved. The
+ * effective (group) weight moved would be:
+ * load_moved_eff = load_moved/task_load * group_weight;
+ */
+ load_moved = (group_weight * load_moved) / task_load;
+
+ /* Adjust shares on both cpus to reflect load_moved */
+ group_weight -= load_moved;
+ set_se_shares(se, group_weight);
+
+ se = busy_cfs_rq->tg->se[this_cpu];
+ if (!thisload)
+ group_weight = load_moved;
+ else
+ group_weight = se->load.weight + load_moved;
+ set_se_shares(se, group_weight);
+#endif
+
+ rem_load_move -= load_moved;
+
if (rem_load_move <= 0)
break;
}
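To make the group load-balance arithmetic in this hunk concrete, here is a userspace sketch of the two conversions: remaining group weight to a maximum task load to request, and the task load actually moved back to an effective group weight. All numbers are invented for the example.

#include <stdio.h>

int main(void)
{
	unsigned long rem_load_move = 512;	/* group weight we still want to move   */
	unsigned long task_load     = 3072;	/* busy_cfs_rq->load.weight (task load) */
	unsigned long group_weight  = 1024;	/* se->load.weight of the busy group    */

	/* maxload = (remload / group_weight) * task_load, done multiply-first */
	unsigned long maxload = (rem_load_move * task_load) / group_weight;

	/* suppose balance_tasks() then moved this much task load */
	unsigned long load_moved = 1024;

	/* effective group weight moved: load_moved / task_load * group_weight */
	unsigned long load_moved_eff = (group_weight * load_moved) / task_load;

	printf("maxload=%lu load_moved_eff=%lu\n", maxload, load_moved_eff);
	return 0;
}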
@@ -1014,18 +1309,18 @@ move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
/*
* scheduler tick hitting a task of our scheduling class:
*/
-static void task_tick_fair(struct rq *rq, struct task_struct *curr)
+static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &curr->se;
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
- entity_tick(cfs_rq, se);
+ entity_tick(cfs_rq, se, queued);
}
}
-#define swap(a,b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
+#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
/*
* Share the fairness runtime between parent and child, thus the
@@ -1045,8 +1340,9 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
update_curr(cfs_rq);
place_entity(cfs_rq, se, 1);
+ /* 'curr' will be NULL if the child belongs to a different group */
if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
- curr->vruntime < se->vruntime) {
+ curr && curr->vruntime < se->vruntime) {
/*
* Upon rescheduling, sched_class::put_prev_task() will place
* 'current' within the tree based on its new key value.
@@ -1054,11 +1350,46 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
swap(curr->vruntime, se->vruntime);
}
- se->peer_preempt = 0;
enqueue_task_fair(rq, p, 0);
resched_task(rq->curr);
}
+/*
+ * Priority of the task has changed. Check to see if we preempt
+ * the current task.
+ */
+static void prio_changed_fair(struct rq *rq, struct task_struct *p,
+ int oldprio, int running)
+{
+ /*
+ * Reschedule if we are currently running on this runqueue and
+ * our priority decreased, or if we are not currently running on
+ * this runqueue and our priority is higher than the current's
+ */
+ if (running) {
+ if (p->prio > oldprio)
+ resched_task(rq->curr);
+ } else
+ check_preempt_curr(rq, p);
+}
+
+/*
+ * We switched to the sched_fair class.
+ */
+static void switched_to_fair(struct rq *rq, struct task_struct *p,
+ int running)
+{
+ /*
+ * We were most likely switched from sched_rt, so
+ * kick off the schedule if running, otherwise just see
+ * if we can still preempt the current task.
+ */
+ if (running)
+ resched_task(rq->curr);
+ else
+ check_preempt_curr(rq, p);
+}
+
/* Account for a task changing its policy or group.
*
* This routine is mostly called to set cfs_rq->curr field when a task
@@ -1080,6 +1411,9 @@ static const struct sched_class fair_sched_class = {
.enqueue_task = enqueue_task_fair,
.dequeue_task = dequeue_task_fair,
.yield_task = yield_task_fair,
+#ifdef CONFIG_SMP
+ .select_task_rq = select_task_rq_fair,
+#endif /* CONFIG_SMP */
.check_preempt_curr = check_preempt_wakeup,
@@ -1094,6 +1428,9 @@ static const struct sched_class fair_sched_class = {
.set_curr_task = set_curr_task_fair,
.task_tick = task_tick_fair,
.task_new = task_new_fair,
+
+ .prio_changed = prio_changed_fair,
+ .switched_to = switched_to_fair,
};
#ifdef CONFIG_SCHED_DEBUG
@@ -1104,7 +1441,9 @@ static void print_cfs_stats(struct seq_file *m, int cpu)
#ifdef CONFIG_FAIR_GROUP_SCHED
print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
#endif
+ rcu_read_lock();
for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
print_cfs_rq(m, cpu, cfs_rq);
+ rcu_read_unlock();
}
#endif
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index bf9c25c15b8..2bcafa37563 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -5,6 +5,12 @@
* handled in sched_fair.c)
*/
+#ifdef CONFIG_SMP
+static int select_task_rq_idle(struct task_struct *p, int sync)
+{
+	return task_cpu(p); /* IDLE tasks are never migrated */
+}
+#endif /* CONFIG_SMP */
/*
* Idle tasks are unconditionally rescheduled:
*/
@@ -55,7 +61,7 @@ move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
}
#endif
-static void task_tick_idle(struct rq *rq, struct task_struct *curr)
+static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}
@@ -63,6 +69,33 @@ static void set_curr_task_idle(struct rq *rq)
{
}
+static void switched_to_idle(struct rq *rq, struct task_struct *p,
+ int running)
+{
+ /* Can this actually happen?? */
+ if (running)
+ resched_task(rq->curr);
+ else
+ check_preempt_curr(rq, p);
+}
+
+static void prio_changed_idle(struct rq *rq, struct task_struct *p,
+ int oldprio, int running)
+{
+ /* This can happen for hot plug CPUS */
+
+ /*
+ * Reschedule if we are currently running on this runqueue and
+ * our priority decreased, or if we are not currently running on
+ * this runqueue and our priority is higher than the current's
+ */
+ if (running) {
+ if (p->prio > oldprio)
+ resched_task(rq->curr);
+ } else
+ check_preempt_curr(rq, p);
+}
+
/*
* Simple, special scheduling class for the per-CPU idle tasks:
*/
@@ -72,6 +105,9 @@ const struct sched_class idle_sched_class = {
/* dequeue is not valid, we print a debug message there: */
.dequeue_task = dequeue_task_idle,
+#ifdef CONFIG_SMP
+ .select_task_rq = select_task_rq_idle,
+#endif /* CONFIG_SMP */
.check_preempt_curr = check_preempt_curr_idle,
@@ -85,5 +121,9 @@ const struct sched_class idle_sched_class = {
.set_curr_task = set_curr_task_idle,
.task_tick = task_tick_idle,
+
+ .prio_changed = prio_changed_idle,
+ .switched_to = switched_to_idle,
+
/* no .task_new for idle tasks */
};
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 8abd752a0eb..274b40d7bef 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,6 +3,217 @@
* policies)
*/
+#ifdef CONFIG_SMP
+
+static inline int rt_overloaded(struct rq *rq)
+{
+ return atomic_read(&rq->rd->rto_count);
+}
+
+static inline void rt_set_overload(struct rq *rq)
+{
+ cpu_set(rq->cpu, rq->rd->rto_mask);
+ /*
+ * Make sure the mask is visible before we set
+ * the overload count. That is checked to determine
+ * if we should look at the mask. It would be a shame
+ * if we looked at the mask, but the mask was not
+ * updated yet.
+ */
+ wmb();
+ atomic_inc(&rq->rd->rto_count);
+}
+
+static inline void rt_clear_overload(struct rq *rq)
+{
+ /* the order here really doesn't matter */
+ atomic_dec(&rq->rd->rto_count);
+ cpu_clear(rq->cpu, rq->rd->rto_mask);
+}
+
+static void update_rt_migration(struct rq *rq)
+{
+ if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
+ if (!rq->rt.overloaded) {
+ rt_set_overload(rq);
+ rq->rt.overloaded = 1;
+ }
+ } else if (rq->rt.overloaded) {
+ rt_clear_overload(rq);
+ rq->rt.overloaded = 0;
+ }
+}
+#endif /* CONFIG_SMP */
+
+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+ return container_of(rt_se, struct task_struct, rt);
+}
+
+static inline int on_rt_rq(struct sched_rt_entity *rt_se)
+{
+ return !list_empty(&rt_se->run_list);
+}
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
+static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
+{
+ if (!rt_rq->tg)
+ return SCHED_RT_FRAC;
+
+ return rt_rq->tg->rt_ratio;
+}
+
+#define for_each_leaf_rt_rq(rt_rq, rq) \
+ list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+ return rt_rq->rq;
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+ return rt_se->rt_rq;
+}
+
+#define for_each_sched_rt_entity(rt_se) \
+ for (; rt_se; rt_se = rt_se->parent)
+
+static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
+{
+ return rt_se->my_q;
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
+
+static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
+{
+ struct sched_rt_entity *rt_se = rt_rq->rt_se;
+
+ if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
+ struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
+
+ enqueue_rt_entity(rt_se);
+ if (rt_rq->highest_prio < curr->prio)
+ resched_task(curr);
+ }
+}
+
+static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
+{
+ struct sched_rt_entity *rt_se = rt_rq->rt_se;
+
+ if (rt_se && on_rt_rq(rt_se))
+ dequeue_rt_entity(rt_se);
+}
+
+#else
+
+static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
+{
+ return sysctl_sched_rt_ratio;
+}
+
+#define for_each_leaf_rt_rq(rt_rq, rq) \
+ for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+ return container_of(rt_rq, struct rq, rt);
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+ struct task_struct *p = rt_task_of(rt_se);
+ struct rq *rq = task_rq(p);
+
+ return &rq->rt;
+}
+
+#define for_each_sched_rt_entity(rt_se) \
+ for (; rt_se; rt_se = NULL)
+
+static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
+{
+ return NULL;
+}
+
+static inline void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
+{
+}
+
+static inline void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
+{
+}
+
+#endif
+
+static inline int rt_se_prio(struct sched_rt_entity *rt_se)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+ if (rt_rq)
+ return rt_rq->highest_prio;
+#endif
+
+ return rt_task_of(rt_se)->prio;
+}
+
+static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
+{
+ unsigned int rt_ratio = sched_rt_ratio(rt_rq);
+ u64 period, ratio;
+
+ if (rt_ratio == SCHED_RT_FRAC)
+ return 0;
+
+ if (rt_rq->rt_throttled)
+ return 1;
+
+ period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
+ ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
+
+ if (rt_rq->rt_time > ratio) {
+ struct rq *rq = rq_of_rt_rq(rt_rq);
+
+ rq->rt_throttled = 1;
+ rt_rq->rt_throttled = 1;
+
+ sched_rt_ratio_dequeue(rt_rq);
+ return 1;
+ }
+
+ return 0;
+}
+
+static void update_sched_rt_period(struct rq *rq)
+{
+ struct rt_rq *rt_rq;
+ u64 period;
+
+ while (rq->clock > rq->rt_period_expire) {
+ period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
+ rq->rt_period_expire += period;
+
+ for_each_leaf_rt_rq(rt_rq, rq) {
+ unsigned long rt_ratio = sched_rt_ratio(rt_rq);
+ u64 ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
+
+ rt_rq->rt_time -= min(rt_rq->rt_time, ratio);
+ if (rt_rq->rt_throttled) {
+ rt_rq->rt_throttled = 0;
+ sched_rt_ratio_enqueue(rt_rq);
+ }
+ }
+
+ rq->rt_throttled = 0;
+ }
+}
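A worked example of the throttling budget used by sched_rt_ratio_exceeded() and the per-period decay above, as a small userspace sketch. The SCHED_RT_FRAC_SHIFT value, the period, and the ratio are assumptions chosen only to make the arithmetic concrete.

#include <stdio.h>

#define NSEC_PER_MSEC		1000000ULL
#define SCHED_RT_FRAC_SHIFT	16	/* assumed value, for illustration only */

int main(void)
{
	unsigned long long sched_rt_period_ms = 1000;	/* assumed 1000 ms period  */
	unsigned long long rt_ratio = 62259;		/* assumed ~95% of 1 << 16 */

	unsigned long long period = sched_rt_period_ms * NSEC_PER_MSEC;
	unsigned long long budget = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;

	/* once rt_rq->rt_time exceeds this budget the rt_rq is throttled,
	 * until the per-period decay above brings rt_time back down */
	printf("period=%lluns, rt budget=%lluns\n", period, budget);
	return 0;
}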
+
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
@@ -10,6 +221,8 @@
static void update_curr_rt(struct rq *rq)
{
struct task_struct *curr = rq->curr;
+ struct sched_rt_entity *rt_se = &curr->rt;
+ struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
u64 delta_exec;
if (!task_has_rt_policy(curr))
@@ -23,47 +236,229 @@ static void update_curr_rt(struct rq *rq)
curr->se.sum_exec_runtime += delta_exec;
curr->se.exec_start = rq->clock;
+ cpuacct_charge(curr, delta_exec);
+
+ rt_rq->rt_time += delta_exec;
+ /*
+ * might make it a tad more accurate:
+ *
+ * update_sched_rt_period(rq);
+ */
+ if (sched_rt_ratio_exceeded(rt_rq))
+ resched_task(curr);
}
-static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
+static inline
+void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+ WARN_ON(!rt_prio(rt_se_prio(rt_se)));
+ rt_rq->rt_nr_running++;
+#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+ if (rt_se_prio(rt_se) < rt_rq->highest_prio)
+ rt_rq->highest_prio = rt_se_prio(rt_se);
+#endif
+#ifdef CONFIG_SMP
+ if (rt_se->nr_cpus_allowed > 1) {
+ struct rq *rq = rq_of_rt_rq(rt_rq);
+ rq->rt.rt_nr_migratory++;
+ }
+
+ update_rt_migration(rq_of_rt_rq(rt_rq));
+#endif
+}
+
+static inline
+void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+ WARN_ON(!rt_prio(rt_se_prio(rt_se)));
+ WARN_ON(!rt_rq->rt_nr_running);
+ rt_rq->rt_nr_running--;
+#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+ if (rt_rq->rt_nr_running) {
+ struct rt_prio_array *array;
+
+ WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
+ if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
+ /* recalculate */
+ array = &rt_rq->active;
+ rt_rq->highest_prio =
+ sched_find_first_bit(array->bitmap);
+ } /* otherwise leave rq->highest prio alone */
+ } else
+ rt_rq->highest_prio = MAX_RT_PRIO;
+#endif
+#ifdef CONFIG_SMP
+ if (rt_se->nr_cpus_allowed > 1) {
+ struct rq *rq = rq_of_rt_rq(rt_rq);
+ rq->rt.rt_nr_migratory--;
+ }
+
+ update_rt_migration(rq_of_rt_rq(rt_rq));
+#endif /* CONFIG_SMP */
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+ struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
+ struct rt_prio_array *array = &rt_rq->active;
+ struct rt_rq *group_rq = group_rt_rq(rt_se);
+
+ if (group_rq && group_rq->rt_throttled)
+ return;
+
+ list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+ __set_bit(rt_se_prio(rt_se), array->bitmap);
+
+ inc_rt_tasks(rt_se, rt_rq);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+ struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
+ struct rt_prio_array *array = &rt_rq->active;
+
+ list_del_init(&rt_se->run_list);
+ if (list_empty(array->queue + rt_se_prio(rt_se)))
+ __clear_bit(rt_se_prio(rt_se), array->bitmap);
+
+ dec_rt_tasks(rt_se, rt_rq);
+}
+
+/*
+ * Because the prio of an upper entry depends on the lower
+ * entries, we must remove entries top - down.
+ *
+ * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
+ * doesn't matter much for now, as h=2 for GROUP_SCHED.
+ */
+static void dequeue_rt_stack(struct task_struct *p)
{
- struct rt_prio_array *array = &rq->rt.active;
+ struct sched_rt_entity *rt_se, *top_se;
- list_add_tail(&p->run_list, array->queue + p->prio);
- __set_bit(p->prio, array->bitmap);
+ /*
+ * dequeue all, top - down.
+ */
+ do {
+ rt_se = &p->rt;
+ top_se = NULL;
+ for_each_sched_rt_entity(rt_se) {
+ if (on_rt_rq(rt_se))
+ top_se = rt_se;
+ }
+ if (top_se)
+ dequeue_rt_entity(top_se);
+ } while (top_se);
}
/*
* Adding/removing a task to/from a priority array:
*/
+static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
+{
+ struct sched_rt_entity *rt_se = &p->rt;
+
+ if (wakeup)
+ rt_se->timeout = 0;
+
+ dequeue_rt_stack(p);
+
+ /*
+ * enqueue everybody, bottom - up.
+ */
+ for_each_sched_rt_entity(rt_se)
+ enqueue_rt_entity(rt_se);
+
+ inc_cpu_load(rq, p->se.load.weight);
+}
+
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
- struct rt_prio_array *array = &rq->rt.active;
+ struct sched_rt_entity *rt_se = &p->rt;
+ struct rt_rq *rt_rq;
update_curr_rt(rq);
- list_del(&p->run_list);
- if (list_empty(array->queue + p->prio))
- __clear_bit(p->prio, array->bitmap);
+ dequeue_rt_stack(p);
+
+ /*
+ * re-enqueue all non-empty rt_rq entities.
+ */
+ for_each_sched_rt_entity(rt_se) {
+ rt_rq = group_rt_rq(rt_se);
+ if (rt_rq && rt_rq->rt_nr_running)
+ enqueue_rt_entity(rt_se);
+ }
+
+ dec_cpu_load(rq, p->se.load.weight);
}
/*
* Put task to the end of the run list without the overhead of dequeue
* followed by enqueue.
*/
+static
+void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
+{
+ struct rt_prio_array *array = &rt_rq->active;
+
+ list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+}
+
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
- struct rt_prio_array *array = &rq->rt.active;
+ struct sched_rt_entity *rt_se = &p->rt;
+ struct rt_rq *rt_rq;
- list_move_tail(&p->run_list, array->queue + p->prio);
+ for_each_sched_rt_entity(rt_se) {
+ rt_rq = rt_rq_of_se(rt_se);
+ requeue_rt_entity(rt_rq, rt_se);
+ }
}
-static void
-yield_task_rt(struct rq *rq)
+static void yield_task_rt(struct rq *rq)
{
requeue_task_rt(rq, rq->curr);
}
+#ifdef CONFIG_SMP
+static int find_lowest_rq(struct task_struct *task);
+
+static int select_task_rq_rt(struct task_struct *p, int sync)
+{
+ struct rq *rq = task_rq(p);
+
+ /*
+ * If the current task is an RT task, then
+ * try to see if we can wake this RT task up on another
+ * runqueue. Otherwise simply start this RT task
+ * on its current runqueue.
+ *
+	 * We want to avoid overloading runqueues, even if the woken
+	 * RT task is of higher priority than the current RT task.
+	 * RT tasks behave differently than other tasks: if one gets
+	 * preempted, we try to push it off to another queue. So
+	 * trying to keep a preempting RT task on the same cache-hot
+	 * CPU will force the running RT task onto a cold CPU. We
+	 * would waste all the cache of the lower-priority RT task in
+	 * hopes of saving some for an RT task that is just being
+	 * woken and will probably have a cold cache anyway.
+ */
+ if (unlikely(rt_task(rq->curr)) &&
+ (p->rt.nr_cpus_allowed > 1)) {
+ int cpu = find_lowest_rq(p);
+
+ return (cpu == -1) ? task_cpu(p) : cpu;
+ }
+
+ /*
+ * Otherwise, just let it ride on the affined RQ and the
+ * post-schedule router will push the preempted task away
+ */
+ return task_cpu(p);
+}
+#endif /* CONFIG_SMP */
+
/*
* Preempt the current task with a newly woken task if needed:
*/
@@ -73,25 +468,48 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
resched_task(rq->curr);
}
-static struct task_struct *pick_next_task_rt(struct rq *rq)
+static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
+ struct rt_rq *rt_rq)
{
- struct rt_prio_array *array = &rq->rt.active;
- struct task_struct *next;
+ struct rt_prio_array *array = &rt_rq->active;
+ struct sched_rt_entity *next = NULL;
struct list_head *queue;
int idx;
idx = sched_find_first_bit(array->bitmap);
- if (idx >= MAX_RT_PRIO)
- return NULL;
+ BUG_ON(idx >= MAX_RT_PRIO);
queue = array->queue + idx;
- next = list_entry(queue->next, struct task_struct, run_list);
-
- next->se.exec_start = rq->clock;
+ next = list_entry(queue->next, struct sched_rt_entity, run_list);
return next;
}
+static struct task_struct *pick_next_task_rt(struct rq *rq)
+{
+ struct sched_rt_entity *rt_se;
+ struct task_struct *p;
+ struct rt_rq *rt_rq;
+
+ rt_rq = &rq->rt;
+
+ if (unlikely(!rt_rq->rt_nr_running))
+ return NULL;
+
+ if (sched_rt_ratio_exceeded(rt_rq))
+ return NULL;
+
+ do {
+ rt_se = pick_next_rt_entity(rq, rt_rq);
+ BUG_ON(!rt_se);
+ rt_rq = group_rt_rq(rt_se);
+ } while (rt_rq);
+
+ p = rt_task_of(rt_se);
+ p->se.exec_start = rq->clock;
+ return p;
+}
+
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
update_curr_rt(rq);
@@ -99,76 +517,448 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
}
#ifdef CONFIG_SMP
-/*
- * Load-balancing iterator. Note: while the runqueue stays locked
- * during the whole iteration, the current task might be
- * dequeued so the iterator has to be dequeue-safe. Here we
- * achieve that by always pre-iterating before returning
- * the current task:
- */
-static struct task_struct *load_balance_start_rt(void *arg)
+
+/* Only try algorithms three times */
+#define RT_MAX_TRIES 3
+
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
+
+static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
- struct rq *rq = arg;
- struct rt_prio_array *array = &rq->rt.active;
- struct list_head *head, *curr;
- struct task_struct *p;
+ if (!task_running(rq, p) &&
+ (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
+ (p->rt.nr_cpus_allowed > 1))
+ return 1;
+ return 0;
+}
+
+/* Return the second highest RT task, NULL otherwise */
+static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
+{
+ struct task_struct *next = NULL;
+ struct sched_rt_entity *rt_se;
+ struct rt_prio_array *array;
+ struct rt_rq *rt_rq;
int idx;
- idx = sched_find_first_bit(array->bitmap);
- if (idx >= MAX_RT_PRIO)
- return NULL;
+ for_each_leaf_rt_rq(rt_rq, rq) {
+ array = &rt_rq->active;
+ idx = sched_find_first_bit(array->bitmap);
+ next_idx:
+ if (idx >= MAX_RT_PRIO)
+ continue;
+ if (next && next->prio < idx)
+ continue;
+ list_for_each_entry(rt_se, array->queue + idx, run_list) {
+ struct task_struct *p = rt_task_of(rt_se);
+ if (pick_rt_task(rq, p, cpu)) {
+ next = p;
+ break;
+ }
+ }
+ if (!next) {
+ idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
+ goto next_idx;
+ }
+ }
+
+ return next;
+}
- head = array->queue + idx;
- curr = head->prev;
+static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
- p = list_entry(curr, struct task_struct, run_list);
+static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
+{
+ int lowest_prio = -1;
+ int lowest_cpu = -1;
+ int count = 0;
+ int cpu;
- curr = curr->prev;
+ cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);
- rq->rt.rt_load_balance_idx = idx;
- rq->rt.rt_load_balance_head = head;
- rq->rt.rt_load_balance_curr = curr;
+ /*
+ * Scan each rq for the lowest prio.
+ */
+ for_each_cpu_mask(cpu, *lowest_mask) {
+ struct rq *rq = cpu_rq(cpu);
- return p;
+ /* We look for lowest RT prio or non-rt CPU */
+ if (rq->rt.highest_prio >= MAX_RT_PRIO) {
+ /*
+			 * If we already found a low RT queue
+			 * and now we found this non-RT queue,
+			 * clear the mask and set our bit.
+			 * Otherwise just return the queue as is
+			 * and count==1 will cause the algorithm
+			 * to use the first bit found.
+ */
+ if (lowest_cpu != -1) {
+ cpus_clear(*lowest_mask);
+ cpu_set(rq->cpu, *lowest_mask);
+ }
+ return 1;
+ }
+
+ /* no locking for now */
+ if ((rq->rt.highest_prio > task->prio)
+ && (rq->rt.highest_prio >= lowest_prio)) {
+ if (rq->rt.highest_prio > lowest_prio) {
+ /* new low - clear old data */
+ lowest_prio = rq->rt.highest_prio;
+ lowest_cpu = cpu;
+ count = 0;
+ }
+ count++;
+ } else
+ cpu_clear(cpu, *lowest_mask);
+ }
+
+ /*
+ * Clear out all the set bits that represent
+ * runqueues that were of higher prio than
+ * the lowest_prio.
+ */
+ if (lowest_cpu > 0) {
+ /*
+ * Perhaps we could add another cpumask op to
+ * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
+ * Then that could be optimized to use memset and such.
+ */
+ for_each_cpu_mask(cpu, *lowest_mask) {
+ if (cpu >= lowest_cpu)
+ break;
+ cpu_clear(cpu, *lowest_mask);
+ }
+ }
+
+ return count;
}
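A compact userspace model of the scan above: walk the per-CPU "highest RT priority" values, prefer a CPU with no RT task at all, and otherwise keep the CPU whose highest priority is lowest (numerically largest) while still below the task's. The array contents, CPU count, and MAX_RT_PRIO stand-in are invented for the example.

#include <stdio.h>

#define MAX_RT_PRIO	100
#define NR_CPUS		4

int main(void)
{
	/* made-up per-CPU highest RT priority (MAX_RT_PRIO == no RT task queued) */
	int highest_prio[NR_CPUS] = { 10, 40, MAX_RT_PRIO, 35 };
	int task_prio = 20;			/* the task we want to place */
	int best_cpu = -1, best_prio = -1;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (highest_prio[cpu] >= MAX_RT_PRIO) {
			best_cpu = cpu;		/* a CPU with no RT load wins outright */
			break;
		}
		/* lower value == more important, so prefer numerically larger values */
		if (highest_prio[cpu] > task_prio && highest_prio[cpu] > best_prio) {
			best_prio = highest_prio[cpu];
			best_cpu = cpu;
		}
	}

	printf("best cpu for a prio-%d task: %d\n", task_prio, best_cpu);
	return 0;
}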
-static struct task_struct *load_balance_next_rt(void *arg)
+static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
- struct rq *rq = arg;
- struct rt_prio_array *array = &rq->rt.active;
- struct list_head *head, *curr;
- struct task_struct *p;
- int idx;
+ int first;
+
+ /* "this_cpu" is cheaper to preempt than a remote processor */
+ if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+ return this_cpu;
+
+ first = first_cpu(*mask);
+ if (first != NR_CPUS)
+ return first;
+
+ return -1;
+}
+
+static int find_lowest_rq(struct task_struct *task)
+{
+ struct sched_domain *sd;
+ cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
+ int this_cpu = smp_processor_id();
+ int cpu = task_cpu(task);
+ int count = find_lowest_cpus(task, lowest_mask);
+
+ if (!count)
+ return -1; /* No targets found */
- idx = rq->rt.rt_load_balance_idx;
- head = rq->rt.rt_load_balance_head;
- curr = rq->rt.rt_load_balance_curr;
+ /*
+ * There is no sense in performing an optimal search if only one
+ * target is found.
+ */
+ if (count == 1)
+ return first_cpu(*lowest_mask);
+
+ /*
+ * At this point we have built a mask of cpus representing the
+ * lowest priority tasks in the system. Now we want to elect
+ * the best one based on our affinity and topology.
+ *
+ * We prioritize the last cpu that the task executed on since
+ * it is most likely cache-hot in that location.
+ */
+ if (cpu_isset(cpu, *lowest_mask))
+ return cpu;
/*
- * If we arrived back to the head again then
- * iterate to the next queue (if any):
+ * Otherwise, we consult the sched_domains span maps to figure
+ * out which cpu is logically closest to our hot cache data.
*/
- if (unlikely(head == curr)) {
- int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
+ if (this_cpu == cpu)
+ this_cpu = -1; /* Skip this_cpu opt if the same */
- if (next_idx >= MAX_RT_PRIO)
- return NULL;
+ for_each_domain(cpu, sd) {
+ if (sd->flags & SD_WAKE_AFFINE) {
+ cpumask_t domain_mask;
+ int best_cpu;
- idx = next_idx;
- head = array->queue + idx;
- curr = head->prev;
+ cpus_and(domain_mask, sd->span, *lowest_mask);
- rq->rt.rt_load_balance_idx = idx;
- rq->rt.rt_load_balance_head = head;
+ best_cpu = pick_optimal_cpu(this_cpu,
+ &domain_mask);
+ if (best_cpu != -1)
+ return best_cpu;
+ }
}
- p = list_entry(curr, struct task_struct, run_list);
+ /*
+ * And finally, if there were no matches within the domains
+ * just give the caller *something* to work with from the compatible
+ * locations.
+ */
+ return pick_optimal_cpu(this_cpu, lowest_mask);
+}
- curr = curr->prev;
+/* Will lock the rq it finds */
+static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
+{
+ struct rq *lowest_rq = NULL;
+ int tries;
+ int cpu;
- rq->rt.rt_load_balance_curr = curr;
+ for (tries = 0; tries < RT_MAX_TRIES; tries++) {
+ cpu = find_lowest_rq(task);
- return p;
+ if ((cpu == -1) || (cpu == rq->cpu))
+ break;
+
+ lowest_rq = cpu_rq(cpu);
+
+ /* if the prio of this runqueue changed, try again */
+ if (double_lock_balance(rq, lowest_rq)) {
+ /*
+			 * We had to unlock the run queue. In
+			 * the meantime, the task could have
+			 * migrated already or had its affinity changed.
+ * Also make sure that it wasn't scheduled on its rq.
+ */
+ if (unlikely(task_rq(task) != rq ||
+ !cpu_isset(lowest_rq->cpu,
+ task->cpus_allowed) ||
+ task_running(rq, task) ||
+ !task->se.on_rq)) {
+
+ spin_unlock(&lowest_rq->lock);
+ lowest_rq = NULL;
+ break;
+ }
+ }
+
+ /* If this rq is still suitable use it. */
+ if (lowest_rq->rt.highest_prio > task->prio)
+ break;
+
+ /* try again */
+ spin_unlock(&lowest_rq->lock);
+ lowest_rq = NULL;
+ }
+
+ return lowest_rq;
+}
+
+/*
+ * If the current CPU has more than one RT task, see if the non
+ * running task can migrate over to a CPU that is running a task
+ * of lesser priority.
+ */
+static int push_rt_task(struct rq *rq)
+{
+ struct task_struct *next_task;
+ struct rq *lowest_rq;
+ int ret = 0;
+ int paranoid = RT_MAX_TRIES;
+
+ if (!rq->rt.overloaded)
+ return 0;
+
+ next_task = pick_next_highest_task_rt(rq, -1);
+ if (!next_task)
+ return 0;
+
+ retry:
+ if (unlikely(next_task == rq->curr)) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ /*
+ * It's possible that the next_task slipped in of
+ * higher priority than current. If that's the case
+ * just reschedule current.
+ */
+ if (unlikely(next_task->prio < rq->curr->prio)) {
+ resched_task(rq->curr);
+ return 0;
+ }
+
+ /* We might release rq lock */
+ get_task_struct(next_task);
+
+ /* find_lock_lowest_rq locks the rq if found */
+ lowest_rq = find_lock_lowest_rq(next_task, rq);
+ if (!lowest_rq) {
+ struct task_struct *task;
+ /*
+		 * find_lock_lowest_rq releases rq->lock
+ * so it is possible that next_task has changed.
+ * If it has, then try again.
+ */
+ task = pick_next_highest_task_rt(rq, -1);
+ if (unlikely(task != next_task) && task && paranoid--) {
+ put_task_struct(next_task);
+ next_task = task;
+ goto retry;
+ }
+ goto out;
+ }
+
+ deactivate_task(rq, next_task, 0);
+ set_task_cpu(next_task, lowest_rq->cpu);
+ activate_task(lowest_rq, next_task, 0);
+
+ resched_task(lowest_rq->curr);
+
+ spin_unlock(&lowest_rq->lock);
+
+ ret = 1;
+out:
+ put_task_struct(next_task);
+
+ return ret;
+}
+
+/*
+ * TODO: Currently we just use the second highest prio task on
+ * the queue, and stop when it can't migrate (or there's
+ * no more RT tasks). There may be a case where a lower
+ * priority RT task has a different affinity than the
+ * higher RT task. In this case the lower RT task could
+ * possibly be able to migrate where as the higher priority
+ * RT task could not. We currently ignore this issue.
+ * Enhancements are welcome!
+ */
+static void push_rt_tasks(struct rq *rq)
+{
+ /* push_rt_task will return true if it moved an RT */
+ while (push_rt_task(rq))
+ ;
+}
+
+static int pull_rt_task(struct rq *this_rq)
+{
+ int this_cpu = this_rq->cpu, ret = 0, cpu;
+ struct task_struct *p, *next;
+ struct rq *src_rq;
+
+ if (likely(!rt_overloaded(this_rq)))
+ return 0;
+
+ next = pick_next_task_rt(this_rq);
+
+ for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
+ if (this_cpu == cpu)
+ continue;
+
+ src_rq = cpu_rq(cpu);
+ /*
+ * We can potentially drop this_rq's lock in
+ * double_lock_balance, and another CPU could
+ * steal our next task - hence we must cause
+ * the caller to recalculate the next task
+ * in that case:
+ */
+ if (double_lock_balance(this_rq, src_rq)) {
+ struct task_struct *old_next = next;
+
+ next = pick_next_task_rt(this_rq);
+ if (next != old_next)
+ ret = 1;
+ }
+
+ /*
+ * Are there still pullable RT tasks?
+ */
+ if (src_rq->rt.rt_nr_running <= 1)
+ goto skip;
+
+ p = pick_next_highest_task_rt(src_rq, this_cpu);
+
+ /*
+ * Do we have an RT task that preempts
+ * the to-be-scheduled task?
+ */
+ if (p && (!next || (p->prio < next->prio))) {
+ WARN_ON(p == src_rq->curr);
+ WARN_ON(!p->se.on_rq);
+
+ /*
+ * There's a chance that p is higher in priority
+ * than what's currently running on its cpu.
+			 * This is just that p is waking up and hasn't
+ * had a chance to schedule. We only pull
+ * p if it is lower in priority than the
+ * current task on the run queue or
+ * this_rq next task is lower in prio than
+ * the current task on that rq.
+ */
+ if (p->prio < src_rq->curr->prio ||
+ (next && next->prio < src_rq->curr->prio))
+ goto skip;
+
+ ret = 1;
+
+ deactivate_task(src_rq, p, 0);
+ set_task_cpu(p, this_cpu);
+ activate_task(this_rq, p, 0);
+ /*
+ * We continue with the search, just in
+ * case there's an even higher prio task
+			 * in another runqueue. (low likelihood
+ * but possible)
+ *
+ * Update next so that we won't pick a task
+ * on another cpu with a priority lower (or equal)
+ * than the one we just picked.
+ */
+ next = p;
+
+ }
+ skip:
+ spin_unlock(&src_rq->lock);
+ }
+
+ return ret;
+}
+
+static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
+{
+ /* Try to pull RT tasks here if we lower this rq's prio */
+ if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
+ pull_rt_task(rq);
+}
+
+static void post_schedule_rt(struct rq *rq)
+{
+ /*
+ * If we have more than one rt_task queued, then
+ * see if we can push the other rt_tasks off to other CPUS.
+ * Note we may release the rq lock, and since
+ * the lock was owned by prev, we need to release it
+	 * first via finish_lock_switch and then reacquire it here.
+ */
+ if (unlikely(rq->rt.overloaded)) {
+ spin_lock_irq(&rq->lock);
+ push_rt_tasks(rq);
+ spin_unlock_irq(&rq->lock);
+ }
+}
+
+
+static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
+{
+ if (!task_running(rq, p) &&
+ (p->prio >= rq->rt.highest_prio) &&
+ rq->rt.overloaded)
+ push_rt_tasks(rq);
}
static unsigned long
@@ -177,36 +967,170 @@ load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
struct sched_domain *sd, enum cpu_idle_type idle,
int *all_pinned, int *this_best_prio)
{
- struct rq_iterator rt_rq_iterator;
-
- rt_rq_iterator.start = load_balance_start_rt;
- rt_rq_iterator.next = load_balance_next_rt;
- /* pass 'busiest' rq argument into
- * load_balance_[start|next]_rt iterators
- */
- rt_rq_iterator.arg = busiest;
-
- return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
- idle, all_pinned, this_best_prio, &rt_rq_iterator);
+ /* don't touch RT tasks */
+ return 0;
}
static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
struct sched_domain *sd, enum cpu_idle_type idle)
{
- struct rq_iterator rt_rq_iterator;
+ /* don't touch RT tasks */
+ return 0;
+}
+
+static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
+{
+ int weight = cpus_weight(*new_mask);
- rt_rq_iterator.start = load_balance_start_rt;
- rt_rq_iterator.next = load_balance_next_rt;
- rt_rq_iterator.arg = busiest;
+ BUG_ON(!rt_task(p));
+
+ /*
+ * Update the migration status of the RQ if we have an RT task
+ * which is running AND changing its weight value.
+ */
+ if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
+ struct rq *rq = task_rq(p);
- return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
- &rt_rq_iterator);
+ if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
+ rq->rt.rt_nr_migratory++;
+ } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
+ BUG_ON(!rq->rt.rt_nr_migratory);
+ rq->rt.rt_nr_migratory--;
+ }
+
+ update_rt_migration(rq);
+ }
+
+ p->cpus_allowed = *new_mask;
+ p->rt.nr_cpus_allowed = weight;
}
-#endif
-static void task_tick_rt(struct rq *rq, struct task_struct *p)
+/* Assumes rq->lock is held */
+static void join_domain_rt(struct rq *rq)
{
+ if (rq->rt.overloaded)
+ rt_set_overload(rq);
+}
+
+/* Assumes rq->lock is held */
+static void leave_domain_rt(struct rq *rq)
+{
+ if (rq->rt.overloaded)
+ rt_clear_overload(rq);
+}
+
+/*
+ * When we switch from the rt queue, we bring ourselves to a position
+ * that we might want to pull RT tasks from other runqueues.
+ */
+static void switched_from_rt(struct rq *rq, struct task_struct *p,
+ int running)
+{
+ /*
+ * If there are other RT tasks then we will reschedule
+ * and the scheduling of the other RT tasks will handle
+ * the balancing. But if we are the last RT task
+ * we may need to handle the pulling of RT tasks
+ * now.
+ */
+ if (!rq->rt.rt_nr_running)
+ pull_rt_task(rq);
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * When switching a task to RT, we may overload the runqueue
+ * with RT tasks. In this case we try to push them off to
+ * other runqueues.
+ */
+static void switched_to_rt(struct rq *rq, struct task_struct *p,
+ int running)
+{
+ int check_resched = 1;
+
+ /*
+ * If we are already running, then there's nothing
+ * that needs to be done. But if we are not running
+ * we may need to preempt the current running task.
+ * If that current running task is also an RT task
+ * then see if we can move to another run queue.
+ */
+ if (!running) {
+#ifdef CONFIG_SMP
+ if (rq->rt.overloaded && push_rt_task(rq) &&
+ /* Don't resched if we changed runqueues */
+ rq != task_rq(p))
+ check_resched = 0;
+#endif /* CONFIG_SMP */
+ if (check_resched && p->prio < rq->curr->prio)
+ resched_task(rq->curr);
+ }
+}
+
+/*
+ * Priority of the task has changed. This may cause
+ * us to initiate a push or pull.
+ */
+static void prio_changed_rt(struct rq *rq, struct task_struct *p,
+ int oldprio, int running)
+{
+ if (running) {
+#ifdef CONFIG_SMP
+ /*
+ * If our priority decreases while running, we
+ * may need to pull tasks to this runqueue.
+ */
+ if (oldprio < p->prio)
+ pull_rt_task(rq);
+ /*
+ * If there's a higher priority task waiting to run
+ * then reschedule.
+ */
+ if (p->prio > rq->rt.highest_prio)
+ resched_task(p);
+#else
+ /* For UP simply resched on drop of prio */
+ if (oldprio < p->prio)
+ resched_task(p);
+#endif /* CONFIG_SMP */
+ } else {
+ /*
+		 * This task is not running, but if it is
+		 * higher in priority than the currently
+		 * running task then reschedule.
+ */
+ if (p->prio < rq->curr->prio)
+ resched_task(rq->curr);
+ }
+}
+
+static void watchdog(struct rq *rq, struct task_struct *p)
+{
+ unsigned long soft, hard;
+
+ if (!p->signal)
+ return;
+
+ soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
+ hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
+
+ if (soft != RLIM_INFINITY) {
+ unsigned long next;
+
+ p->rt.timeout++;
+ next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
+ if (p->rt.timeout > next)
+ p->it_sched_expires = p->se.sum_exec_runtime;
+ }
+}
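To make the watchdog arithmetic concrete: a small sketch converting the RLIMIT_RTTIME soft limit (in microseconds) into the tick count that p->rt.timeout is compared against. HZ and the limit values are assumptions for illustration.

#include <stdio.h>

#define HZ		1000		/* assumed tick rate */
#define USEC_PER_SEC	1000000UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long soft = 950000;	/* assumed RLIMIT_RTTIME soft limit, usecs */
	unsigned long hard = 1000000;	/* assumed hard limit, usecs               */
	unsigned long limit = soft < hard ? soft : hard;

	/* ticks the RT task may accumulate before it is flagged */
	unsigned long next = DIV_ROUND_UP(limit, USEC_PER_SEC / HZ);

	printf("timeout threshold: %lu ticks\n", next);	/* 950 with these numbers */
	return 0;
}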
+
+static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
+{
+ update_curr_rt(rq);
+
+ watchdog(rq, p);
+
/*
* RR tasks need a special form of timeslice management.
* FIFO tasks have no timeslices.
@@ -214,16 +1138,16 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
if (p->policy != SCHED_RR)
return;
- if (--p->time_slice)
+ if (--p->rt.time_slice)
return;
- p->time_slice = DEF_TIMESLICE;
+ p->rt.time_slice = DEF_TIMESLICE;
/*
* Requeue to the end of queue if we are not the only element
* on the queue:
*/
- if (p->run_list.prev != p->run_list.next) {
+ if (p->rt.run_list.prev != p->rt.run_list.next) {
requeue_task_rt(rq, p);
set_tsk_need_resched(p);
}
@@ -241,6 +1165,9 @@ const struct sched_class rt_sched_class = {
.enqueue_task = enqueue_task_rt,
.dequeue_task = dequeue_task_rt,
.yield_task = yield_task_rt,
+#ifdef CONFIG_SMP
+ .select_task_rq = select_task_rq_rt,
+#endif /* CONFIG_SMP */
.check_preempt_curr = check_preempt_curr_rt,
@@ -250,8 +1177,18 @@ const struct sched_class rt_sched_class = {
#ifdef CONFIG_SMP
.load_balance = load_balance_rt,
.move_one_task = move_one_task_rt,
+ .set_cpus_allowed = set_cpus_allowed_rt,
+ .join_domain = join_domain_rt,
+ .leave_domain = leave_domain_rt,
+ .pre_schedule = pre_schedule_rt,
+ .post_schedule = post_schedule_rt,
+ .task_wake_up = task_wake_up_rt,
+ .switched_from = switched_from_rt,
#endif
.set_curr_task = set_curr_task_rt,
.task_tick = task_tick_rt,
+
+ .prio_changed = prio_changed_rt,
+ .switched_to = switched_to_rt,
};
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index ef1a7df80ea..5b32433e7ee 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -52,7 +52,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
sd->lb_nobusyq[itype],
sd->lb_nobusyg[itype]);
}
- seq_printf(seq, " %u %u %u %u %u %u %u %u %u %u %u %u\n",
+ seq_printf(seq,
+ " %u %u %u %u %u %u %u %u %u %u %u %u\n",
sd->alb_count, sd->alb_failed, sd->alb_pushed,
sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
@@ -127,7 +128,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
# define schedstat_set(var, val) do { } while (0)
#endif
-#ifdef CONFIG_SCHEDSTATS
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
/*
* Called when a process is dequeued from the active array and given
* the cpu. We should note that with the exception of interactive
@@ -155,7 +156,7 @@ static inline void sched_info_dequeued(struct task_struct *t)
*/
static void sched_info_arrive(struct task_struct *t)
{
- unsigned long long now = sched_clock(), delta = 0;
+ unsigned long long now = task_rq(t)->clock, delta = 0;
if (t->sched_info.last_queued)
delta = now - t->sched_info.last_queued;
@@ -186,7 +187,7 @@ static inline void sched_info_queued(struct task_struct *t)
{
if (unlikely(sched_info_on()))
if (!t->sched_info.last_queued)
- t->sched_info.last_queued = sched_clock();
+ t->sched_info.last_queued = task_rq(t)->clock;
}
/*
@@ -195,7 +196,8 @@ static inline void sched_info_queued(struct task_struct *t)
*/
static inline void sched_info_depart(struct task_struct *t)
{
- unsigned long long delta = sched_clock() - t->sched_info.last_arrival;
+ unsigned long long delta = task_rq(t)->clock -
+ t->sched_info.last_arrival;
t->sched_info.cpu_time += delta;
rq_sched_info_depart(task_rq(t), delta);
@@ -231,5 +233,5 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
#else
#define sched_info_queued(t) do { } while (0)
#define sched_info_switch(t, next) do { } while (0)
-#endif /* CONFIG_SCHEDSTATS */
+#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
diff --git a/kernel/signal.c b/kernel/signal.c
index 12006308c7e..4333b6dbb42 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -55,7 +55,7 @@ static int sig_ignored(struct task_struct *t, int sig)
* signal handler may change by the time it is
* unblocked.
*/
- if (sigismember(&t->blocked, sig))
+ if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
return 0;
/* Is it explicitly or implicitly ignored? */
@@ -124,7 +124,7 @@ void recalc_sigpending_and_wake(struct task_struct *t)
void recalc_sigpending(void)
{
- if (!recalc_sigpending_tsk(current))
+ if (!recalc_sigpending_tsk(current) && !freezing(current))
clear_thread_flag(TIF_SIGPENDING);
}
@@ -456,15 +456,15 @@ void signal_wake_up(struct task_struct *t, int resume)
set_tsk_thread_flag(t, TIF_SIGPENDING);
/*
- * For SIGKILL, we want to wake it up in the stopped/traced case.
- * We don't check t->state here because there is a race with it
+ * For SIGKILL, we want to wake it up in the stopped/traced/killable
+ * case. We don't check t->state here because there is a race with it
* executing another processor and just now entering stopped state.
* By using wake_up_state, we ensure the process will wake up and
* handle its death signal.
*/
mask = TASK_INTERRUPTIBLE;
if (resume)
- mask |= TASK_STOPPED | TASK_TRACED;
+ mask |= TASK_WAKEKILL;
if (!wake_up_state(t, mask))
kick_process(t);
}
@@ -620,7 +620,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
* Wake up the stopped thread _after_ setting
* TIF_SIGPENDING
*/
- state = TASK_STOPPED;
+ state = __TASK_STOPPED;
if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
set_tsk_thread_flag(t, TIF_SIGPENDING);
state |= TASK_INTERRUPTIBLE;
@@ -732,14 +732,14 @@ static void print_fatal_signal(struct pt_regs *regs, int signr)
printk("%s/%d: potentially unexpected fatal signal %d.\n",
current->comm, task_pid_nr(current), signr);
-#ifdef __i386__
- printk("code at %08lx: ", regs->eip);
+#if defined(__i386__) && !defined(__arch_um__)
+ printk("code at %08lx: ", regs->ip);
{
int i;
for (i = 0; i < 16; i++) {
unsigned char insn;
- __get_user(insn, (unsigned char *)(regs->eip + i));
+ __get_user(insn, (unsigned char *)(regs->ip + i));
printk("%02x ", insn);
}
}
@@ -838,7 +838,7 @@ static inline int wants_signal(int sig, struct task_struct *p)
return 0;
if (sig == SIGKILL)
return 1;
- if (p->state & (TASK_STOPPED | TASK_TRACED))
+ if (task_is_stopped_or_traced(p))
return 0;
return task_curr(p) || !signal_pending(p);
}
@@ -994,6 +994,12 @@ void zap_other_threads(struct task_struct *p)
}
}
+int fastcall __fatal_signal_pending(struct task_struct *tsk)
+{
+ return sigismember(&tsk->pending.signal, SIGKILL);
+}
+EXPORT_SYMBOL(__fatal_signal_pending);
+
/*
* Must be called under rcu_read_lock() or with tasklist_lock read-held.
*/
@@ -1441,7 +1447,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
BUG_ON(sig == -1);
/* do_notify_parent_cldstop should have been called instead. */
- BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
+ BUG_ON(task_is_stopped_or_traced(tsk));
BUG_ON(!tsk->ptrace &&
(tsk->group_leader != tsk || !thread_group_empty(tsk)));
@@ -1729,7 +1735,7 @@ static int do_signal_stop(int signr)
* so this check has no races.
*/
if (!t->exit_state &&
- !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+ !task_is_stopped_or_traced(t)) {
stop_count++;
signal_wake_up(t, 0);
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index bd89bc4eb0b..d7837d45419 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -3,7 +3,9 @@
*
* Copyright (C) 1992 Linus Torvalds
*
- * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
+ * Distribute under GPLv2.
+ *
+ * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
*/
#include <linux/module.h>
@@ -278,9 +280,14 @@ asmlinkage void do_softirq(void)
*/
void irq_enter(void)
{
+#ifdef CONFIG_NO_HZ
+ int cpu = smp_processor_id();
+ if (idle_cpu(cpu) && !in_interrupt())
+ tick_nohz_stop_idle(cpu);
+#endif
__irq_enter();
#ifdef CONFIG_NO_HZ
- if (idle_cpu(smp_processor_id()))
+ if (idle_cpu(cpu))
tick_nohz_update_jiffies();
#endif
}
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 11df812263c..7c2da88db4e 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -8,6 +8,7 @@
*/
#include <linux/mm.h>
#include <linux/cpu.h>
+#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
@@ -23,8 +24,8 @@ static DEFINE_PER_CPU(unsigned long, touch_timestamp);
static DEFINE_PER_CPU(unsigned long, print_timestamp);
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
-static int did_panic;
-int softlockup_thresh = 10;
+static int __read_mostly did_panic;
+unsigned long __read_mostly softlockup_thresh = 60;
static int
softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
@@ -45,7 +46,7 @@ static struct notifier_block panic_block = {
*/
static unsigned long get_timestamp(int this_cpu)
{
- return cpu_clock(this_cpu) >> 30; /* 2^30 ~= 10^9 */
+ return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
}
void touch_softlockup_watchdog(void)
@@ -104,7 +105,7 @@ void softlockup_tick(void)
if (now > (touch_timestamp + 1))
wake_up_process(per_cpu(watchdog_task, this_cpu));
- /* Warn about unreasonable 10+ seconds delays: */
+ /* Warn about unreasonable delays: */
if (now <= (touch_timestamp + softlockup_thresh))
return;
@@ -122,11 +123,93 @@ void softlockup_tick(void)
}
/*
+ * Have a reasonable limit on the number of tasks checked:
+ */
+unsigned long __read_mostly sysctl_hung_task_check_count = 1024;
+
+/*
+ * Zero means infinite timeout - no checking done:
+ */
+unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120;
+
+unsigned long __read_mostly sysctl_hung_task_warnings = 10;
+
+/*
+ * Only do the hung-tasks check on one CPU:
+ */
+static int check_cpu __read_mostly = -1;
+
+static void check_hung_task(struct task_struct *t, unsigned long now)
+{
+ unsigned long switch_count = t->nvcsw + t->nivcsw;
+
+ if (t->flags & PF_FROZEN)
+ return;
+
+ if (switch_count != t->last_switch_count || !t->last_switch_timestamp) {
+ t->last_switch_count = switch_count;
+ t->last_switch_timestamp = now;
+ return;
+ }
+ if ((long)(now - t->last_switch_timestamp) <
+ sysctl_hung_task_timeout_secs)
+ return;
+ if (sysctl_hung_task_warnings < 0)
+ return;
+ sysctl_hung_task_warnings--;
+
+ /*
+ * Ok, the task did not get scheduled for more than 2 minutes,
+ * complain:
+ */
+ printk(KERN_ERR "INFO: task %s:%d blocked for more than "
+ "%ld seconds.\n", t->comm, t->pid,
+ sysctl_hung_task_timeout_secs);
+ printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
+ " disables this message.\n");
+ sched_show_task(t);
+ __debug_show_held_locks(t);
+
+ t->last_switch_timestamp = now;
+ touch_nmi_watchdog();
+}
+
+/*
+ * Check whether a TASK_UNINTERRUPTIBLE task has not been woken up
+ * for a really long time (120 seconds by default). If so, print out
+ * a warning.
+ */
+static void check_hung_uninterruptible_tasks(int this_cpu)
+{
+ int max_count = sysctl_hung_task_check_count;
+ unsigned long now = get_timestamp(this_cpu);
+ struct task_struct *g, *t;
+
+ /*
+ * If the system crashed already then all bets are off,
+ * do not report extra hung tasks:
+ */
+ if ((tainted & TAINT_DIE) || did_panic)
+ return;
+
+ read_lock(&tasklist_lock);
+ do_each_thread(g, t) {
+ if (!--max_count)
+ goto unlock;
+ if (t->state & TASK_UNINTERRUPTIBLE)
+ check_hung_task(t, now);
+ } while_each_thread(g, t);
+ unlock:
+ read_unlock(&tasklist_lock);
+}
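The core of check_hung_task() above is a context-switch-count comparison against a per-task baseline; the sketch below models it in userspace with invented task fields and second-granularity timestamps.

#include <stdio.h>

struct fake_task {
	unsigned long nvcsw, nivcsw;		/* voluntary / involuntary switches */
	unsigned long last_switch_count;
	unsigned long last_switch_timestamp;	/* seconds */
};

static void check_hung(struct fake_task *t, unsigned long now,
		       unsigned long timeout_secs)
{
	unsigned long switch_count = t->nvcsw + t->nivcsw;

	if (switch_count != t->last_switch_count || !t->last_switch_timestamp) {
		/* the task ran since the last sample: reset the baseline */
		t->last_switch_count = switch_count;
		t->last_switch_timestamp = now;
		return;
	}
	if (now - t->last_switch_timestamp >= timeout_secs)
		printf("task blocked for more than %lu seconds\n", timeout_secs);
}

int main(void)
{
	struct fake_task t = { .nvcsw = 5, .nivcsw = 2,
			       .last_switch_count = 7,
			       .last_switch_timestamp = 100 };

	check_hung(&t, 230, 120);	/* 130s without a single context switch */
	return 0;
}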
+
+/*
* The watchdog thread - runs every second and touches the timestamp.
*/
static int watchdog(void *__bind_cpu)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+ int this_cpu = (long)__bind_cpu;
sched_setscheduler(current, SCHED_FIFO, &param);
@@ -135,13 +218,23 @@ static int watchdog(void *__bind_cpu)
/*
* Run briefly once per second to reset the softlockup timestamp.
- * If this gets delayed for more than 10 seconds then the
+ * If this gets delayed for more than 60 seconds then the
* debug-printout triggers in softlockup_tick().
*/
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
touch_softlockup_watchdog();
schedule();
+
+ if (kthread_should_stop())
+ break;
+
+ if (this_cpu != check_cpu)
+ continue;
+
+ if (sysctl_hung_task_timeout_secs)
+ check_hung_uninterruptible_tasks(this_cpu);
+
}
return 0;
@@ -171,9 +264,20 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
+ check_cpu = any_online_cpu(cpu_online_map);
wake_up_process(per_cpu(watchdog_task, hotcpu));
break;
#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ if (hotcpu == check_cpu) {
+ cpumask_t temp_cpu_online_map = cpu_online_map;
+
+ cpu_clear(hotcpu, temp_cpu_online_map);
+ check_cpu = any_online_cpu(temp_cpu_online_map);
+ }
+ break;
+
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
if (!per_cpu(watchdog_task, hotcpu))
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index cd72424c266..ae28c824512 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -65,8 +65,7 @@ EXPORT_SYMBOL(_write_trylock);
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
* not re-enabled during lock-acquire (which the preempt-spin-ops do):
*/
-#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
- defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
void __lockfunc _read_lock(rwlock_t *lock)
{
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 319821ef78a..51b5ee53571 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -203,13 +203,13 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
int ret;
/* No CPUs can come up or down during this. */
- lock_cpu_hotplug();
+ get_online_cpus();
p = __stop_machine_run(fn, data, cpu);
if (!IS_ERR(p))
ret = kthread_stop(p);
else
ret = PTR_ERR(p);
- unlock_cpu_hotplug();
+ put_online_cpus();
return ret;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 304b5410d74..d1fe71eb454 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1750,7 +1750,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
}
asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
- struct getcpu_cache __user *cache)
+ struct getcpu_cache __user *unused)
{
int err = 0;
int cpu = raw_smp_processor_id();
@@ -1758,24 +1758,6 @@ asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
err |= put_user(cpu, cpup);
if (nodep)
err |= put_user(cpu_to_node(cpu), nodep);
- if (cache) {
- /*
- * The cache is not needed for this implementation,
- * but make sure user programs pass something
- * valid. vsyscall implementations can instead make
- * good use of the cache. Only use t0 and t1 because
- * these are available in both 32bit and 64bit ABI (no
- * need for a compat_getcpu). 32bit has enough
- * padding
- */
- unsigned long t0, t1;
- get_user(t0, &cache->blob[0]);
- get_user(t1, &cache->blob[1]);
- t0++;
- t1++;
- put_user(t0, &cache->blob[0]);
- put_user(t1, &cache->blob[1]);
- }
return err ? -EFAULT : 0;
}
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 52c7a151e29..beee5b3b68a 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -40,10 +40,14 @@ cond_syscall(sys_recvfrom);
cond_syscall(sys_recv);
cond_syscall(sys_socket);
cond_syscall(sys_setsockopt);
+cond_syscall(compat_sys_setsockopt);
cond_syscall(sys_getsockopt);
+cond_syscall(compat_sys_getsockopt);
cond_syscall(sys_shutdown);
cond_syscall(sys_sendmsg);
+cond_syscall(compat_sys_sendmsg);
cond_syscall(sys_recvmsg);
+cond_syscall(compat_sys_recvmsg);
cond_syscall(sys_socketcall);
cond_syscall(sys_futex);
cond_syscall(compat_sys_futex);
@@ -127,6 +131,7 @@ cond_syscall(sys32_sysctl);
cond_syscall(ppc_rtas);
cond_syscall(sys_spu_run);
cond_syscall(sys_spu_create);
+cond_syscall(sys_subpage_prot);
/* mmu depending weak syscall entries */
cond_syscall(sys_mprotect);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 3b4efbe2644..7cb1ac3e6ff 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -53,6 +53,7 @@
#ifdef CONFIG_X86
#include <asm/nmi.h>
#include <asm/stacktrace.h>
+#include <asm/io.h>
#endif
static int deprecated_sysctl_warning(struct __sysctl_args *args);
@@ -80,7 +81,7 @@ extern int percpu_pagelist_fraction;
extern int compat_log;
extern int maps_protect;
extern int sysctl_stat_interval;
-extern int audit_argv_kb;
+extern int latencytop_enabled;
/* Constants used for minimum and maximum */
#ifdef CONFIG_DETECT_SOFTLOCKUP
@@ -156,8 +157,16 @@ static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *
#endif
static struct ctl_table root_table[];
-static struct ctl_table_header root_table_header =
- { root_table, LIST_HEAD_INIT(root_table_header.ctl_entry) };
+static struct ctl_table_root sysctl_table_root;
+static struct ctl_table_header root_table_header = {
+ .ctl_table = root_table,
+ .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.header_list),
+ .root = &sysctl_table_root,
+};
+static struct ctl_table_root sysctl_table_root = {
+ .root_list = LIST_HEAD_INIT(sysctl_table_root.root_list),
+ .header_list = LIST_HEAD_INIT(root_table_header.ctl_entry),
+};
static struct ctl_table kern_table[];
static struct ctl_table vm_table[];
@@ -191,14 +200,6 @@ static struct ctl_table root_table[] = {
.mode = 0555,
.child = vm_table,
},
-#ifdef CONFIG_NET
- {
- .ctl_name = CTL_NET,
- .procname = "net",
- .mode = 0555,
- .child = net_table,
- },
-#endif
{
.ctl_name = CTL_FS,
.procname = "fs",
@@ -225,21 +226,24 @@ static struct ctl_table root_table[] = {
};
#ifdef CONFIG_SCHED_DEBUG
-static unsigned long min_sched_granularity_ns = 100000; /* 100 usecs */
-static unsigned long max_sched_granularity_ns = 1000000000; /* 1 second */
-static unsigned long min_wakeup_granularity_ns; /* 0 usecs */
-static unsigned long max_wakeup_granularity_ns = 1000000000; /* 1 second */
+static int min_sched_granularity_ns = 100000; /* 100 usecs */
+static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
+static int min_wakeup_granularity_ns; /* 0 usecs */
+static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
#endif
static struct ctl_table kern_table[] = {
#ifdef CONFIG_SCHED_DEBUG
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "sched_nr_latency",
- .data = &sysctl_sched_nr_latency,
+ .procname = "sched_min_granularity_ns",
+ .data = &sysctl_sched_min_granularity,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &sched_nr_latency_handler,
+ .strategy = &sysctl_intvec,
+ .extra1 = &min_sched_granularity_ns,
+ .extra2 = &max_sched_granularity_ns,
},
{
.ctl_name = CTL_UNNUMBERED,
@@ -247,7 +251,7 @@ static struct ctl_table kern_table[] = {
.data = &sysctl_sched_latency,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
+ .proc_handler = &sched_nr_latency_handler,
.strategy = &sysctl_intvec,
.extra1 = &min_sched_granularity_ns,
.extra2 = &max_sched_granularity_ns,
@@ -298,6 +302,48 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "sched_nr_migrate",
+ .data = &sysctl_sched_nr_migrate,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "sched_rt_period_ms",
+ .data = &sysctl_sched_rt_period,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "sched_rt_ratio",
+ .data = &sysctl_sched_rt_ratio,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "sched_min_bal_int_shares",
+ .data = &sysctl_sched_min_bal_int_shares,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "sched_max_bal_int_shares",
+ .data = &sysctl_sched_max_bal_int_shares,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#endif
#endif
{
.ctl_name = CTL_UNNUMBERED,
@@ -343,16 +389,6 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
-#ifdef CONFIG_AUDITSYSCALL
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "audit_argv_kb",
- .data = &audit_argv_kb,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
-#endif
{
.ctl_name = KERN_CORE_PATTERN,
.procname = "core_pattern",
@@ -371,6 +407,15 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dointvec_taint,
},
#endif
+#ifdef CONFIG_LATENCYTOP
+ {
+ .procname = "latencytop",
+ .data = &latencytop_enabled,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#endif
#ifdef CONFIG_SECURITY_CAPABILITIES
{
.procname = "cap-bound",
@@ -672,6 +717,14 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "io_delay_type",
+ .data = &io_delay_type,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
#endif
#if defined(CONFIG_MMU)
{
@@ -717,13 +770,40 @@ static struct ctl_table kern_table[] = {
.ctl_name = CTL_UNNUMBERED,
.procname = "softlockup_thresh",
.data = &softlockup_thresh,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
+ .proc_handler = &proc_doulongvec_minmax,
.strategy = &sysctl_intvec,
.extra1 = &one,
.extra2 = &sixty,
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "hung_task_check_count",
+ .data = &sysctl_hung_task_check_count,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = &proc_doulongvec_minmax,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "hung_task_timeout_secs",
+ .data = &sysctl_hung_task_timeout_secs,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = &proc_doulongvec_minmax,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "hung_task_warnings",
+ .data = &sysctl_hung_task_warnings,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = &proc_doulongvec_minmax,
+ .strategy = &sysctl_intvec,
+ },
#endif
#ifdef CONFIG_COMPAT
{
@@ -895,11 +975,11 @@ static struct ctl_table vm_table[] = {
},
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "hugetlb_dynamic_pool",
- .data = &hugetlb_dynamic_pool,
- .maxlen = sizeof(hugetlb_dynamic_pool),
+ .procname = "nr_overcommit_hugepages",
+ .data = &nr_overcommit_huge_pages,
+ .maxlen = sizeof(nr_overcommit_huge_pages),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_doulongvec_minmax,
},
#endif
{
@@ -1289,12 +1369,27 @@ void sysctl_head_finish(struct ctl_table_header *head)
spin_unlock(&sysctl_lock);
}
-struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev)
+static struct list_head *
+lookup_header_list(struct ctl_table_root *root, struct nsproxy *namespaces)
+{
+ struct list_head *header_list;
+ header_list = &root->header_list;
+ if (root->lookup)
+ header_list = root->lookup(root, namespaces);
+ return header_list;
+}
+
+struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces,
+ struct ctl_table_header *prev)
{
+ struct ctl_table_root *root;
+ struct list_head *header_list;
struct ctl_table_header *head;
struct list_head *tmp;
+
spin_lock(&sysctl_lock);
if (prev) {
+ head = prev;
tmp = &prev->ctl_entry;
unuse_table(prev);
goto next;
@@ -1308,14 +1403,38 @@ struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev)
spin_unlock(&sysctl_lock);
return head;
next:
+ root = head->root;
tmp = tmp->next;
- if (tmp == &root_table_header.ctl_entry)
- break;
+ header_list = lookup_header_list(root, namespaces);
+ if (tmp != header_list)
+ continue;
+
+ do {
+ root = list_entry(root->root_list.next,
+ struct ctl_table_root, root_list);
+ if (root == &sysctl_table_root)
+ goto out;
+ header_list = lookup_header_list(root, namespaces);
+ } while (list_empty(header_list));
+ tmp = header_list->next;
}
+out:
spin_unlock(&sysctl_lock);
return NULL;
}
+struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev)
+{
+ return __sysctl_head_next(current->nsproxy, prev);
+}
+
+void register_sysctl_root(struct ctl_table_root *root)
+{
+ spin_lock(&sysctl_lock);
+ list_add_tail(&root->root_list, &sysctl_table_root.root_list);
+ spin_unlock(&sysctl_lock);
+}
+
#ifdef CONFIG_SYSCTL_SYSCALL
int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
@@ -1472,18 +1591,21 @@ static __init int sysctl_init(void)
{
int err;
sysctl_set_parent(NULL, root_table);
- err = sysctl_check_table(root_table);
+ err = sysctl_check_table(current->nsproxy, root_table);
return 0;
}
core_initcall(sysctl_init);
/**
- * register_sysctl_table - register a sysctl hierarchy
+ * __register_sysctl_paths - register a sysctl hierarchy
+ * @root: List of sysctl headers to register on
+ * @namespaces: Data to compute which lists of sysctl entries are visible
+ * @path: The path to the directory the sysctl table is in.
* @table: the top-level table structure
*
* Register a sysctl table hierarchy. @table should be a filled in ctl_table
- * array. An entry with a ctl_name of 0 terminates the table.
+ * array. A completely zero-filled entry terminates the table.
*
* The members of the &struct ctl_table structure are used as follows:
*
@@ -1546,25 +1668,99 @@ core_initcall(sysctl_init);
* This routine returns %NULL on a failure to register, and a pointer
* to the table header on success.
*/
-struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
+struct ctl_table_header *__register_sysctl_paths(
+ struct ctl_table_root *root,
+ struct nsproxy *namespaces,
+ const struct ctl_path *path, struct ctl_table *table)
{
- struct ctl_table_header *tmp;
- tmp = kmalloc(sizeof(struct ctl_table_header), GFP_KERNEL);
- if (!tmp)
+ struct list_head *header_list;
+ struct ctl_table_header *header;
+ struct ctl_table *new, **prevp;
+ unsigned int n, npath;
+
+ /* Count the path components */
+ for (npath = 0; path[npath].ctl_name || path[npath].procname; ++npath)
+ ;
+
+ /*
+ * For each path component, allocate a 2-element ctl_table array.
+ * The first array element will be filled with the sysctl entry
+ * for this path component; the second will be the sentinel (ctl_name == 0).
+ *
+ * We allocate everything in one go so that we don't have to
+ * worry about freeing additional memory in unregister_sysctl_table.
+ */
+ header = kzalloc(sizeof(struct ctl_table_header) +
+ (2 * npath * sizeof(struct ctl_table)), GFP_KERNEL);
+ if (!header)
return NULL;
- tmp->ctl_table = table;
- INIT_LIST_HEAD(&tmp->ctl_entry);
- tmp->used = 0;
- tmp->unregistering = NULL;
- sysctl_set_parent(NULL, table);
- if (sysctl_check_table(tmp->ctl_table)) {
- kfree(tmp);
+
+ new = (struct ctl_table *) (header + 1);
+
+ /* Now connect the dots */
+ prevp = &header->ctl_table;
+ for (n = 0; n < npath; ++n, ++path) {
+ /* Copy the procname */
+ new->procname = path->procname;
+ new->ctl_name = path->ctl_name;
+ new->mode = 0555;
+
+ *prevp = new;
+ prevp = &new->child;
+
+ new += 2;
+ }
+ *prevp = table;
+ header->ctl_table_arg = table;
+
+ INIT_LIST_HEAD(&header->ctl_entry);
+ header->used = 0;
+ header->unregistering = NULL;
+ header->root = root;
+ sysctl_set_parent(NULL, header->ctl_table);
+ if (sysctl_check_table(namespaces, header->ctl_table)) {
+ kfree(header);
return NULL;
}
spin_lock(&sysctl_lock);
- list_add_tail(&tmp->ctl_entry, &root_table_header.ctl_entry);
+ header_list = lookup_header_list(root, namespaces);
+ list_add_tail(&header->ctl_entry, header_list);
spin_unlock(&sysctl_lock);
- return tmp;
+
+ return header;
+}
+
+/**
+ * register_sysctl_paths - register a sysctl table hierarchy
+ * @path: The path to the directory the sysctl table is in.
+ * @table: the top-level table structure
+ *
+ * Register a sysctl table hierarchy. @table should be a filled in ctl_table
+ * array. A completely zero-filled entry terminates the table.
+ *
+ * See __register_sysctl_paths for more details.
+ */
+struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
+ struct ctl_table *table)
+{
+ return __register_sysctl_paths(&sysctl_table_root, current->nsproxy,
+ path, table);
+}
+
+/**
+ * register_sysctl_table - register a sysctl table hierarchy
+ * @table: the top-level table structure
+ *
+ * Register a sysctl table hierarchy. @table should be a filled in ctl_table
+ * array. A completely zero-filled entry terminates the table.
+ *
+ * See register_sysctl_paths for more details.
+ */
+struct ctl_table_header *register_sysctl_table(struct ctl_table *table)
+{
+ static const struct ctl_path null_path[] = { {} };
+
+ return register_sysctl_paths(null_path, table);
}
/**
@@ -1577,6 +1773,10 @@ struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
void unregister_sysctl_table(struct ctl_table_header * header)
{
might_sleep();
+
+ if (header == NULL)
+ return;
+
spin_lock(&sysctl_lock);
start_unregistering(header);
spin_unlock(&sysctl_lock);
@@ -1589,6 +1789,12 @@ struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
return NULL;
}
+struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
+ struct ctl_table *table)
+{
+ return NULL;
+}
+
void unregister_sysctl_table(struct ctl_table_header * table)
{
}
@@ -2609,6 +2815,10 @@ static int deprecated_sysctl_warning(struct __sysctl_args *args)
int name[CTL_MAXNAME];
int i;
+ /* Check args->nlen. */
+ if (args->nlen < 0 || args->nlen > CTL_MAXNAME)
+ return -ENOTDIR;
+
/* Read in the sysctl name for better debug message logging */
for (i = 0; i < args->nlen; i++)
if (get_user(name[i], args->name + i))
@@ -2643,6 +2853,7 @@ EXPORT_SYMBOL(proc_dostring);
EXPORT_SYMBOL(proc_doulongvec_minmax);
EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
EXPORT_SYMBOL(register_sysctl_table);
+EXPORT_SYMBOL(register_sysctl_paths);
EXPORT_SYMBOL(sysctl_intvec);
EXPORT_SYMBOL(sysctl_jiffies);
EXPORT_SYMBOL(sysctl_ms_jiffies);
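
The one-shot allocation in __register_sysctl_paths() above is worth spelling out: a single kzalloc() holds the ctl_table_header plus two ctl_table slots per path component, and the second slot of each pair stays zeroed as the sentinel. The sketch below shows the same chaining in plain userspace C; the struct definitions are simplified stand-ins for illustration, not the kernel types.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel structures (illustration only). */
struct ctl_table {
	const char *procname;
	struct ctl_table *child;
};

struct ctl_table_header {
	struct ctl_table *ctl_table;
};

/* One allocation: header followed by 2 ctl_table slots per path component. */
static struct ctl_table_header *
register_paths(const char **path, size_t npath, struct ctl_table *leaf)
{
	struct ctl_table_header *header;
	struct ctl_table *new, **prevp;
	size_t n;

	header = calloc(1, sizeof(*header) + 2 * npath * sizeof(*new));
	if (!header)
		return NULL;

	new = (struct ctl_table *)(header + 1);
	prevp = &header->ctl_table;
	for (n = 0; n < npath; n++) {
		new->procname = path[n];  /* first slot: the directory entry */
		*prevp = new;
		prevp = &new->child;
		new += 2;                 /* second slot stays zeroed: sentinel */
	}
	*prevp = leaf;
	return header;
}

int main(void)
{
	const char *path[] = { "kernel", "foo" };
	struct ctl_table leaf = { .procname = "bar", .child = NULL };
	struct ctl_table_header *h = register_paths(path, 2, &leaf);
	struct ctl_table *t;

	for (t = h->ctl_table; t; t = t->child)
		printf("%s%s", t->procname, t->child ? "/" : "\n");
	free(h);
	return 0;
}

Because everything lives in that one allocation, freeing the header tears down the whole directory chain, which is why unregister_sysctl_table() needs no extra bookkeeping for the path entries.
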
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index ed6fe51df77..c3206fa5004 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -1,6 +1,5 @@
#include <linux/stat.h>
#include <linux/sysctl.h>
-#include "../arch/s390/appldata/appldata.h"
#include "../fs/xfs/linux-2.6/xfs_sysctl.h"
#include <linux/sunrpc/debug.h>
#include <linux/string.h>
@@ -96,7 +95,7 @@ static struct trans_ctl_table trans_kern_table[] = {
{ KERN_PTY, "pty", trans_pty_table },
{ KERN_NGROUPS_MAX, "ngroups_max" },
- { KERN_SPARC_SCONS_PWROFF, "scons_poweroff" },
+ { KERN_SPARC_SCONS_PWROFF, "scons-poweroff" },
{ KERN_HZ_TIMER, "hz_timer" },
{ KERN_UNKNOWN_NMI_PANIC, "unknown_nmi_panic" },
{ KERN_BOOTLOADER_TYPE, "bootloader_type" },
@@ -140,9 +139,6 @@ static struct trans_ctl_table trans_vm_table[] = {
{ VM_PANIC_ON_OOM, "panic_on_oom" },
{ VM_VDSO_ENABLED, "vdso_enabled" },
{ VM_MIN_SLAB, "min_slab_ratio" },
- { VM_CMM_PAGES, "cmm_pages" },
- { VM_CMM_TIMED_PAGES, "cmm_timed_pages" },
- { VM_CMM_TIMEOUT, "cmm_timeout" },
{}
};
@@ -237,36 +233,6 @@ static struct trans_ctl_table trans_net_ipv4_conf_table[] = {
{}
};
-
-static struct trans_ctl_table trans_net_ipv4_vs_table[] = {
- { NET_IPV4_VS_AMEMTHRESH, "amemthresh" },
- { NET_IPV4_VS_DEBUG_LEVEL, "debug_level" },
- { NET_IPV4_VS_AMDROPRATE, "am_droprate" },
- { NET_IPV4_VS_DROP_ENTRY, "drop_entry" },
- { NET_IPV4_VS_DROP_PACKET, "drop_packet" },
- { NET_IPV4_VS_SECURE_TCP, "secure_tcp" },
- { NET_IPV4_VS_TO_ES, "timeout_established" },
- { NET_IPV4_VS_TO_SS, "timeout_synsent" },
- { NET_IPV4_VS_TO_SR, "timeout_synrecv" },
- { NET_IPV4_VS_TO_FW, "timeout_finwait" },
- { NET_IPV4_VS_TO_TW, "timeout_timewait" },
- { NET_IPV4_VS_TO_CL, "timeout_close" },
- { NET_IPV4_VS_TO_CW, "timeout_closewait" },
- { NET_IPV4_VS_TO_LA, "timeout_lastack" },
- { NET_IPV4_VS_TO_LI, "timeout_listen" },
- { NET_IPV4_VS_TO_SA, "timeout_synack" },
- { NET_IPV4_VS_TO_UDP, "timeout_udp" },
- { NET_IPV4_VS_TO_ICMP, "timeout_icmp" },
- { NET_IPV4_VS_CACHE_BYPASS, "cache_bypass" },
- { NET_IPV4_VS_EXPIRE_NODEST_CONN, "expire_nodest_conn" },
- { NET_IPV4_VS_EXPIRE_QUIESCENT_TEMPLATE, "expire_quiescent_template" },
- { NET_IPV4_VS_SYNC_THRESHOLD, "sync_threshold" },
- { NET_IPV4_VS_NAT_ICMP_SEND, "nat_icmp_send" },
- { NET_IPV4_VS_LBLC_EXPIRE, "lblc_expiration" },
- { NET_IPV4_VS_LBLCR_EXPIRE, "lblcr_expiration" },
- {}
-};
-
static struct trans_ctl_table trans_net_neigh_vars_table[] = {
{ NET_NEIGH_MCAST_SOLICIT, "mcast_solicit" },
{ NET_NEIGH_UCAST_SOLICIT, "ucast_solicit" },
@@ -341,7 +307,6 @@ static struct trans_ctl_table trans_net_ipv4_table[] = {
{ NET_IPV4_ROUTE, "route", trans_net_ipv4_route_table },
/* NET_IPV4_FIB_HASH unused */
{ NET_IPV4_NETFILTER, "netfilter", trans_net_ipv4_netfilter_table },
- { NET_IPV4_VS, "vs", trans_net_ipv4_vs_table },
{ NET_IPV4_TCP_TIMESTAMPS, "tcp_timestamps" },
{ NET_IPV4_TCP_WINDOW_SCALING, "tcp_window_scaling" },
@@ -462,7 +427,7 @@ static struct trans_ctl_table trans_net_netrom_table[] = {
{}
};
-static struct trans_ctl_table trans_net_ax25_table[] = {
+static struct trans_ctl_table trans_net_ax25_param_table[] = {
{ NET_AX25_IP_DEFAULT_MODE, "ip_default_mode" },
{ NET_AX25_DEFAULT_MODE, "ax25_default_mode" },
{ NET_AX25_BACKOFF_TYPE, "backoff_type" },
@@ -480,6 +445,11 @@ static struct trans_ctl_table trans_net_ax25_table[] = {
{}
};
+static struct trans_ctl_table trans_net_ax25_table[] = {
+ { 0, NULL, trans_net_ax25_param_table },
+ {}
+};
+
static struct trans_ctl_table trans_net_bridge_table[] = {
{ NET_BRIDGE_NF_CALL_ARPTABLES, "bridge-nf-call-arptables" },
{ NET_BRIDGE_NF_CALL_IPTABLES, "bridge-nf-call-iptables" },
@@ -738,7 +708,7 @@ static struct trans_ctl_table trans_net_table[] = {
{ NET_ROSE, "rose", trans_net_rose_table },
{ NET_IPV6, "ipv6", trans_net_ipv6_table },
{ NET_X25, "x25", trans_net_x25_table },
- { NET_TR, "tr", trans_net_tr_table },
+ { NET_TR, "token-ring", trans_net_tr_table },
{ NET_DECNET, "decnet", trans_net_decnet_table },
/* NET_ECONET not used */
{ NET_SCTP, "sctp", trans_net_sctp_table },
@@ -1219,16 +1189,6 @@ static struct trans_ctl_table trans_arlan_table[] = {
{}
};
-static struct trans_ctl_table trans_appldata_table[] = {
- { CTL_APPLDATA_TIMER, "timer" },
- { CTL_APPLDATA_INTERVAL, "interval" },
- { CTL_APPLDATA_OS, "os" },
- { CTL_APPLDATA_NET_SUM, "net_sum" },
- { CTL_APPLDATA_MEM, "mem" },
- {}
-
-};
-
static struct trans_ctl_table trans_s390dbf_table[] = {
{ 5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" },
{ 5679 /* CTL_S390DBF_ACTIVE */, "debug_active" },
@@ -1273,7 +1233,6 @@ static struct trans_ctl_table trans_root_table[] = {
{ CTL_ABI, "abi" },
/* CTL_CPU not used */
{ CTL_ARLAN, "arlan", trans_arlan_table },
- { CTL_APPLDATA, "appldata", trans_appldata_table },
{ CTL_S390DBF, "s390dbf", trans_s390dbf_table },
{ CTL_SUNRPC, "sunrpc", trans_sunrpc_table },
{ CTL_PM, "pm", trans_pm_table },
@@ -1383,7 +1342,8 @@ static void sysctl_repair_table(struct ctl_table *table)
}
}
-static struct ctl_table *sysctl_check_lookup(struct ctl_table *table)
+static struct ctl_table *sysctl_check_lookup(struct nsproxy *namespaces,
+ struct ctl_table *table)
{
struct ctl_table_header *head;
struct ctl_table *ref, *test;
@@ -1391,8 +1351,8 @@ static struct ctl_table *sysctl_check_lookup(struct ctl_table *table)
depth = sysctl_depth(table);
- for (head = sysctl_head_next(NULL); head;
- head = sysctl_head_next(head)) {
+ for (head = __sysctl_head_next(namespaces, NULL); head;
+ head = __sysctl_head_next(namespaces, head)) {
cur_depth = depth;
ref = head->ctl_table;
repeat:
@@ -1432,17 +1392,19 @@ static void set_fail(const char **fail, struct ctl_table *table, const char *str
printk(KERN_ERR "sysctl table check failed: ");
sysctl_print_path(table);
printk(" %s\n", *fail);
+ dump_stack();
}
*fail = str;
}
-static int sysctl_check_dir(struct ctl_table *table)
+static int sysctl_check_dir(struct nsproxy *namespaces,
+ struct ctl_table *table)
{
struct ctl_table *ref;
int error;
error = 0;
- ref = sysctl_check_lookup(table);
+ ref = sysctl_check_lookup(namespaces, table);
if (ref) {
int match = 0;
if ((!table->procname && !ref->procname) ||
@@ -1467,11 +1429,12 @@ static int sysctl_check_dir(struct ctl_table *table)
return error;
}
-static void sysctl_check_leaf(struct ctl_table *table, const char **fail)
+static void sysctl_check_leaf(struct nsproxy *namespaces,
+ struct ctl_table *table, const char **fail)
{
struct ctl_table *ref;
- ref = sysctl_check_lookup(table);
+ ref = sysctl_check_lookup(namespaces, table);
if (ref && (ref != table))
set_fail(fail, table, "Sysctl already exists");
}
@@ -1495,7 +1458,7 @@ static void sysctl_check_bin_path(struct ctl_table *table, const char **fail)
}
}
-int sysctl_check_table(struct ctl_table *table)
+int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
{
int error = 0;
for (; table->ctl_name || table->procname; table++) {
@@ -1525,7 +1488,7 @@ int sysctl_check_table(struct ctl_table *table)
set_fail(&fail, table, "Directory with extra1");
if (table->extra2)
set_fail(&fail, table, "Directory with extra2");
- if (sysctl_check_dir(table))
+ if (sysctl_check_dir(namespaces, table))
set_fail(&fail, table, "Inconsistent directory names");
} else {
if ((table->strategy == sysctl_data) ||
@@ -1574,7 +1537,7 @@ int sysctl_check_table(struct ctl_table *table)
if (!table->procname && table->proc_handler)
set_fail(&fail, table, "proc_handler without procname");
#endif
- sysctl_check_leaf(table, &fail);
+ sysctl_check_leaf(namespaces, table, &fail);
}
sysctl_check_bin_path(table, &fail);
if (fail) {
@@ -1582,7 +1545,7 @@ int sysctl_check_table(struct ctl_table *table)
error = -EINVAL;
}
if (table->child)
- error |= sysctl_check_table(table->child);
+ error |= sysctl_check_table(namespaces, table->child);
}
return error;
}
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 354e74bc17c..07e86a82807 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -398,31 +398,31 @@ static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
file = fget_light(fd, &fput_needed);
- if (file) {
- size = nla_total_size(sizeof(struct cgroupstats));
+ if (!file)
+ return 0;
- rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
- size);
- if (rc < 0)
- goto err;
+ size = nla_total_size(sizeof(struct cgroupstats));
- na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
- sizeof(struct cgroupstats));
- stats = nla_data(na);
- memset(stats, 0, sizeof(*stats));
+ rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
+ size);
+ if (rc < 0)
+ goto err;
- rc = cgroupstats_build(stats, file->f_dentry);
- if (rc < 0)
- goto err;
+ na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
+ sizeof(struct cgroupstats));
+ stats = nla_data(na);
+ memset(stats, 0, sizeof(*stats));
- fput_light(file, fput_needed);
- return send_reply(rep_skb, info->snd_pid);
+ rc = cgroupstats_build(stats, file->f_dentry);
+ if (rc < 0) {
+ nlmsg_free(rep_skb);
+ goto err;
}
+ rc = send_reply(rep_skb, info->snd_pid);
+
err:
- if (file)
- fput_light(file, fput_needed);
- nlmsg_free(rep_skb);
+ fput_light(file, fput_needed);
return rc;
}
diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c
new file mode 100644
index 00000000000..88cdb109e13
--- /dev/null
+++ b/kernel/test_kprobes.c
@@ -0,0 +1,216 @@
+/*
+ * test_kprobes.c - simple sanity test for *probes
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/random.h>
+
+#define div_factor 3
+
+static u32 rand1, preh_val, posth_val, jph_val;
+static int errors, handler_errors, num_tests;
+
+static noinline u32 kprobe_target(u32 value)
+{
+ /*
+ * gcc ignores noinline on some architectures unless we stuff
+ * sufficient lard into the function. The get_kprobe() here is
+ * just for that.
+ *
+ * NOTE: We aren't concerned about the correctness of get_kprobe()
+ * here; hence, this call is neither under !preempt nor with the
+ * kprobe_mutex held. This is fine(tm)
+ */
+ if (get_kprobe((void *)0xdeadbeef))
+ printk(KERN_INFO "Kprobe smoke test: probe on 0xdeadbeef!\n");
+
+ return (value / div_factor);
+}
+
+static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ preh_val = (rand1 / div_factor);
+ return 0;
+}
+
+static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+ if (preh_val != (rand1 / div_factor)) {
+ handler_errors++;
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "incorrect value in post_handler\n");
+ }
+ posth_val = preh_val + div_factor;
+}
+
+static struct kprobe kp = {
+ .symbol_name = "kprobe_target",
+ .pre_handler = kp_pre_handler,
+ .post_handler = kp_post_handler
+};
+
+static int test_kprobe(void)
+{
+ int ret;
+
+ ret = register_kprobe(&kp);
+ if (ret < 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "register_kprobe returned %d\n", ret);
+ return ret;
+ }
+
+ ret = kprobe_target(rand1);
+ unregister_kprobe(&kp);
+
+ if (preh_val == 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "kprobe pre_handler not called\n");
+ handler_errors++;
+ }
+
+ if (posth_val == 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "kprobe post_handler not called\n");
+ handler_errors++;
+ }
+
+ return 0;
+}
+
+static u32 j_kprobe_target(u32 value)
+{
+ if (value != rand1) {
+ handler_errors++;
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "incorrect value in jprobe handler\n");
+ }
+
+ jph_val = rand1;
+ jprobe_return();
+ return 0;
+}
+
+static struct jprobe jp = {
+ .entry = j_kprobe_target,
+ .kp.symbol_name = "kprobe_target"
+};
+
+static int test_jprobe(void)
+{
+ int ret;
+
+ ret = register_jprobe(&jp);
+ if (ret < 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "register_jprobe returned %d\n", ret);
+ return ret;
+ }
+
+ ret = kprobe_target(rand1);
+ unregister_jprobe(&jp);
+ if (jph_val == 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "jprobe handler not called\n");
+ handler_errors++;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_KRETPROBES
+static u32 krph_val;
+
+static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long ret = regs_return_value(regs);
+
+ if (ret != (rand1 / div_factor)) {
+ handler_errors++;
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "incorrect value in kretprobe handler\n");
+ }
+
+ krph_val = (rand1 / div_factor);
+ return 0;
+}
+
+static struct kretprobe rp = {
+ .handler = return_handler,
+ .kp.symbol_name = "kprobe_target"
+};
+
+static int test_kretprobe(void)
+{
+ int ret;
+
+ ret = register_kretprobe(&rp);
+ if (ret < 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "register_kretprobe returned %d\n", ret);
+ return ret;
+ }
+
+ ret = kprobe_target(rand1);
+ unregister_kretprobe(&rp);
+ if (krph_val == 0) {
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "kretprobe handler not called\n");
+ handler_errors++;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_KRETPROBES */
+
+int init_test_probes(void)
+{
+ int ret;
+
+ do {
+ rand1 = random32();
+ } while (rand1 <= div_factor);
+
+ printk(KERN_INFO "Kprobe smoke test started\n");
+ num_tests++;
+ ret = test_kprobe();
+ if (ret < 0)
+ errors++;
+
+ num_tests++;
+ ret = test_jprobe();
+ if (ret < 0)
+ errors++;
+
+#ifdef CONFIG_KRETPROBES
+ num_tests++;
+ ret = test_kretprobe();
+ if (ret < 0)
+ errors++;
+#endif /* CONFIG_KRETPROBES */
+
+ if (errors)
+ printk(KERN_ERR "BUG: Kprobe smoke test: %d out of "
+ "%d tests failed\n", errors, num_tests);
+ else if (handler_errors)
+ printk(KERN_ERR "BUG: Kprobe smoke test: %d error(s) "
+ "running handlers\n", handler_errors);
+ else
+ printk(KERN_INFO "Kprobe smoke test passed successfully\n");
+
+ return 0;
+}
diff --git a/kernel/time.c b/kernel/time.c
index 09d3c45c4da..4064c0566e7 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -129,6 +129,7 @@ static inline void warp_clock(void)
write_seqlock_irq(&xtime_lock);
wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
xtime.tv_sec += sys_tz.tz_minuteswest * 60;
+ update_xtime_cache(0);
write_sequnlock_irq(&xtime_lock);
clock_was_set();
}
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 822beebe664..3e59fce6dd4 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -41,6 +41,11 @@ unsigned long clockevent_delta2ns(unsigned long latch,
{
u64 clc = ((u64) latch << evt->shift);
+ if (unlikely(!evt->mult)) {
+ evt->mult = 1;
+ WARN_ON(1);
+ }
+
do_div(clc, evt->mult);
if (clc < 1000)
clc = 1000;
@@ -78,6 +83,11 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
unsigned long long clc;
int64_t delta;
+ if (unlikely(expires.tv64 < 0)) {
+ WARN_ON_ONCE(1);
+ return -ETIME;
+ }
+
delta = ktime_to_ns(ktime_sub(expires, now));
if (delta <= 0)
@@ -146,6 +156,14 @@ static void clockevents_notify_released(void)
void clockevents_register_device(struct clock_event_device *dev)
{
BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
+ /*
+ * A nsec2cyc multiplier of 0 is invalid and we'd crash
+ * on it, so fix it up and emit a warning:
+ */
+ if (unlikely(!dev->mult)) {
+ dev->mult = 1;
+ WARN_ON(1);
+ }
spin_lock(&clockevents_lock);
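
Both hunks above fix up a zero mult before it can reach do_div(). A minimal userspace sketch of the same guard, with simplified arithmetic in place of the kernel's clockevent_delta2ns():

#include <stdio.h>
#include <stdint.h>

/* Simplified latch-to-nanoseconds conversion in the spirit of
 * clockevent_delta2ns(): shift up, then divide by the multiplier. */
static uint64_t delta2ns(unsigned long latch, uint32_t mult, uint32_t shift)
{
	uint64_t clc = (uint64_t)latch << shift;

	/* The patch above: mult == 0 would be a divide-by-zero, so fix
	 * it up to 1 and warn instead of crashing. */
	if (mult == 0) {
		fprintf(stderr, "warning: clock event device has mult=0\n");
		mult = 1;
	}

	clc /= mult;
	return clc < 1000 ? 1000 : clc;
}

int main(void)
{
	printf("%llu ns\n", (unsigned long long)delta2ns(0x7fff, 0, 20));
	printf("%llu ns\n", (unsigned long long)delta2ns(0x7fff, 83, 20));
	return 0;
}
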
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c8a9d13874d..6e9259a5d50 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -142,8 +142,13 @@ static void clocksource_watchdog(unsigned long data)
}
if (!list_empty(&watchdog_list)) {
- __mod_timer(&watchdog_timer,
- watchdog_timer.expires + WATCHDOG_INTERVAL);
+ /* Cycle through CPUs to check if the CPUs stay synchronized to
+ * each other. */
+ int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map);
+ if (next_cpu >= NR_CPUS)
+ next_cpu = first_cpu(cpu_online_map);
+ watchdog_timer.expires += WATCHDOG_INTERVAL;
+ add_timer_on(&watchdog_timer, next_cpu);
}
spin_unlock(&watchdog_lock);
}
@@ -165,7 +170,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
if (!started && watchdog) {
watchdog_last = watchdog->read();
watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
- add_timer(&watchdog_timer);
+ add_timer_on(&watchdog_timer, first_cpu(cpu_online_map));
}
} else {
if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
@@ -175,7 +180,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
if (watchdog)
del_timer(&watchdog_timer);
watchdog = cs;
- init_timer(&watchdog_timer);
+ init_timer_deferrable(&watchdog_timer);
watchdog_timer.function = clocksource_watchdog;
/* Reset watchdog cycles */
@@ -186,7 +191,8 @@ static void clocksource_check_watchdog(struct clocksource *cs)
watchdog_last = watchdog->read();
watchdog_timer.expires =
jiffies + WATCHDOG_INTERVAL;
- add_timer(&watchdog_timer);
+ add_timer_on(&watchdog_timer,
+ first_cpu(cpu_online_map));
}
}
}
@@ -331,6 +337,21 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
spin_unlock_irqrestore(&clocksource_lock, flags);
}
+/**
+ * clocksource_unregister - remove a registered clocksource
+ */
+void clocksource_unregister(struct clocksource *cs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocksource_lock, flags);
+ list_del(&cs->list);
+ if (clocksource_override == cs)
+ clocksource_override = NULL;
+ next_clocksource = select_clocksource();
+ spin_unlock_irqrestore(&clocksource_lock, flags);
+}
+
#ifdef CONFIG_SYSFS
/**
* sysfs_show_current_clocksources - sysfs interface for current clocksource
@@ -441,7 +462,7 @@ static SYSDEV_ATTR(available_clocksource, 0600,
sysfs_show_available_clocksources, NULL);
static struct sysdev_class clocksource_sysclass = {
- set_kset_name("clocksource"),
+ .name = "clocksource",
};
static struct sys_device device_clocksource = {
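
The watchdog change above no longer re-arms the timer on whatever CPU it last ran on: it walks the online CPU map with next_cpu(), wrapping back to first_cpu() once it runs past NR_CPUS, so successive checks land on different CPUs. A simplified userspace round-robin of the same shape (the four-CPU online mask is made up for illustration):

#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical online mask: CPUs 0, 1 and 3 online, CPU 2 offline. */
static const int cpu_online[NR_CPUS] = { 1, 1, 0, 1 };

/* Next online CPU strictly after 'cpu', or NR_CPUS if there is none. */
static int next_cpu(int cpu)
{
	for (int i = cpu + 1; i < NR_CPUS; i++)
		if (cpu_online[i])
			return i;
	return NR_CPUS;
}

static int first_cpu(void)
{
	for (int i = 0; i < NR_CPUS; i++)
		if (cpu_online[i])
			return i;
	return NR_CPUS;
}

int main(void)
{
	int cpu = first_cpu();

	/* The watchdog timer hops to a different CPU on every interval. */
	for (int tick = 0; tick < 8; tick++) {
		printf("watchdog runs on cpu %d\n", cpu);
		cpu = next_cpu(cpu);
		if (cpu >= NR_CPUS)
			cpu = first_cpu();
	}
	return 0;
}
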
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index de6a2d6b3eb..e64efaf957e 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -205,7 +205,7 @@ static void sync_cmos_clock(unsigned long dummy)
return;
getnstimeofday(&now);
- if (abs(xtime.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
+ if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
fail = update_persistent_clock(now);
next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec;
@@ -249,10 +249,12 @@ int do_adjtimex(struct timex *txc)
/* Now we validate the data before disabling interrupts */
- if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
+ if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
/* singleshot must not be used with any other mode bits */
- if (txc->modes != ADJ_OFFSET_SINGLESHOT)
+ if (txc->modes != ADJ_OFFSET_SINGLESHOT &&
+ txc->modes != ADJ_OFFSET_SS_READ)
return -EINVAL;
+ }
if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET))
/* adjustment Offset limited to +- .512 seconds */
@@ -372,7 +374,8 @@ int do_adjtimex(struct timex *txc)
leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
result = TIME_ERROR;
- if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
+ if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
+ (txc->modes == ADJ_OFFSET_SS_READ))
txc->offset = save_adjust;
else
txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) *
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 8cfb8b2ce77..e1bd50cbbf5 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -126,9 +126,9 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
/*
* Broadcast the event to the cpus, which are set in the mask
*/
-int tick_do_broadcast(cpumask_t mask)
+static void tick_do_broadcast(cpumask_t mask)
{
- int ret = 0, cpu = smp_processor_id();
+ int cpu = smp_processor_id();
struct tick_device *td;
/*
@@ -138,7 +138,6 @@ int tick_do_broadcast(cpumask_t mask)
cpu_clear(cpu, mask);
td = &per_cpu(tick_cpu_device, cpu);
td->evtdev->event_handler(td->evtdev);
- ret = 1;
}
if (!cpus_empty(mask)) {
@@ -151,9 +150,7 @@ int tick_do_broadcast(cpumask_t mask)
cpu = first_cpu(mask);
td = &per_cpu(tick_cpu_device, cpu);
td->evtdev->broadcast(mask);
- ret = 1;
}
- return ret;
}
/*
@@ -384,45 +381,19 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
}
/*
- * Reprogram the broadcast device:
- *
- * Called with tick_broadcast_lock held and interrupts disabled.
- */
-static int tick_broadcast_reprogram(void)
-{
- ktime_t expires = { .tv64 = KTIME_MAX };
- struct tick_device *td;
- int cpu;
-
- /*
- * Find the event which expires next:
- */
- for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
- cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
- td = &per_cpu(tick_cpu_device, cpu);
- if (td->evtdev->next_event.tv64 < expires.tv64)
- expires = td->evtdev->next_event;
- }
-
- if (expires.tv64 == KTIME_MAX)
- return 0;
-
- return tick_broadcast_set_event(expires, 0);
-}
-
-/*
* Handle oneshot mode broadcasting
*/
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
struct tick_device *td;
cpumask_t mask;
- ktime_t now;
+ ktime_t now, next_event;
int cpu;
spin_lock(&tick_broadcast_lock);
again:
dev->next_event.tv64 = KTIME_MAX;
+ next_event.tv64 = KTIME_MAX;
mask = CPU_MASK_NONE;
now = ktime_get();
/* Find all expired events */
@@ -431,19 +402,31 @@ again:
td = &per_cpu(tick_cpu_device, cpu);
if (td->evtdev->next_event.tv64 <= now.tv64)
cpu_set(cpu, mask);
+ else if (td->evtdev->next_event.tv64 < next_event.tv64)
+ next_event.tv64 = td->evtdev->next_event.tv64;
}
/*
- * Wakeup the cpus which have an expired event. The broadcast
- * device is reprogrammed in the return from idle code.
+ * Wakeup the cpus which have an expired event.
+ */
+ tick_do_broadcast(mask);
+
+ /*
+ * Two reasons to reprogram:
+ *
+ * - The global event did not expire any CPU local
+ * events. This happens in dyntick mode, as the maximum PIT
+ * delta is quite small.
+ *
+ * - There are pending events on sleeping CPUs which were not
+ * in the event mask
*/
- if (!tick_do_broadcast(mask)) {
+ if (next_event.tv64 != KTIME_MAX) {
/*
- * The global event did not expire any CPU local
- * events. This happens in dyntick mode, as the
- * maximum PIT delta is quite small.
+ * Rearm the broadcast device. If event expired,
+ * repeat the above
*/
- if (tick_broadcast_reprogram())
+ if (tick_broadcast_set_event(next_event, 0))
goto again;
}
spin_unlock(&tick_broadcast_lock);
@@ -508,7 +491,7 @@ static void tick_broadcast_clear_oneshot(int cpu)
}
/**
- * tick_broadcast_setup_highres - setup the broadcast device for highres
+ * tick_broadcast_setup_oneshot - setup the broadcast device
*/
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
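
The rewritten tick_handle_oneshot_broadcast() above replaces the separate tick_broadcast_reprogram() pass: one scan collects both the CPUs whose events have expired (to broadcast to) and the earliest still-pending expiry (to rearm the broadcast device with). A compact sketch of that single pass, using plain 64-bit integers and made-up timestamps instead of ktime_t and per-CPU tick devices:

#include <stdio.h>
#include <stdint.h>

#define KTIME_MAX INT64_MAX
#define NCPUS 4

int main(void)
{
	/* Hypothetical per-CPU next_event timestamps (ns) and current time. */
	int64_t next_event_of[NCPUS] = { 100, 250, 90, 400 };
	int64_t now = 120;

	unsigned int expired_mask = 0;
	int64_t next_event = KTIME_MAX;

	/* One pass: collect expired CPUs and the earliest pending event. */
	for (int cpu = 0; cpu < NCPUS; cpu++) {
		if (next_event_of[cpu] <= now)
			expired_mask |= 1u << cpu;
		else if (next_event_of[cpu] < next_event)
			next_event = next_event_of[cpu];
	}

	printf("broadcast wakeup mask: 0x%x\n", expired_mask);
	if (next_event != KTIME_MAX)
		printf("rearm broadcast device for t=%lld\n",
		       (long long)next_event);
	return 0;
}
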
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index bb13f272490..f13f2b7f4fd 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -70,8 +70,6 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
* Broadcasting support
*/
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-extern int tick_do_broadcast(cpumask_t mask);
-
extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu);
extern int tick_check_broadcast_device(struct clock_event_device *dev);
extern int tick_is_broadcast_device(struct clock_event_device *dev);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 10a1347597f..88267f0a847 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -9,7 +9,7 @@
*
* Started by: Thomas Gleixner and Ingo Molnar
*
- * For licencing details see kernel-base/COPYING
+ * Distribute under GPLv2.
*/
#include <linux/cpu.h>
#include <linux/err.h>
@@ -133,14 +133,55 @@ void tick_nohz_update_jiffies(void)
if (!ts->tick_stopped)
return;
+ touch_softlockup_watchdog();
+
cpu_clear(cpu, nohz_cpu_mask);
now = ktime_get();
+ ts->idle_waketime = now;
local_irq_save(flags);
tick_do_update_jiffies64(now);
local_irq_restore(flags);
}
+void tick_nohz_stop_idle(int cpu)
+{
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+ if (ts->idle_active) {
+ ktime_t now, delta;
+ now = ktime_get();
+ delta = ktime_sub(now, ts->idle_entrytime);
+ ts->idle_lastupdate = now;
+ ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+ ts->idle_active = 0;
+ }
+}
+
+static ktime_t tick_nohz_start_idle(int cpu)
+{
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ ktime_t now, delta;
+
+ now = ktime_get();
+ if (ts->idle_active) {
+ delta = ktime_sub(now, ts->idle_entrytime);
+ ts->idle_lastupdate = now;
+ ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+ }
+ ts->idle_entrytime = now;
+ ts->idle_active = 1;
+ return now;
+}
+
+u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
+{
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+ *last_update_time = ktime_to_us(ts->idle_lastupdate);
+ return ktime_to_us(ts->idle_sleeptime);
+}
+
/**
* tick_nohz_stop_sched_tick - stop the idle tick from the idle task
*
@@ -151,14 +192,16 @@ void tick_nohz_update_jiffies(void)
void tick_nohz_stop_sched_tick(void)
{
unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
+ unsigned long rt_jiffies;
struct tick_sched *ts;
- ktime_t last_update, expires, now, delta;
+ ktime_t last_update, expires, now;
struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
int cpu;
local_irq_save(flags);
cpu = smp_processor_id();
+ now = tick_nohz_start_idle(cpu);
ts = &per_cpu(tick_cpu_sched, cpu);
/*
@@ -190,19 +233,7 @@ void tick_nohz_stop_sched_tick(void)
}
}
- now = ktime_get();
- /*
- * When called from irq_exit we need to account the idle sleep time
- * correctly.
- */
- if (ts->tick_stopped) {
- delta = ktime_sub(now, ts->idle_entrytime);
- ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
- }
-
- ts->idle_entrytime = now;
ts->idle_calls++;
-
/* Read jiffies and the time when jiffies were updated last */
do {
seq = read_seqbegin(&xtime_lock);
@@ -214,6 +245,10 @@ void tick_nohz_stop_sched_tick(void)
next_jiffies = get_next_timer_interrupt(last_jiffies);
delta_jiffies = next_jiffies - last_jiffies;
+ rt_jiffies = rt_needs_cpu(cpu);
+ if (rt_jiffies && rt_jiffies < delta_jiffies)
+ delta_jiffies = rt_jiffies;
+
if (rcu_needs_cpu(cpu))
delta_jiffies = 1;
/*
@@ -289,7 +324,7 @@ void tick_nohz_stop_sched_tick(void)
/* Check, if the timer was already in the past */
if (hrtimer_active(&ts->sched_timer))
goto out;
- } else if(!tick_program_event(expires, 0))
+ } else if (!tick_program_event(expires, 0))
goto out;
/*
* We are past the event already. So we crossed a
@@ -320,10 +355,8 @@ ktime_t tick_nohz_get_sleep_length(void)
return ts->sleep_length;
}
-EXPORT_SYMBOL_GPL(tick_nohz_get_sleep_length);
-
/**
- * nohz_restart_sched_tick - restart the idle tick from the idle task
+ * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
*
* Restart the idle tick when the CPU is woken up from idle
*/
@@ -332,23 +365,22 @@ void tick_nohz_restart_sched_tick(void)
int cpu = smp_processor_id();
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
unsigned long ticks;
- ktime_t now, delta;
+ ktime_t now;
- if (!ts->tick_stopped)
+ local_irq_disable();
+ tick_nohz_stop_idle(cpu);
+
+ if (!ts->tick_stopped) {
+ local_irq_enable();
return;
+ }
/* Update jiffies first */
- now = ktime_get();
-
- local_irq_disable();
select_nohz_load_balancer(0);
+ now = ktime_get();
tick_do_update_jiffies64(now);
cpu_clear(cpu, nohz_cpu_mask);
- /* Account the idle time */
- delta = ktime_sub(now, ts->idle_entrytime);
- ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-
/*
* We stopped the tick in idle. Update process times would miss the
* time we slept as update_process_times does only a 1 tick
@@ -369,6 +401,7 @@ void tick_nohz_restart_sched_tick(void)
* Cancel the scheduled timer and restore the tick
*/
ts->tick_stopped = 0;
+ ts->idle_exittime = now;
hrtimer_cancel(&ts->sched_timer);
ts->sched_timer.expires = ts->idle_tick;
@@ -502,14 +535,13 @@ static inline void tick_nohz_switch_to_nohz(void) { }
*/
#ifdef CONFIG_HIGH_RES_TIMERS
/*
- * We rearm the timer until we get disabled by the idle code
+ * We rearm the timer until we get disabled by the idle code.
* Called with interrupts disabled and timer->base->cpu_base->lock held.
*/
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
struct tick_sched *ts =
container_of(timer, struct tick_sched, sched_timer);
- struct hrtimer_cpu_base *base = timer->base->cpu_base;
struct pt_regs *regs = get_irq_regs();
ktime_t now = ktime_get();
int cpu = smp_processor_id();
@@ -547,15 +579,8 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
touch_softlockup_watchdog();
ts->idle_jiffies++;
}
- /*
- * update_process_times() might take tasklist_lock, hence
- * drop the base lock. sched-tick hrtimers are per-CPU and
- * never accessible by userspace APIs, so this is safe to do.
- */
- spin_unlock(&base->lock);
update_process_times(user_mode(regs));
profile_tick(CPU_PROFILING);
- spin_lock(&base->lock);
}
/* Do not restart, when we are in the idle loop */
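
The idle-time bookkeeping added above keeps a running sum per CPU: tick_nohz_start_idle() opens an interval, tick_nohz_stop_idle() closes it and adds the delta to idle_sleeptime, and get_cpu_idle_time_us() reports the accumulated total in microseconds. A userspace sketch of the same accounting, with plain nanosecond integers standing in for ktime_t and a local struct standing in for struct tick_sched:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for struct tick_sched's idle accounting fields. */
struct idle_stats {
	int active;
	int64_t entrytime;	/* ns: start of the current idle interval */
	int64_t sleeptime;	/* ns: accumulated idle time */
	int64_t lastupdate;	/* ns: last time sleeptime was updated */
};

static void start_idle(struct idle_stats *s, int64_t now)
{
	if (s->active)			/* close any open interval first */
		s->sleeptime += now - s->entrytime;
	s->entrytime = now;
	s->active = 1;
}

static void stop_idle(struct idle_stats *s, int64_t now)
{
	if (s->active) {
		s->sleeptime += now - s->entrytime;
		s->lastupdate = now;
		s->active = 0;
	}
}

int main(void)
{
	struct idle_stats s = { 0 };

	start_idle(&s, 1000000);	/* enter idle at t = 1 ms */
	stop_idle(&s, 4000000);		/* interrupt at t = 4 ms */
	start_idle(&s, 5000000);
	stop_idle(&s, 6000000);

	/* get_cpu_idle_time_us() reports this total in microseconds. */
	printf("idle time: %lld us\n", (long long)(s.sleeptime / 1000));
	return 0;
}
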
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e5e466b2759..cd5dbc4579c 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -47,7 +47,7 @@ struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time; /* seconds */
static struct timespec xtime_cache __attribute__ ((aligned (16)));
-static inline void update_xtime_cache(u64 nsec)
+void update_xtime_cache(u64 nsec)
{
xtime_cache = xtime;
timespec_add_ns(&xtime_cache, nsec);
@@ -82,13 +82,12 @@ static inline s64 __get_nsec_offset(void)
}
/**
- * __get_realtime_clock_ts - Returns the time of day in a timespec
+ * getnstimeofday - Returns the time of day in a timespec
* @ts: pointer to the timespec to be set
*
- * Returns the time of day in a timespec. Used by
- * do_gettimeofday() and get_realtime_clock_ts().
+ * Returns the time of day in a timespec.
*/
-static inline void __get_realtime_clock_ts(struct timespec *ts)
+void getnstimeofday(struct timespec *ts)
{
unsigned long seq;
s64 nsecs;
@@ -104,30 +103,19 @@ static inline void __get_realtime_clock_ts(struct timespec *ts)
timespec_add_ns(ts, nsecs);
}
-/**
- * getnstimeofday - Returns the time of day in a timespec
- * @ts: pointer to the timespec to be set
- *
- * Returns the time of day in a timespec.
- */
-void getnstimeofday(struct timespec *ts)
-{
- __get_realtime_clock_ts(ts);
-}
-
EXPORT_SYMBOL(getnstimeofday);
/**
* do_gettimeofday - Returns the time of day in a timeval
* @tv: pointer to the timeval to be set
*
- * NOTE: Users should be converted to using get_realtime_clock_ts()
+ * NOTE: Users should be converted to using getnstimeofday()
*/
void do_gettimeofday(struct timeval *tv)
{
struct timespec now;
- __get_realtime_clock_ts(&now);
+ getnstimeofday(&now);
tv->tv_sec = now.tv_sec;
tv->tv_usec = now.tv_nsec/1000;
}
@@ -157,6 +145,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+ update_xtime_cache(0);
clock->error = 0;
ntp_clear();
@@ -198,7 +187,8 @@ static void change_clocksource(void)
clock->error = 0;
clock->xtime_nsec = 0;
- clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
+ clocksource_calculate_interval(clock,
+ (unsigned long)(current_tick_length()>>TICK_LENGTH_SHIFT));
tick_clock_notify();
@@ -255,15 +245,16 @@ void __init timekeeping_init(void)
ntp_clear();
clock = clocksource_get_next();
- clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
+ clocksource_calculate_interval(clock,
+ (unsigned long)(current_tick_length()>>TICK_LENGTH_SHIFT));
clock->cycle_last = clocksource_read(clock);
xtime.tv_sec = sec;
xtime.tv_nsec = 0;
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
+ update_xtime_cache(0);
total_sleep_time = 0;
-
write_sequnlock_irqrestore(&xtime_lock, flags);
}
@@ -300,6 +291,7 @@ static int timekeeping_resume(struct sys_device *dev)
}
/* Make sure that we have the correct xtime reference */
timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
+ update_xtime_cache(0);
/* re-base the last cycle value */
clock->cycle_last = clocksource_read(clock);
clock->error = 0;
@@ -335,9 +327,9 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
+ .name = "timekeeping",
.resume = timekeeping_resume,
.suspend = timekeeping_suspend,
- set_kset_name("timekeeping"),
};
static struct sys_device device_timer = {
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index fdb2e03d4fe..d3d94c1a0fd 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -129,7 +129,8 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
- SEQ_printf(m, "\ncpu: %d\n", cpu);
+ SEQ_printf(m, "\n");
+ SEQ_printf(m, "cpu: %d\n", cpu);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
SEQ_printf(m, " clock %d:\n", i);
print_base(m, cpu_base->clock_base + i, now);
@@ -165,6 +166,8 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
P(idle_calls);
P(idle_sleeps);
P_ns(idle_entrytime);
+ P_ns(idle_waketime);
+ P_ns(idle_exittime);
P_ns(idle_sleeptime);
P(last_jiffies);
P(next_jiffies);
@@ -184,7 +187,8 @@ print_tickdevice(struct seq_file *m, struct tick_device *td)
{
struct clock_event_device *dev = td->evtdev;
- SEQ_printf(m, "\nTick Device: mode: %d\n", td->mode);
+ SEQ_printf(m, "\n");
+ SEQ_printf(m, "Tick Device: mode: %d\n", td->mode);
SEQ_printf(m, "Clock Event Device: ");
if (!dev) {
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index c36bb7ed030..417da8c5bc7 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -26,7 +26,7 @@
* the pid and cmdline from the owner process if applicable.
*
* Start/stop data collection:
- * # echo 1[0] >/proc/timer_stats
+ * # echo [1|0] >/proc/timer_stats
*
* Display the information collected so far:
* # cat /proc/timer_stats
diff --git a/kernel/timer.c b/kernel/timer.c
index fb4e67d5dd6..9fbb472b8cf 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -58,59 +58,57 @@ EXPORT_SYMBOL(jiffies_64);
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
-typedef struct tvec_s {
+struct tvec {
struct list_head vec[TVN_SIZE];
-} tvec_t;
+};
-typedef struct tvec_root_s {
+struct tvec_root {
struct list_head vec[TVR_SIZE];
-} tvec_root_t;
+};
-struct tvec_t_base_s {
+struct tvec_base {
spinlock_t lock;
struct timer_list *running_timer;
unsigned long timer_jiffies;
- tvec_root_t tv1;
- tvec_t tv2;
- tvec_t tv3;
- tvec_t tv4;
- tvec_t tv5;
+ struct tvec_root tv1;
+ struct tvec tv2;
+ struct tvec tv3;
+ struct tvec tv4;
+ struct tvec tv5;
} ____cacheline_aligned;
-typedef struct tvec_t_base_s tvec_base_t;
-
-tvec_base_t boot_tvec_bases;
+struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
-static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
+static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
/*
- * Note that all tvec_bases is 2 byte aligned and lower bit of
+ * Note that all tvec_bases are 2 byte aligned and lower bit of
* base in timer_list is guaranteed to be zero. Use the LSB for
* the new flag to indicate whether the timer is deferrable
*/
#define TBASE_DEFERRABLE_FLAG (0x1)
/* Functions below help us manage 'deferrable' flag */
-static inline unsigned int tbase_get_deferrable(tvec_base_t *base)
+static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}
-static inline tvec_base_t *tbase_get_base(tvec_base_t *base)
+static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
- return ((tvec_base_t *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
+ return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}
static inline void timer_set_deferrable(struct timer_list *timer)
{
- timer->base = ((tvec_base_t *)((unsigned long)(timer->base) |
+ timer->base = ((struct tvec_base *)((unsigned long)(timer->base) |
TBASE_DEFERRABLE_FLAG));
}
static inline void
-timer_set_base(struct timer_list *timer, tvec_base_t *new_base)
+timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
- timer->base = (tvec_base_t *)((unsigned long)(new_base) |
+ timer->base = (struct tvec_base *)((unsigned long)(new_base) |
tbase_get_deferrable(timer->base));
}
@@ -246,7 +244,7 @@ unsigned long round_jiffies_relative(unsigned long j)
EXPORT_SYMBOL_GPL(round_jiffies_relative);
-static inline void set_running_timer(tvec_base_t *base,
+static inline void set_running_timer(struct tvec_base *base,
struct timer_list *timer)
{
#ifdef CONFIG_SMP
@@ -254,7 +252,7 @@ static inline void set_running_timer(tvec_base_t *base,
#endif
}
-static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
+static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
unsigned long expires = timer->expires;
unsigned long idx = expires - base->timer_jiffies;
@@ -371,14 +369,14 @@ static inline void detach_timer(struct timer_list *timer,
* possible to set timer->base = NULL and drop the lock: the timer remains
* locked.
*/
-static tvec_base_t *lock_timer_base(struct timer_list *timer,
+static struct tvec_base *lock_timer_base(struct timer_list *timer,
unsigned long *flags)
__acquires(timer->base->lock)
{
- tvec_base_t *base;
+ struct tvec_base *base;
for (;;) {
- tvec_base_t *prelock_base = timer->base;
+ struct tvec_base *prelock_base = timer->base;
base = tbase_get_base(prelock_base);
if (likely(base != NULL)) {
spin_lock_irqsave(&base->lock, *flags);
@@ -393,7 +391,7 @@ static tvec_base_t *lock_timer_base(struct timer_list *timer,
int __mod_timer(struct timer_list *timer, unsigned long expires)
{
- tvec_base_t *base, *new_base;
+ struct tvec_base *base, *new_base;
unsigned long flags;
int ret = 0;
@@ -445,7 +443,7 @@ EXPORT_SYMBOL(__mod_timer);
*/
void add_timer_on(struct timer_list *timer, int cpu)
{
- tvec_base_t *base = per_cpu(tvec_bases, cpu);
+ struct tvec_base *base = per_cpu(tvec_bases, cpu);
unsigned long flags;
timer_stats_timer_set_start_info(timer);
@@ -508,7 +506,7 @@ EXPORT_SYMBOL(mod_timer);
*/
int del_timer(struct timer_list *timer)
{
- tvec_base_t *base;
+ struct tvec_base *base;
unsigned long flags;
int ret = 0;
@@ -539,7 +537,7 @@ EXPORT_SYMBOL(del_timer);
*/
int try_to_del_timer_sync(struct timer_list *timer)
{
- tvec_base_t *base;
+ struct tvec_base *base;
unsigned long flags;
int ret = -1;
@@ -591,7 +589,7 @@ int del_timer_sync(struct timer_list *timer)
EXPORT_SYMBOL(del_timer_sync);
#endif
-static int cascade(tvec_base_t *base, tvec_t *tv, int index)
+static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
/* cascade all the timers from tv up one level */
struct timer_list *timer, *tmp;
@@ -620,7 +618,7 @@ static int cascade(tvec_base_t *base, tvec_t *tv, int index)
* This function cascades all vectors and executes all expired timer
* vectors.
*/
-static inline void __run_timers(tvec_base_t *base)
+static inline void __run_timers(struct tvec_base *base)
{
struct timer_list *timer;
@@ -657,7 +655,7 @@ static inline void __run_timers(tvec_base_t *base)
int preempt_count = preempt_count();
fn(data);
if (preempt_count != preempt_count()) {
- printk(KERN_WARNING "huh, entered %p "
+ printk(KERN_ERR "huh, entered %p "
"with preempt_count %08x, exited"
" with %08x?\n",
fn, preempt_count,
@@ -678,13 +676,13 @@ static inline void __run_timers(tvec_base_t *base)
* is used on S/390 to stop all activity when a cpu is idle.
* This function needs to be called with interrupts disabled.
*/
-static unsigned long __next_timer_interrupt(tvec_base_t *base)
+static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
unsigned long timer_jiffies = base->timer_jiffies;
unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
int index, slot, array, found = 0;
struct timer_list *nte;
- tvec_t *varray[4];
+ struct tvec *varray[4];
/* Look for timer events in tv1. */
index = slot = timer_jiffies & TVR_MASK;
@@ -716,7 +714,7 @@ cascade:
varray[3] = &base->tv5;
for (array = 0; array < 4; array++) {
- tvec_t *varp = varray[array];
+ struct tvec *varp = varray[array];
index = slot = timer_jiffies & TVN_MASK;
do {
@@ -790,12 +788,12 @@ static unsigned long cmp_next_hrtimer_event(unsigned long now,
}
/**
- * next_timer_interrupt - return the jiffy of the next pending timer
+ * get_next_timer_interrupt - return the jiffy of the next pending timer
* @now: current time (in jiffies)
*/
unsigned long get_next_timer_interrupt(unsigned long now)
{
- tvec_base_t *base = __get_cpu_var(tvec_bases);
+ struct tvec_base *base = __get_cpu_var(tvec_bases);
unsigned long expires;
spin_lock(&base->lock);
@@ -817,6 +815,19 @@ unsigned long next_timer_interrupt(void)
#endif
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+void account_process_tick(struct task_struct *p, int user_tick)
+{
+ if (user_tick) {
+ account_user_time(p, jiffies_to_cputime(1));
+ account_user_time_scaled(p, jiffies_to_cputime(1));
+ } else {
+ account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
+ account_system_time_scaled(p, jiffies_to_cputime(1));
+ }
+}
+#endif
+
/*
* Called from the timer interrupt handler to charge one tick to the current
* process. user_tick is 1 if the tick is user time, 0 for system.
@@ -827,13 +838,7 @@ void update_process_times(int user_tick)
int cpu = smp_processor_id();
/* Note: this timer irq context must be accounted for as well. */
- if (user_tick) {
- account_user_time(p, jiffies_to_cputime(1));
- account_user_time_scaled(p, jiffies_to_cputime(1));
- } else {
- account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
- account_system_time_scaled(p, jiffies_to_cputime(1));
- }
+ account_process_tick(p, user_tick);
run_local_timers();
if (rcu_pending(cpu))
rcu_check_callbacks(cpu, user_tick);
@@ -887,9 +892,9 @@ static inline void calc_load(unsigned long ticks)
*/
static void run_timer_softirq(struct softirq_action *h)
{
- tvec_base_t *base = __get_cpu_var(tvec_bases);
+ struct tvec_base *base = __get_cpu_var(tvec_bases);
- hrtimer_run_queues();
+ hrtimer_run_pending();
if (time_after_eq(jiffies, base->timer_jiffies))
__run_timers(base);
@@ -900,6 +905,7 @@ static void run_timer_softirq(struct softirq_action *h)
*/
void run_local_timers(void)
{
+ hrtimer_run_queues();
raise_softirq(TIMER_SOFTIRQ);
softlockup_tick();
}
@@ -971,7 +977,7 @@ asmlinkage long sys_getppid(void)
int pid;
rcu_read_lock();
- pid = task_ppid_nr_ns(current, current->nsproxy->pid_ns);
+ pid = task_tgid_nr_ns(current->real_parent, current->nsproxy->pid_ns);
rcu_read_unlock();
return pid;
@@ -1093,6 +1099,13 @@ signed long __sched schedule_timeout_interruptible(signed long timeout)
}
EXPORT_SYMBOL(schedule_timeout_interruptible);
+signed long __sched schedule_timeout_killable(signed long timeout)
+{
+ __set_current_state(TASK_KILLABLE);
+ return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_killable);
+
signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
__set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1212,11 +1225,11 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info)
*/
static struct lock_class_key base_lock_keys[NR_CPUS];
-static int __devinit init_timers_cpu(int cpu)
+static int __cpuinit init_timers_cpu(int cpu)
{
int j;
- tvec_base_t *base;
- static char __devinitdata tvec_base_done[NR_CPUS];
+ struct tvec_base *base;
+ static char __cpuinitdata tvec_base_done[NR_CPUS];
if (!tvec_base_done[cpu]) {
static char boot_done;
@@ -1270,7 +1283,7 @@ static int __devinit init_timers_cpu(int cpu)
}
#ifdef CONFIG_HOTPLUG_CPU
-static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
+static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
struct timer_list *timer;
@@ -1282,10 +1295,10 @@ static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
}
}
-static void __devinit migrate_timers(int cpu)
+static void __cpuinit migrate_timers(int cpu)
{
- tvec_base_t *old_base;
- tvec_base_t *new_base;
+ struct tvec_base *old_base;
+ struct tvec_base *new_base;
int i;
BUG_ON(cpu_online(cpu));
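
The tbase_get_deferrable()/tbase_get_base() helpers touched above depend on struct tvec_base being at least 2-byte (in practice cacheline) aligned, so the low bit of a base pointer is always zero and can carry the deferrable flag. The same pointer-tagging trick in a self-contained userspace form (illustrative struct and allocation only; compile with -std=c11 for aligned_alloc):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define TBASE_DEFERRABLE_FLAG 0x1UL

/* Stand-in for struct tvec_base; the real one is cacheline aligned, so
 * the low bit of any pointer to it is guaranteed to be zero. */
struct base { int dummy; } __attribute__((aligned(64)));

static unsigned int get_deferrable(struct base *tagged)
{
	return (uintptr_t)tagged & TBASE_DEFERRABLE_FLAG;
}

static struct base *get_base(struct base *tagged)
{
	return (struct base *)((uintptr_t)tagged & ~TBASE_DEFERRABLE_FLAG);
}

static struct base *set_deferrable(struct base *b)
{
	return (struct base *)((uintptr_t)b | TBASE_DEFERRABLE_FLAG);
}

int main(void)
{
	struct base *b = aligned_alloc(64, sizeof(*b));
	struct base *tagged = set_deferrable(b);

	printf("deferrable=%u, base matches: %d\n",
	       get_deferrable(tagged), get_base(tagged) == b);
	free(b);
	return 0;
}

Tagging the pointer rather than adding a field keeps struct timer_list the same size, which is the point of the kernel's TBASE_DEFERRABLE_FLAG.
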
diff --git a/kernel/user.c b/kernel/user.c
index 0f3aa023410..bc1c48d35cb 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -115,7 +115,7 @@ static void sched_switch_user(struct task_struct *p) { }
#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)
-static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
+static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);
static inline void uids_mutex_lock(void)
@@ -128,86 +128,83 @@ static inline void uids_mutex_unlock(void)
mutex_unlock(&uids_mutex);
}
-/* return cpu shares held by the user */
-static ssize_t cpu_shares_show(struct kset *kset, char *buffer)
+/* uid directory attributes */
+static ssize_t cpu_shares_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
{
- struct user_struct *up = container_of(kset, struct user_struct, kset);
+ struct user_struct *up = container_of(kobj, struct user_struct, kobj);
- return sprintf(buffer, "%lu\n", sched_group_shares(up->tg));
+ return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}
-/* modify cpu shares held by the user */
-static ssize_t cpu_shares_store(struct kset *kset, const char *buffer,
- size_t size)
+static ssize_t cpu_shares_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t size)
{
- struct user_struct *up = container_of(kset, struct user_struct, kset);
+ struct user_struct *up = container_of(kobj, struct user_struct, kobj);
unsigned long shares;
int rc;
- sscanf(buffer, "%lu", &shares);
+ sscanf(buf, "%lu", &shares);
rc = sched_group_set_shares(up->tg, shares);
return (rc ? rc : size);
}
-static void user_attr_init(struct subsys_attribute *sa, char *name, int mode)
+static struct kobj_attribute cpu_share_attr =
+ __ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
+
+/* default attributes per uid directory */
+static struct attribute *uids_attributes[] = {
+ &cpu_share_attr.attr,
+ NULL
+};
+
+/* the lifetime of user_struct is not managed by the core (now) */
+static void uids_release(struct kobject *kobj)
{
- sa->attr.name = name;
- sa->attr.mode = mode;
- sa->show = cpu_shares_show;
- sa->store = cpu_shares_store;
+ return;
}
-/* Create "/sys/kernel/uids/<uid>" directory and
- * "/sys/kernel/uids/<uid>/cpu_share" file for this user.
- */
-static int user_kobject_create(struct user_struct *up)
+static struct kobj_type uids_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_attrs = uids_attributes,
+ .release = uids_release,
+};
+
+/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
+static int uids_user_create(struct user_struct *up)
{
- struct kset *kset = &up->kset;
- struct kobject *kobj = &kset->kobj;
+ struct kobject *kobj = &up->kobj;
int error;
- memset(kset, 0, sizeof(struct kset));
- kobj->parent = &uids_kobject; /* create under /sys/kernel/uids dir */
- kobject_set_name(kobj, "%d", up->uid);
- kset_init(kset);
- user_attr_init(&up->user_attr, "cpu_share", 0644);
-
- error = kobject_add(kobj);
- if (error)
+ memset(kobj, 0, sizeof(struct kobject));
+ kobj->kset = uids_kset;
+ error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
+ if (error) {
+ kobject_put(kobj);
goto done;
-
- error = sysfs_create_file(kobj, &up->user_attr.attr);
- if (error)
- kobject_del(kobj);
+ }
kobject_uevent(kobj, KOBJ_ADD);
-
done:
return error;
}
-/* create these in sysfs filesystem:
+/* create these entries in sysfs:
* "/sys/kernel/uids" directory
* "/sys/kernel/uids/0" directory (for root user)
* "/sys/kernel/uids/0/cpu_share" file (for root user)
*/
-int __init uids_kobject_init(void)
+int __init uids_sysfs_init(void)
{
- int error;
-
- /* create under /sys/kernel dir */
- uids_kobject.parent = &kernel_subsys.kobj;
- uids_kobject.kset = &kernel_subsys;
- kobject_set_name(&uids_kobject, "uids");
- kobject_init(&uids_kobject);
+ uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
+ if (!uids_kset)
+ return -ENOMEM;
- error = kobject_add(&uids_kobject);
- if (!error)
- error = user_kobject_create(&root_user);
-
- return error;
+ return uids_user_create(&root_user);
}
/* work function to remove sysfs directory for a user and free up
@@ -216,7 +213,6 @@ int __init uids_kobject_init(void)
static void remove_user_sysfs_dir(struct work_struct *w)
{
struct user_struct *up = container_of(w, struct user_struct, work);
- struct kobject *kobj = &up->kset.kobj;
unsigned long flags;
int remove_user = 0;
@@ -238,9 +234,9 @@ static void remove_user_sysfs_dir(struct work_struct *w)
if (!remove_user)
goto done;
- sysfs_remove_file(kobj, &up->user_attr.attr);
- kobject_uevent(kobj, KOBJ_REMOVE);
- kobject_del(kobj);
+ kobject_uevent(&up->kobj, KOBJ_REMOVE);
+ kobject_del(&up->kobj);
+ kobject_put(&up->kobj);
sched_destroy_user(up);
key_put(up->uid_keyring);
@@ -267,7 +263,8 @@ static inline void free_user(struct user_struct *up, unsigned long flags)
#else /* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */
-static inline int user_kobject_create(struct user_struct *up) { return 0; }
+int uids_sysfs_init(void) { return 0; }
+static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }
@@ -322,9 +319,9 @@ void free_uid(struct user_struct *up)
struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
{
struct hlist_head *hashent = uidhashentry(ns, uid);
- struct user_struct *up;
+ struct user_struct *up, *new;
- /* Make uid_hash_find() + user_kobject_create() + uid_hash_insert()
+ /* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
* atomic.
*/
uids_mutex_lock();
@@ -334,11 +331,10 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
spin_unlock_irq(&uidhash_lock);
if (!up) {
- struct user_struct *new;
-
new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
if (!new)
- return NULL;
+ goto out_unlock;
+
new->uid = uid;
atomic_set(&new->__count, 1);
atomic_set(&new->processes, 0);
@@ -353,26 +349,14 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
#endif
new->locked_shm = 0;
- if (alloc_uid_keyring(new, current) < 0) {
- kmem_cache_free(uid_cachep, new);
- return NULL;
- }
+ if (alloc_uid_keyring(new, current) < 0)
+ goto out_free_user;
- if (sched_create_user(new) < 0) {
- key_put(new->uid_keyring);
- key_put(new->session_keyring);
- kmem_cache_free(uid_cachep, new);
- return NULL;
- }
+ if (sched_create_user(new) < 0)
+ goto out_put_keys;
- if (user_kobject_create(new)) {
- sched_destroy_user(new);
- key_put(new->uid_keyring);
- key_put(new->session_keyring);
- kmem_cache_free(uid_cachep, new);
- uids_mutex_unlock();
- return NULL;
- }
+ if (uids_user_create(new))
+ goto out_destroy_sched;
/*
* Before adding this, check whether we raced
@@ -400,6 +384,17 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
uids_mutex_unlock();
return up;
+
+out_destroy_sched:
+ sched_destroy_user(new);
+out_put_keys:
+ key_put(new->uid_keyring);
+ key_put(new->session_keyring);
+out_free_user:
+ kmem_cache_free(uid_cachep, new);
+out_unlock:
+ uids_mutex_unlock();
+ return NULL;
}
void switch_uid(struct user_struct *new_user)
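
The user.c conversion above is an instance of the then-new kobject API: kset_create_and_add() builds the /sys/kernel/uids/ directory, each user embeds a bare struct kobject, and a kobj_type supplies sysfs_ops plus default_attrs so that kobject_init_and_add() creates the attribute files itself. A condensed sketch of the same pattern for a hypothetical object; struct foo_obj, foo_kset and the attribute name are made up:

/* created once at init time, e.g.:
 *	foo_kset = kset_create_and_add("foo", NULL, kernel_kobj);
 */
static struct kset *foo_kset;

struct foo_obj {
	struct kobject kobj;
	unsigned long value;
};

static ssize_t value_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	struct foo_obj *foo = container_of(kobj, struct foo_obj, kobj);

	return sprintf(buf, "%lu\n", foo->value);
}

static struct kobj_attribute value_attr = __ATTR(value, 0444, value_show, NULL);

/* files created automatically for every kobject of this type */
static struct attribute *foo_attrs[] = {
	&value_attr.attr,
	NULL
};

static void foo_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct foo_obj, kobj));
}

static struct kobj_type foo_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_attrs	= foo_attrs,
	.release	= foo_release,
};

static int foo_add(struct foo_obj *foo, int id)
{
	int error;

	foo->kobj.kset = foo_kset;
	error = kobject_init_and_add(&foo->kobj, &foo_ktype, NULL, "%d", id);
	if (error)
		kobject_put(&foo->kobj);	/* release() frees the object */
	return error;
}
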
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index c76c06466bf..fe3a56c2256 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -18,6 +18,10 @@
static void *get_uts(ctl_table *table, int write)
{
char *which = table->data;
+ struct uts_namespace *uts_ns;
+
+ uts_ns = current->nsproxy->uts_ns;
+ which = (which - (char *)&init_uts_ns) + (char *)uts_ns;
if (!write)
down_read(&uts_sem);
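
The two lines added to get_uts() rebase the ctl_table data pointer, which statically points at a field of init_uts_ns, onto the calling task's own UTS namespace: the field's byte offset is preserved, only the base structure changes. The same idiom in isolation, with made-up names (rebase_field() is not a kernel helper):

/*
 * Sketch of the pointer-rebasing idiom: 'field' points somewhere
 * inside 'src_obj'; return the corresponding field inside 'dst_obj'.
 */
static void *rebase_field(void *field, void *src_obj, void *dst_obj)
{
	ptrdiff_t offset = (char *)field - (char *)src_obj;

	return (char *)dst_obj + offset;
}

/*
 * Usage corresponding to the hunk above, for the nodename field:
 *	char *node = rebase_field(init_uts_ns.name.nodename,
 *				  &init_uts_ns,
 *				  current->nsproxy->uts_ns);
 */
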
diff --git a/kernel/wait.c b/kernel/wait.c
index 444ddbfaefc..f9876888a56 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -215,7 +215,7 @@ void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
if (waitqueue_active(wq))
- __wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
+ __wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);
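
TASK_NORMAL is simply the new name for TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, introduced alongside TASK_KILLABLE so wake-ups aimed at ordinary sleepers no longer spell out the mask. For reference, the usual pairing around __wake_up_bit(); my_flags, MY_BIT_BUSY and my_bit_wait() are made-up example names:

static unsigned long my_flags;		/* example state word */
#define MY_BIT_BUSY	0

static int my_bit_wait(void *word)
{
	schedule();
	return 0;
}

/* Waiter: block until MY_BIT_BUSY is cleared by the releaser. */
static void my_wait_idle(void)
{
	wait_on_bit(&my_flags, MY_BIT_BUSY, my_bit_wait, TASK_UNINTERRUPTIBLE);
}

/* Releaser: clear the bit, then wake sleepers via wake_up_bit(). */
static void my_release(void)
{
	clear_bit(MY_BIT_BUSY, &my_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&my_flags, MY_BIT_BUSY);
}
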
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 52d5e7c9a8e..52db48e7f6e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -67,9 +67,8 @@ struct workqueue_struct {
#endif
};
-/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
- threads to each one as cpus come/go. */
-static DEFINE_MUTEX(workqueue_mutex);
+/* Serializes the accesses to the list of workqueues. */
+static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static int singlethread_cpu __read_mostly;
@@ -592,8 +591,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
* Returns zero on success.
* Returns -ve errno on failure.
*
- * Appears to be racy against CPU hotplug.
- *
* schedule_on_each_cpu() is very slow.
*/
int schedule_on_each_cpu(work_func_t func)
@@ -605,7 +602,7 @@ int schedule_on_each_cpu(work_func_t func)
if (!works)
return -ENOMEM;
- preempt_disable(); /* CPU hotplug */
+ get_online_cpus();
for_each_online_cpu(cpu) {
struct work_struct *work = per_cpu_ptr(works, cpu);
@@ -613,8 +610,8 @@ int schedule_on_each_cpu(work_func_t func)
set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
}
- preempt_enable();
flush_workqueue(keventd_wq);
+ put_online_cpus();
free_percpu(works);
return 0;
}
@@ -722,7 +719,8 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
struct workqueue_struct *__create_workqueue_key(const char *name,
int singlethread,
int freezeable,
- struct lock_class_key *key)
+ struct lock_class_key *key,
+ const char *lock_name)
{
struct workqueue_struct *wq;
struct cpu_workqueue_struct *cwq;
@@ -739,7 +737,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
}
wq->name = name;
- lockdep_init_map(&wq->lockdep_map, name, key, 0);
+ lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
wq->singlethread = singlethread;
wq->freezeable = freezeable;
INIT_LIST_HEAD(&wq->list);
@@ -749,8 +747,10 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
err = create_workqueue_thread(cwq, singlethread_cpu);
start_workqueue_thread(cwq, -1);
} else {
- mutex_lock(&workqueue_mutex);
+ get_online_cpus();
+ spin_lock(&workqueue_lock);
list_add(&wq->list, &workqueues);
+ spin_unlock(&workqueue_lock);
for_each_possible_cpu(cpu) {
cwq = init_cpu_workqueue(wq, cpu);
@@ -759,7 +759,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
err = create_workqueue_thread(cwq, cpu);
start_workqueue_thread(cwq, cpu);
}
- mutex_unlock(&workqueue_mutex);
+ put_online_cpus();
}
if (err) {
@@ -774,7 +774,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
/*
* Our caller is either destroy_workqueue() or CPU_DEAD,
- * workqueue_mutex protects cwq->thread
+ * get_online_cpus() protects cwq->thread.
*/
if (cwq->thread == NULL)
return;
@@ -809,9 +809,11 @@ void destroy_workqueue(struct workqueue_struct *wq)
struct cpu_workqueue_struct *cwq;
int cpu;
- mutex_lock(&workqueue_mutex);
+ get_online_cpus();
+ spin_lock(&workqueue_lock);
list_del(&wq->list);
- mutex_unlock(&workqueue_mutex);
+ spin_unlock(&workqueue_lock);
+ put_online_cpus();
for_each_cpu_mask(cpu, *cpu_map) {
cwq = per_cpu_ptr(wq->cpu_wq, cpu);
@@ -834,13 +836,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
action &= ~CPU_TASKS_FROZEN;
switch (action) {
- case CPU_LOCK_ACQUIRE:
- mutex_lock(&workqueue_mutex);
- return NOTIFY_OK;
-
- case CPU_LOCK_RELEASE:
- mutex_unlock(&workqueue_mutex);
- return NOTIFY_OK;
case CPU_UP_PREPARE:
cpu_set(cpu, cpu_populated_map);
@@ -853,7 +848,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
case CPU_UP_PREPARE:
if (!create_workqueue_thread(cwq, cpu))
break;
- printk(KERN_ERR "workqueue for %i failed\n", cpu);
+ printk(KERN_ERR "workqueue [%s] for %i failed\n",
+ wq->name, cpu);
return NOTIFY_BAD;
case CPU_ONLINE:
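
The workqueue.c changes above swap the old workqueue_mutex (and its CPU_LOCK_ACQUIRE/CPU_LOCK_RELEASE notifier hooks) for the reference-counted hotplug API: get_online_cpus()/put_online_cpus() pin the set of online CPUs while it is walked, and the short-lived workqueue_lock spinlock only guards the workqueues list itself. The general shape of that pattern, with do_something_on() as a placeholder:

/*
 * Sketch of the pattern used above; do_something_on() is a
 * placeholder, not a kernel symbol.
 */
static void touch_all_online_cpus(void)
{
	int cpu;

	get_online_cpus();		/* hold off CPU hotplug; may sleep */
	for_each_online_cpu(cpu)
		do_something_on(cpu);	/* placeholder per-cpu work */
	put_online_cpus();		/* let hotplug proceed again */
}
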