path: root/kernel/rcuclassic.c
author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2008-08-10 18:35:38 -0700
committer	Ingo Molnar <mingo@elte.hu>	2008-08-11 13:35:18 +0200
commit	67182ae1c42206e516f7efb292b745e826497b24 (patch)
tree	d2d402550a0432489090264df95a8154597dc989 /kernel/rcuclassic.c
parent	c4c0c56a7a85ed5725786219e4fbca7e840b1531 (diff)
rcu, debug: detect stalled grace periods
This is a diagnostic patch for Classic RCU.

The approach is to record a timestamp at the beginning of the grace period (in rcu_start_batch()), then have rcu_check_callbacks() complain if:

1. It is running on a CPU that has been holding up grace periods for a long time (say one second). This will identify the culprit, assuming that the culprit has not disabled hardware irqs, instruction execution, or some such.

2. It is running on a CPU that is not holding up grace periods, but grace periods have been held up for an even longer time (say two seconds).

It is enabled via the default-off CONFIG_DEBUG_RCU_STALL kernel configuration option.

Rather than exponential backoff, it backs off to once per 30 seconds. My feeling upon thinking on it was that if you have stalled RCU grace periods for that long, a few extra printk() messages are probably the least of your worries...

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: David Witbrodt <dawitbro@sbcglobal.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
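For readers who want to see the timing logic in isolation, the following is a minimal user-space sketch of the timestamp-and-compare idea described above; it is not part of the patch. time(NULL) stands in for get_seconds(), and the stall_demo struct and main() harness are illustrative stand-ins for rcu_ctrlblk and the real call sites; only the deadline arithmetic mirrors the patch.

/*
 * Illustrative sketch (not kernel code): record a deadline when a
 * "grace period" starts, then complain if a later check finds the
 * deadline has passed, backing off by 30 seconds after complaining.
 */
#include <stdio.h>
#include <time.h>

struct stall_demo {
	time_t gp_check;	/* deadline: grace-period start + 3 seconds */
};

static void record_gp_check_time(struct stall_demo *sd)
{
	sd->gp_check = time(NULL) + 3;
}

static void check_cpu_stall(struct stall_demo *sd)
{
	long delta = (long)(time(NULL) - sd->gp_check);

	if (delta >= 0L) {
		printf("stall detected (t=%ld past deadline)\n", delta);
		/* Back off so we only complain once per 30 seconds. */
		sd->gp_check = time(NULL) + 30;
	}
}

int main(void)
{
	struct stall_demo sd;

	record_gp_check_time(&sd);
	check_cpu_stall(&sd);		/* too early: stays silent */
	sd.gp_check = time(NULL) - 1;	/* pretend the deadline passed */
	check_cpu_stall(&sd);		/* now prints a complaint */
	return 0;
}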
Diffstat (limited to 'kernel/rcuclassic.c')
-rw-r--r--	kernel/rcuclassic.c	80
1 file changed, 80 insertions(+), 0 deletions(-)
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index d4271146a9b..d7ec731de75 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -47,6 +47,7 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
+#include <linux/time.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
@@ -286,6 +287,81 @@ static void rcu_do_batch(struct rcu_data *rdp)
* rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
* period (if necessary).
*/
+
+#ifdef CONFIG_DEBUG_RCU_STALL
+
+static inline void record_gp_check_time(struct rcu_ctrlblk *rcp)
+{
+	rcp->gp_check = get_seconds() + 3;
+}
+
+static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+	int cpu;
+	long delta;
+
+	/* Only let one CPU complain about others per time interval. */
+
+	spin_lock(&rcp->lock);
+	delta = get_seconds() - rcp->gp_check;
+	if (delta < 2L ||
+	    cpus_empty(rcp->cpumask)) {
+		spin_unlock(&rcp->lock);
+		return;
+	}
+	rcp->gp_check = get_seconds() + 30;
+	spin_unlock(&rcp->lock);
+
+	/* OK, time to rat on our buddy... */
+
+	printk(KERN_ERR "RCU detected CPU stalls:");
+	for_each_cpu_mask(cpu, rcp->cpumask)
+		printk(" %d", cpu);
+	printk(" (detected by %d, t=%lu/%lu)\n",
+	       smp_processor_id(), get_seconds(), rcp->gp_check);
+}
+
+static void print_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+	printk(KERN_ERR "RCU detected CPU %d stall (t=%lu/%lu)\n",
+	       smp_processor_id(), get_seconds(), rcp->gp_check);
+	dump_stack();
+	spin_lock(&rcp->lock);
+	if ((long)(get_seconds() - rcp->gp_check) >= 0L)
+		rcp->gp_check = get_seconds() + 30;
+	spin_unlock(&rcp->lock);
+}
+
+static inline void check_cpu_stall(struct rcu_ctrlblk *rcp,
+				   struct rcu_data *rdp)
+{
+	long delta;
+
+	delta = get_seconds() - rcp->gp_check;
+	if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0L) {
+
+		/* We haven't checked in, so go dump stack. */
+
+		print_cpu_stall(rcp);
+
+	} else if (!cpus_empty(rcp->cpumask) && delta >= 2L) {
+
+		/* They had two seconds to dump stack, so complain. */
+
+		print_other_cpu_stall(rcp);
+
+	}
+}
+
+#else /* #ifdef CONFIG_DEBUG_RCU_STALL */
+
+static inline void record_gp_check_time(struct rcu_ctrlblk *rcp)
+{
+}
+
+static inline void check_cpu_stall(struct rcu_ctrlblk *rcp,
+				   struct rcu_data *rdp)
+{
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_RCU_STALL */
+
/*
* Register a new batch of callbacks, and start it up if there is currently no
* active batch and the batch to be registered has not already occurred.
@@ -296,6 +372,7 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp)
	if (rcp->cur != rcp->pending &&
	    rcp->completed == rcp->cur) {
		rcp->cur++;
+		record_gp_check_time(rcp);

		/*
		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
@@ -489,6 +566,9 @@ static void rcu_process_callbacks(struct softirq_action *unused)
static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
+	/* Check for CPU stalls, if enabled. */
+	check_cpu_stall(rcp, rdp);
+
	if (rdp->nxtlist) {
		/*
		 * This cpu has pending rcu entries and the grace period