From 1883c79a57a5fe25309007590cccb1b2782c41b2 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Sat, 27 Feb 2010 14:53:08 -0800
Subject: rcu: Make task_subsys_state() RCU-lockdep checks handle boot-time use

It is apparently legal to invoke task_subsys_state() without RCU
protection during early boot time.  After all, there are no concurrent
tasks, so there can be no grace periods completing concurrently.  But
this does need an Acked-by from the cgroups folks.

Located-by: Ingo Molnar
Signed-off-by: Paul E. McKenney
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1267311188-16603-2-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar
---
 include/linux/cgroup.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include')

diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c9bbcb2a75a..a73e1ced09b 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -489,6 +489,7 @@ static inline struct cgroup_subsys_state *task_subsys_state(
 {
 	return rcu_dereference_check(task->cgroups->subsys[subsys_id],
 				     rcu_read_lock_held() ||
+				     !rcu_scheduler_active ||
 				     cgroup_lock_is_held());
 }
--
cgit v1.2.3

From db1466b3e1bd1727375cdbfcbea4bcce2f860f61 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Wed, 3 Mar 2010 07:46:56 -0800
Subject: rcu: Use wrapper function instead of exporting tasklist_lock

Lockdep-RCU commit d11c563d exported tasklist_lock, which is not a good
thing.  This patch instead exports a function that uses lockdep to
check whether tasklist_lock is held.

Suggested-by: Christoph Hellwig
Signed-off-by: Paul E. McKenney
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
Cc: Christoph Hellwig
LKML-Reference: <1267631219-8713-1-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar
---
 include/linux/cred.h  | 2 +-
 include/linux/sched.h | 4 ++++
 2 files changed, 5 insertions(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/linux/cred.h b/include/linux/cred.h
index 4db09f89b63..52507c3e138 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -280,7 +280,7 @@ static inline void put_cred(const struct cred *_cred)
  * task or by holding tasklist_lock to prevent it from being unlinked.
  */
 #define __task_cred(task) \
-	((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock))))
+	((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_tasklist_lock_is_held())))
 
 /**
  * get_task_cred - Get another task's objective credentials
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0eef87b58ea..a47af2064dc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -258,6 +258,10 @@ extern spinlock_t mmlist_lock;
 
 struct task_struct;
 
+#ifdef CONFIG_PROVE_RCU
+extern int lockdep_tasklist_lock_is_held(void);
+#endif /* #ifdef CONFIG_PROVE_RCU */
+
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern asmlinkage void schedule_tail(struct task_struct *prev);
--
cgit v1.2.3
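
Because this log is limited to 'include', the patch above shows only the
declaration of the new wrapper; its out-of-line definition lives elsewhere
in the tree.  A minimal sketch of what that definition would look like
follows.  The function name and the CONFIG_PROVE_RCU guard come from the
diff itself; the body, the includes, and the EXPORT_SYMBOL_GPL are
assumptions, reconstructed from the lockdep_is_held(&tasklist_lock) call
that __task_cred() previously made directly:

#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/sched.h>

#ifdef CONFIG_PROVE_RCU
/* Out-of-line so that tasklist_lock itself need not be exported. */
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

The point of the design is that modules call the exported helper rather
than touching the raw lock, so the lock symbol stays private to the core
kernel.
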
McKenney" Date: Wed, 3 Mar 2010 07:46:58 -0800 Subject: rcu, cgroup: Relax the check in task_subsys_state() as early boot is now handled by lockdep-RCU This patch removes the check for !rcu_scheduler_active because this check has been incorporated into rcu_dereference_check(). Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu Cc: dhowells@redhat.com LKML-Reference: <1267631219-8713-3-git-send-email-paulmck@linux.vnet.ibm.com> Signed-off-by: Ingo Molnar --- include/linux/cgroup.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index a73e1ced09b..c9bbcb2a75a 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -489,7 +489,6 @@ static inline struct cgroup_subsys_state *task_subsys_state( { return rcu_dereference_check(task->cgroups->subsys[subsys_id], rcu_read_lock_held() || - !rcu_scheduler_active || cgroup_lock_is_held()); } -- cgit v1.2.3 From e6033e3b307fcfae08408e0673266db38392bda4 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 3 Mar 2010 17:50:16 -0800 Subject: rcu: Make rcu_read_lock_sched_held() handle !PREEMPT The rcu_read_lock_sched_held() needs to unconditionally return the value "1" in a !PREEMPT kernel, because under !PREEMPT, -all- kernel code is implicitly preempt-disabled. This patch makes this happen. Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu Cc: dhowells@redhat.com LKML-Reference: <1267667418-32233-1-git-send-email-paulmck@linux.vnet.ibm.com> Signed-off-by: Ingo Molnar --- include/linux/rcupdate.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'include') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index c8437362633..e22960ecb71 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -136,6 +136,7 @@ static inline int rcu_read_lock_bh_held(void) * can prove otherwise. Note that disabling of preemption (including * disabling irqs) counts as an RCU-sched read-side critical section. */ +#ifdef CONFIG_PREEMPT static inline int rcu_read_lock_sched_held(void) { int lockdep_opinion = 0; @@ -144,6 +145,12 @@ static inline int rcu_read_lock_sched_held(void) lockdep_opinion = lock_is_held(&rcu_sched_lock_map); return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active; } +#else /* #ifdef CONFIG_PREEMPT */ +static inline int rcu_read_lock_sched_held(void) +{ + return 1; +} +#endif /* #else #ifdef CONFIG_PREEMPT */ #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ @@ -164,10 +171,17 @@ static inline int rcu_read_lock_bh_held(void) return 1; } +#ifdef CONFIG_PREEMPT static inline int rcu_read_lock_sched_held(void) { return preempt_count() != 0 || !rcu_scheduler_active; } +#else /* #ifdef CONFIG_PREEMPT */ +static inline int rcu_read_lock_sched_held(void) +{ + return 1; +} +#endif /* #else #ifdef CONFIG_PREEMPT */ #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -- cgit v1.2.3 From 8d53dd546f36073e0d29b0cfc24c665db301e3e7 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Wed, 3 Mar 2010 17:50:18 -0800 Subject: rcu, ftrace: Fix RCU lockdep splat in ftrace_perf_buf_prepare() Change the pair of rcu_dereference() calls in ftrace_perf_buf_prepare() to rcu_dereference_sched(). Signed-off-by: Paul E. McKenney Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: Valdis.Kletnieks@vt.edu Cc: dhowells@redhat.com Cc: Frederic Weisbecker LKML-Reference: <1267667418-32233-3-git-send-email-paulmck@linux.vnet.ibm.com> Signed-off-by: Ingo Molnar --- include/trace/ftrace.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 0804cd59480..601ad774424 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -699,9 +699,9 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ * __cpu = smp_processor_id(); * * if (in_nmi()) - * trace_buf = rcu_dereference(perf_trace_buf_nmi); + * trace_buf = rcu_dereference_sched(perf_trace_buf_nmi); * else - * trace_buf = rcu_dereference(perf_trace_buf); + * trace_buf = rcu_dereference_sched(perf_trace_buf); * * if (!trace_buf) * goto end; -- cgit v1.2.3 From 54dbf96c921513bf98484a20ef366d51944a4c4d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 3 Mar 2010 07:46:57 -0800 Subject: rcu: Suppress RCU lockdep warnings during early boot RCU is used during very early boot, before RCU and lockdep have been initialized. So make the underlying primitives (rcu_read_lock_held(), rcu_read_lock_bh_held(), rcu_read_lock_sched_held(), and rcu_dereference_check()) check for early boot via the rcu_scheduler_active flag. This will suppress false positives. Also introduce a debug_lockdep_rcu_enabled() static inline helper function, which tags the CONTINUE_PROVE_RCU case as likely(), as suggested by Ingo Molnar. Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu Cc: dhowells@redhat.com LKML-Reference: <1267631219-8713-2-git-send-email-paulmck@linux.vnet.ibm.com> [ v2: removed incomplete debug_lockdep_rcu_update() bits ] Signed-off-by: Ingo Molnar --- include/linux/rcupdate.h | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index e22960ecb71..75921b83c0a 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -97,6 +97,11 @@ extern struct lockdep_map rcu_sched_lock_map; # define rcu_read_release_sched() \ lock_release(&rcu_sched_lock_map, 1, _THIS_IP_) +static inline int debug_lockdep_rcu_enabled(void) +{ + return likely(rcu_scheduler_active && debug_locks); +} + /** * rcu_read_lock_held - might we be in RCU read-side critical section? * @@ -104,12 +109,14 @@ extern struct lockdep_map rcu_sched_lock_map; * an RCU read-side critical section. In absence of CONFIG_PROVE_LOCKING, * this assumes we are in an RCU read-side critical section unless it can * prove otherwise. + * + * Check rcu_scheduler_active to prevent false positives during boot. 
From 54dbf96c921513bf98484a20ef366d51944a4c4d Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Wed, 3 Mar 2010 07:46:57 -0800
Subject: rcu: Suppress RCU lockdep warnings during early boot

RCU is used during very early boot, before RCU and lockdep have been
initialized.  So make the underlying primitives (rcu_read_lock_held(),
rcu_read_lock_bh_held(), rcu_read_lock_sched_held(), and
rcu_dereference_check()) check for early boot via the
rcu_scheduler_active flag.  This will suppress false positives.

Also introduce a debug_lockdep_rcu_enabled() static inline helper
function, which tags the CONFIG_PROVE_RCU case as likely(), as
suggested by Ingo Molnar.

Signed-off-by: Paul E. McKenney
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1267631219-8713-2-git-send-email-paulmck@linux.vnet.ibm.com>
[ v2: removed incomplete debug_lockdep_rcu_update() bits ]
Signed-off-by: Ingo Molnar
---
 include/linux/rcupdate.h | 31 ++++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)

(limited to 'include')

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e22960ecb71..75921b83c0a 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -97,6 +97,11 @@ extern struct lockdep_map rcu_sched_lock_map;
 # define rcu_read_release_sched() \
 		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
 
+static inline int debug_lockdep_rcu_enabled(void)
+{
+	return likely(rcu_scheduler_active && debug_locks);
+}
+
 /**
  * rcu_read_lock_held - might we be in RCU read-side critical section?
  *
@@ -104,12 +109,14 @@ extern struct lockdep_map rcu_sched_lock_map;
  * an RCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
  * this assumes we are in an RCU read-side critical section unless it can
  * prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 static inline int rcu_read_lock_held(void)
 {
-	if (debug_locks)
-		return lock_is_held(&rcu_lock_map);
-	return 1;
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	return lock_is_held(&rcu_lock_map);
 }
 
 /**
@@ -119,12 +126,14 @@ static inline int rcu_read_lock_held(void)
  * an RCU-bh read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
  * this assumes we are in an RCU-bh read-side critical section unless it can
  * prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 static inline int rcu_read_lock_bh_held(void)
 {
-	if (debug_locks)
-		return lock_is_held(&rcu_bh_lock_map);
-	return 1;
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	return lock_is_held(&rcu_bh_lock_map);
 }
 
 /**
@@ -135,15 +144,19 @@ static inline int rcu_read_lock_bh_held(void)
  * this assumes we are in an RCU-sched read-side critical section unless it
  * can prove otherwise.  Note that disabling of preemption (including
  * disabling irqs) counts as an RCU-sched read-side critical section.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
 	int lockdep_opinion = 0;
 
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
 	if (debug_locks)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
-	return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
+	return lockdep_opinion || preempt_count() != 0;
 }
 #else /* #ifdef CONFIG_PREEMPT */
 static inline int rcu_read_lock_sched_held(void)
@@ -174,7 +187,7 @@ static inline int rcu_read_lock_bh_held(void)
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
-	return preempt_count() != 0 || !rcu_scheduler_active;
+	return !rcu_scheduler_active || preempt_count() != 0;
 }
 #else /* #ifdef CONFIG_PREEMPT */
 static inline int rcu_read_lock_sched_held(void)
@@ -198,7 +211,7 @@ static inline int rcu_read_lock_sched_held(void)
  */
 #define rcu_dereference_check(p, c) \
 	({ \
-		if (debug_locks && !(c)) \
+		if (debug_lockdep_rcu_enabled() && !(c)) \
 			lockdep_rcu_dereference(__FILE__, __LINE__); \
 		rcu_dereference_raw(p); \
 	})
--
cgit v1.2.3
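
To see the false positive this patch suppresses, consider a hypothetical
early-boot reader (struct foo, gp, and example_early_init() are invented
for this sketch, and the call site before rcu_scheduler_starting() is an
assumption; only rcu_dereference_check() and debug_lockdep_rcu_enabled()
come from the patch):

#include <linux/init.h>
#include <linux/rcupdate.h>

struct foo { int val; };
static struct foo *gp;

/* Assume this runs from start_kernel() before the scheduler starts.
 * No rcu_read_lock() is taken and no lock is held, yet no lockdep
 * splat fires: rcu_scheduler_active is still 0, so
 * debug_lockdep_rcu_enabled() returns 0 and the check is skipped. */
static int __init example_early_init(void)
{
	struct foo *p;

	p = rcu_dereference_check(gp, rcu_read_lock_held());
	return p ? p->val : 0;
}

This is safe for the reason the first commit in this series gave: with
only one task running, no grace period can complete concurrently, so the
unprotected dereference cannot race with a deferred free.
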
From b97c4bc16734a2e597dac7f91ee9eb78f4aeef9a Mon Sep 17 00:00:00 2001
From: Luca Barbieri
Date: Thu, 11 Mar 2010 14:08:45 -0800
Subject: locking: Make sparse work with inline spinlocks and rwlocks

Currently sparse does not work with inline spinlock and rwlock
functions.  The problem is that they do not use the __acquires/__releases
out-of-line functions, but use inline functions with no sparse
annotations.

This patch adds the appropriate annotations to make it work properly.

Signed-off-by: Luca Barbieri
Cc: Ingo Molnar
Signed-off-by: Andrew Morton
Signed-off-by: Thomas Gleixner
---
 include/linux/rwlock.h   | 20 ++++++++++----------
 include/linux/spinlock.h | 13 ++++++++-----
 2 files changed, 18 insertions(+), 15 deletions(-)

(limited to 'include')

diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 71e0b00b6f2..bc2994ed66e 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -29,25 +29,25 @@ do {								\
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
- extern void do_raw_read_lock(rwlock_t *lock);
+ extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
 #define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
  extern int do_raw_read_trylock(rwlock_t *lock);
- extern void do_raw_read_unlock(rwlock_t *lock);
- extern void do_raw_write_lock(rwlock_t *lock);
+ extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
+ extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
 #define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
  extern int do_raw_write_trylock(rwlock_t *lock);
- extern void do_raw_write_unlock(rwlock_t *lock);
+ extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
 #else
-# define do_raw_read_lock(rwlock)	arch_read_lock(&(rwlock)->raw_lock)
+# define do_raw_read_lock(rwlock)	do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
 # define do_raw_read_lock_flags(lock, flags) \
-		arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+		do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
 # define do_raw_read_trylock(rwlock)	arch_read_trylock(&(rwlock)->raw_lock)
-# define do_raw_read_unlock(rwlock)	arch_read_unlock(&(rwlock)->raw_lock)
-# define do_raw_write_lock(rwlock)	arch_write_lock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock)	do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
+# define do_raw_write_lock(rwlock)	do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
 # define do_raw_write_lock_flags(lock, flags) \
-		arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+		do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
 # define do_raw_write_trylock(rwlock)	arch_write_trylock(&(rwlock)->raw_lock)
-# define do_raw_write_unlock(rwlock)	arch_write_unlock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock)	do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
 #endif
 
 #define read_can_lock(rwlock)		arch_read_can_lock(&(rwlock)->raw_lock)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 86088213334..89fac6a3f78 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -128,19 +128,21 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 #define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
 
 #ifdef CONFIG_DEBUG_SPINLOCK
- extern void do_raw_spin_lock(raw_spinlock_t *lock);
+ extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
- extern void do_raw_spin_unlock(raw_spinlock_t *lock);
+ extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
 #else
-static inline void do_raw_spin_lock(raw_spinlock_t *lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
 {
+	__acquire(lock);
 	arch_spin_lock(&lock->raw_lock);
 }
 
 static inline void
-do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
 {
+	__acquire(lock);
 	arch_spin_lock_flags(&lock->raw_lock, *flags);
 }
 
@@ -149,9 +151,10 @@ static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
 	return arch_spin_trylock(&(lock)->raw_lock);
 }
 
-static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 {
 	arch_spin_unlock(&lock->raw_lock);
+	__release(lock);
 }
 #endif
--
cgit v1.2.3
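
For readers unfamiliar with sparse's context tracking: __acquires() and
__releases() annotate a function's declared net effect on a lock context,
while __acquire() and __release() adjust the tracked count inside a body,
which is why the inline (non-DEBUG_SPINLOCK) variants above need the
latter.  The definitions below are the kernel's own from
include/linux/compiler.h of this era (shown for reference); the counter
struct and helper function are invented for illustration:

/* Active only when building with sparse (__CHECKER__); otherwise empty:
 *
 *	# define __acquires(x)	__attribute__((context(x, 0, 1)))
 *	# define __releases(x)	__attribute__((context(x, 1, 0)))
 *	# define __acquire(x)	__context__(x, 1)
 *	# define __release(x)	__context__(x, -1)
 */
#include <linux/spinlock.h>

struct counter {
	spinlock_t lock;
	long count;
};

/* A helper that returns with the lock held must advertise the net +1,
 * or sparse reports a context imbalance at every call site. */
static void counter_lock_and_bump(struct counter *c) __acquires(&c->lock)
{
	spin_lock(&c->lock);	/* supplies the +1 promised above */
	c->count++;
}				/* caller later does spin_unlock(&c->lock) */

With the annotations in place, sparse can balance acquire/release counts
across both the out-of-line CONFIG_DEBUG_SPINLOCK build and the inline
fast path, instead of warning on every inline lock or unlock.
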