Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/completion.h        |  12
-rw-r--r--  include/linux/dcache.h            |  12
-rw-r--r--  include/linux/debug_locks.h       |  69
-rw-r--r--  include/linux/fs.h                |  38
-rw-r--r--  include/linux/hardirq.h           |  27
-rw-r--r--  include/linux/hrtimer.h           |   1
-rw-r--r--  include/linux/ide.h               |   2
-rw-r--r--  include/linux/idr.h               |   2
-rw-r--r--  include/linux/init_task.h         |  15
-rw-r--r--  include/linux/interrupt.h         |  77
-rw-r--r--  include/linux/ioport.h            |   1
-rw-r--r--  include/linux/irqflags.h          |  96
-rw-r--r--  include/linux/kallsyms.h          |  23
-rw-r--r--  include/linux/lockdep.h           | 353
-rw-r--r--  include/linux/mm.h                |   8
-rw-r--r--  include/linux/mmzone.h            |   6
-rw-r--r--  include/linux/module.h            |   6
-rw-r--r--  include/linux/mutex-debug.h       |  18
-rw-r--r--  include/linux/mutex.h             |  37
-rw-r--r--  include/linux/notifier.h          |   2
-rw-r--r--  include/linux/rtmutex.h           |  10
-rw-r--r--  include/linux/rwsem-spinlock.h    |  27
-rw-r--r--  include/linux/rwsem.h             |  83
-rw-r--r--  include/linux/sched.h             |  86
-rw-r--r--  include/linux/seqlock.h           |  12
-rw-r--r--  include/linux/skbuff.h            |   3
-rw-r--r--  include/linux/spinlock.h          |  63
-rw-r--r--  include/linux/spinlock_api_smp.h  |   2
-rw-r--r--  include/linux/spinlock_api_up.h   |   1
-rw-r--r--  include/linux/spinlock_types.h    |  47
-rw-r--r--  include/linux/spinlock_types_up.h |   9
-rw-r--r--  include/linux/spinlock_up.h       |   1
-rw-r--r--  include/linux/stacktrace.h        |  20
-rw-r--r--  include/linux/swap.h              |   1
-rw-r--r--  include/linux/sysctl.h            |   2
-rw-r--r--  include/linux/wait.h              |   8
36 files changed, 988 insertions, 192 deletions
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 90663ad217f..251c41e3ddd 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -21,6 +21,18 @@ struct completion {
#define DECLARE_COMPLETION(work) \
struct completion work = COMPLETION_INITIALIZER(work)
+/*
+ * Lockdep needs to run a non-constant initializer for on-stack
+ * completions - so we use the _ONSTACK() variant for those that
+ * are on the kernel stack:
+ */
+#ifdef CONFIG_LOCKDEP
+# define DECLARE_COMPLETION_ONSTACK(work) \
+ struct completion work = ({ init_completion(&work); work; })
+#else
+# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
+#endif
+
static inline void init_completion(struct completion *x)
{
x->done = 0;
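
A minimal usage sketch of the new _ONSTACK() variant: the completion lives on the stack of a hypothetical my_wait_for_hw() and is completed by an (equally hypothetical) start_hw_op() from interrupt context; only DECLARE_COMPLETION_ONSTACK() and wait_for_completion() are real kernel API here.

static int my_wait_for_hw(void)
{
        DECLARE_COMPLETION_ONSTACK(done);   /* runtime-initialized under CONFIG_LOCKDEP */

        start_hw_op(&done);                 /* hypothetical: the irq handler calls complete(&done) */
        wait_for_completion(&done);
        return 0;
}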
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 0dd1610a94a..471781ffeab 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -114,6 +114,18 @@ struct dentry {
unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
};
+/*
+ * dentry->d_lock spinlock nesting subclasses:
+ *
+ * 0: normal
+ * 1: nested
+ */
+enum dentry_d_lock_class
+{
+ DENTRY_D_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. */
+ DENTRY_D_LOCK_NESTED
+};
+
struct dentry_operations {
int (*d_revalidate)(struct dentry *, struct nameidata *);
int (*d_hash) (struct dentry *, struct qstr *);
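
A sketch of how the d_lock subclasses above are meant to be used when two dentries must be held at once; lock_dentry_pair() is a hypothetical helper, spin_lock_nested() is the annotation added elsewhere in this patch.

static void lock_dentry_pair(struct dentry *parent, struct dentry *child)
{
        spin_lock(&parent->d_lock);             /* DENTRY_D_LOCK_NORMAL, implicit subclass 0 */
        spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
        /* ... both dentries are now locked ... */
        spin_unlock(&child->d_lock);
        spin_unlock(&parent->d_lock);
}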
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
new file mode 100644
index 00000000000..6a7047851e4
--- /dev/null
+++ b/include/linux/debug_locks.h
@@ -0,0 +1,69 @@
+#ifndef __LINUX_DEBUG_LOCKING_H
+#define __LINUX_DEBUG_LOCKING_H
+
+extern int debug_locks;
+extern int debug_locks_silent;
+
+/*
+ * Generic 'turn off all lock debugging' function:
+ */
+extern int debug_locks_off(void);
+
+/*
+ * In the debug case we carry the caller's instruction pointer into
+ * other functions, but we don't want the function argument overhead
+ * in the nondebug case - hence these macros:
+ */
+#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+
+#define DEBUG_LOCKS_WARN_ON(c) \
+({ \
+ int __ret = 0; \
+ \
+ if (unlikely(c)) { \
+ if (debug_locks_off()) \
+ WARN_ON(1); \
+ __ret = 1; \
+ } \
+ __ret; \
+})
+
+#ifdef CONFIG_SMP
+# define SMP_DEBUG_LOCKS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c)
+#else
+# define SMP_DEBUG_LOCKS_WARN_ON(c) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
+ extern void locking_selftest(void);
+#else
+# define locking_selftest() do { } while (0)
+#endif
+
+#ifdef CONFIG_LOCKDEP
+extern void debug_show_all_locks(void);
+extern void debug_show_held_locks(struct task_struct *task);
+extern void debug_check_no_locks_freed(const void *from, unsigned long len);
+extern void debug_check_no_locks_held(struct task_struct *task);
+#else
+static inline void debug_show_all_locks(void)
+{
+}
+
+static inline void debug_show_held_locks(struct task_struct *task)
+{
+}
+
+static inline void
+debug_check_no_locks_freed(const void *from, unsigned long len)
+{
+}
+
+static inline void
+debug_check_no_locks_held(struct task_struct *task)
+{
+}
+#endif
+
+#endif
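
A small sketch of the intended DEBUG_LOCKS_WARN_ON() pattern: warn once, let debug_locks_off() silence all further lock debugging, and bail out rather than spamming the log. The helper is hypothetical; irqs_disabled() comes from the irqflags/arch headers.

static void my_assert_irqs_disabled(void)      /* hypothetical debugging helper */
{
        /* evaluates to 1 (and disables further lock debugging) on the first hit */
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return;
        /* ... callers rely on hard irqs being off from here on ... */
}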
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e04a5cfe874..134b3206824 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -436,6 +436,21 @@ struct block_device {
};
/*
+ * bdev->bd_mutex nesting subclasses for the lock validator:
+ *
+ * 0: normal
+ * 1: 'whole'
+ * 2: 'partition'
+ */
+enum bdev_bd_mutex_lock_class
+{
+ BD_MUTEX_NORMAL,
+ BD_MUTEX_WHOLE,
+ BD_MUTEX_PARTITION
+};
+
+
+/*
* Radix-tree tags, for tagging dirty and writeback pages within the pagecache
* radix trees
*/
@@ -543,6 +558,25 @@ struct inode {
};
/*
+ * inode->i_mutex nesting subclasses for the lock validator:
+ *
+ * 0: the object of the current VFS operation
+ * 1: parent
+ * 2: child/target
+ * 3: quota file
+ *
+ * The locking order between these classes is
+ * parent -> child -> normal -> quota
+ */
+enum inode_i_mutex_lock_class
+{
+ I_MUTEX_NORMAL,
+ I_MUTEX_PARENT,
+ I_MUTEX_CHILD,
+ I_MUTEX_QUOTA
+};
+
+/*
* NOTE: in a 32bit arch with a preemptable kernel and
* an UP compile the i_size_read/write must be atomic
* with respect to the local cpu (unlike with preempt disabled),
@@ -1276,6 +1310,8 @@ struct file_system_type {
struct module *owner;
struct file_system_type * next;
struct list_head fs_supers;
+ struct lock_class_key s_lock_key;
+ struct lock_class_key s_umount_key;
};
extern int get_sb_bdev(struct file_system_type *fs_type,
@@ -1404,6 +1440,7 @@ extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern struct block_device *open_by_devnum(dev_t, unsigned);
+extern struct block_device *open_partition_by_devnum(dev_t, unsigned);
extern const struct file_operations def_blk_fops;
extern const struct address_space_operations def_blk_aops;
extern const struct file_operations def_chr_fops;
@@ -1414,6 +1451,7 @@ extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long);
extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
extern int blkdev_get(struct block_device *, mode_t, unsigned);
extern int blkdev_put(struct block_device *);
+extern int blkdev_put_partition(struct block_device *);
extern int bd_claim(struct block_device *, void *);
extern void bd_release(struct block_device *);
#ifdef CONFIG_SYSFS
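
A sketch of the parent -> child i_mutex ordering described above, annotated with the new mutex_lock_nested(); the helper is hypothetical and skips all error handling.

static void lock_dir_and_victim(struct inode *dir, struct inode *victim)
{
        mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
        mutex_lock_nested(&victim->i_mutex, I_MUTEX_CHILD);
        /* ... directory operation ... */
        mutex_unlock(&victim->i_mutex);
        mutex_unlock(&dir->i_mutex);
}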
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 114ae583cca..50d8b5744cf 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -3,6 +3,7 @@
#include <linux/preempt.h>
#include <linux/smp_lock.h>
+#include <linux/lockdep.h>
#include <asm/hardirq.h>
#include <asm/system.h>
@@ -86,9 +87,6 @@ extern void synchronize_irq(unsigned int irq);
# define synchronize_irq(irq) barrier()
#endif
-#define nmi_enter() irq_enter()
-#define nmi_exit() sub_preempt_count(HARDIRQ_OFFSET)
-
struct task_struct;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
@@ -97,12 +95,35 @@ static inline void account_system_vtime(struct task_struct *tsk)
}
#endif
+/*
+ * It is safe to do non-atomic ops on ->hardirq_context,
+ * because NMI handlers may not preempt and the ops are
+ * always balanced, so the interrupted value of ->hardirq_context
+ * will always be restored.
+ */
#define irq_enter() \
do { \
account_system_vtime(current); \
add_preempt_count(HARDIRQ_OFFSET); \
+ trace_hardirq_enter(); \
+ } while (0)
+
+/*
+ * Exit irq context without processing softirqs:
+ */
+#define __irq_exit() \
+ do { \
+ trace_hardirq_exit(); \
+ account_system_vtime(current); \
+ sub_preempt_count(HARDIRQ_OFFSET); \
} while (0)
+/*
+ * Exit irq context and process softirqs if needed:
+ */
extern void irq_exit(void);
+#define nmi_enter() do { lockdep_off(); irq_enter(); } while (0)
+#define nmi_exit() do { __irq_exit(); lockdep_on(); } while (0)
+
#endif /* LINUX_HARDIRQ_H */
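
A sketch of how an architecture's NMI entry point brackets its body now that nmi_enter()/nmi_exit() also switch the lock validator off and back on; the name and signature below are placeholders for the real arch code.

asmlinkage void do_nmi(struct pt_regs *regs)
{
        nmi_enter();            /* lockdep_off() + irq_enter() */
        /* ... handle the NMI; no lock validation happens in here ... */
        nmi_exit();             /* __irq_exit() + lockdep_on(), no softirq processing */
}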
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 07d7305f131..e4bccbcc275 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -91,6 +91,7 @@ struct hrtimer_base {
ktime_t (*get_softirq_time)(void);
struct hrtimer *curr_timer;
ktime_t softirq_time;
+ struct lock_class_key lock_key;
};
/*
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 285316c836b..dc7abef1096 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1359,7 +1359,7 @@ extern struct semaphore ide_cfg_sem;
* ide_drive_t->hwif: constant, no locking
*/
-#define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable(); } while (0)
+#define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0)
extern struct bus_type ide_bus_type;
diff --git a/include/linux/idr.h b/include/linux/idr.h
index f559a719dbe..826803449db 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -66,7 +66,7 @@ struct idr {
.id_free = NULL, \
.layers = 0, \
.id_free_cnt = 0, \
- .lock = SPIN_LOCK_UNLOCKED, \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
}
#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 3a256957fb5..60aac2cea0c 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -3,6 +3,8 @@
#include <linux/file.h>
#include <linux/rcupdate.h>
+#include <linux/irqflags.h>
+#include <linux/lockdep.h>
#define INIT_FDTABLE \
{ \
@@ -21,7 +23,7 @@
.count = ATOMIC_INIT(1), \
.fdt = &init_files.fdtab, \
.fdtab = INIT_FDTABLE, \
- .file_lock = SPIN_LOCK_UNLOCKED, \
+ .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock), \
.next_fd = 0, \
.close_on_exec_init = { { 0, } }, \
.open_fds_init = { { 0, } }, \
@@ -36,7 +38,7 @@
.user_id = 0, \
.next = NULL, \
.wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \
- .ctx_lock = SPIN_LOCK_UNLOCKED, \
+ .ctx_lock = __SPIN_LOCK_UNLOCKED(name.ctx_lock), \
.reqs_active = 0U, \
.max_reqs = ~0U, \
}
@@ -48,7 +50,7 @@
.mm_users = ATOMIC_INIT(2), \
.mm_count = ATOMIC_INIT(1), \
.mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem), \
- .page_table_lock = SPIN_LOCK_UNLOCKED, \
+ .page_table_lock = __SPIN_LOCK_UNLOCKED(name.page_table_lock), \
.mmlist = LIST_HEAD_INIT(name.mmlist), \
.cpu_vm_mask = CPU_MASK_ALL, \
}
@@ -69,7 +71,7 @@
#define INIT_SIGHAND(sighand) { \
.count = ATOMIC_INIT(1), \
.action = { { { .sa_handler = NULL, } }, }, \
- .siglock = SPIN_LOCK_UNLOCKED, \
+ .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
}
extern struct group_info init_groups;
@@ -119,12 +121,13 @@ extern struct group_info init_groups;
.list = LIST_HEAD_INIT(tsk.pending.list), \
.signal = {{0}}}, \
.blocked = {{0}}, \
- .alloc_lock = SPIN_LOCK_UNLOCKED, \
+ .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
.journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
.pi_lock = SPIN_LOCK_UNLOCKED, \
- INIT_RT_MUTEXES(tsk) \
+ INIT_TRACE_IRQFLAGS \
+ INIT_LOCKDEP \
}
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index da3e0dbe61d..d5afee95fd4 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -10,6 +10,7 @@
#include <linux/irqreturn.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
+#include <linux/irqflags.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
@@ -80,12 +81,64 @@ extern int request_irq(unsigned int,
unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);
+/*
+ * On lockdep we don't want to enable hardirqs in hardirq
+ * context. Use local_irq_enable_in_hardirq() to annotate
+ * kernel code that has to do this nevertheless (pretty much
+ * the only valid case is for old/broken hardware that is
+ * insanely slow).
+ *
+ * NOTE: in theory this might break fragile code that relies
+ * on hardirq delivery - in practice we don't seem to have such
+ * places left. So the only effect should be slightly increased
+ * irqs-off latencies.
+ */
+#ifdef CONFIG_LOCKDEP
+# define local_irq_enable_in_hardirq() do { } while (0)
+#else
+# define local_irq_enable_in_hardirq() local_irq_enable()
+#endif
#ifdef CONFIG_GENERIC_HARDIRQS
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
+/*
+ * Special lockdep variants of irq disabling/enabling.
+ * These should be used for locking constructs that
+ * know that a particular irq context is disabled,
+ * and that this irq is the only irq-context user of
+ * the lock, so that it's safe to take the lock in the
+ * irq-disabled section without disabling hardirqs.
+ *
+ * On !CONFIG_LOCKDEP they are equivalent to the normal
+ * irq disable/enable methods.
+ */
+static inline void disable_irq_nosync_lockdep(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+#ifdef CONFIG_LOCKDEP
+ local_irq_disable();
+#endif
+}
+
+static inline void disable_irq_lockdep(unsigned int irq)
+{
+ disable_irq(irq);
+#ifdef CONFIG_LOCKDEP
+ local_irq_disable();
+#endif
+}
+
+static inline void enable_irq_lockdep(unsigned int irq)
+{
+#ifdef CONFIG_LOCKDEP
+ local_irq_enable();
+#endif
+ enable_irq(irq);
+}
+
/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);
@@ -99,7 +152,19 @@ static inline int disable_irq_wake(unsigned int irq)
return set_irq_wake(irq, 0);
}
-#endif
+#else /* !CONFIG_GENERIC_HARDIRQS */
+/*
+ * NOTE: non-genirq architectures, if they want to support the lock
+ * validator, need to define the methods below in their asm/irq.h
+ * files, under an #ifdef CONFIG_LOCKDEP section.
+ */
+# ifndef CONFIG_LOCKDEP
+# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq)
+# define disable_irq_lockdep(irq) disable_irq(irq)
+# define enable_irq_lockdep(irq) enable_irq(irq)
+# endif
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
@@ -135,13 +200,11 @@ static inline void __deprecated save_and_cli(unsigned long *x)
#define save_and_cli(x) save_and_cli(&x)
#endif /* CONFIG_SMP */
-/* SoftIRQ primitives. */
-#define local_bh_disable() \
- do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
-#define __local_bh_enable() \
- do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
-
+extern void local_bh_disable(void);
+extern void __local_bh_enable(void);
+extern void _local_bh_enable(void);
extern void local_bh_enable(void);
+extern void local_bh_enable_ip(unsigned long ip);
/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
frequency threaded job scheduling. For almost all the purposes
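
A sketch of the _lockdep irq-disable variants in a hypothetical driver: the device lock is only ever taken from this device's own interrupt handler and from process context, so with that irq line disabled the lock can be taken without turning off hardirqs - the extra local_irq_disable() done under CONFIG_LOCKDEP only keeps the validator from reporting a false irq-safety violation.

struct my_dev {                         /* hypothetical device structure */
        spinlock_t      lock;
        unsigned int    irq;
        int             running;
};

static void my_dev_shutdown(struct my_dev *dev)
{
        disable_irq_lockdep(dev->irq);  /* also does local_irq_disable() under CONFIG_LOCKDEP */
        spin_lock(&dev->lock);          /* safe: only our own handler takes this in irq context */
        dev->running = 0;
        spin_unlock(&dev->lock);
        enable_irq_lockdep(dev->irq);
}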
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 87a9fc039b4..5612dfeeae5 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -55,6 +55,7 @@ struct resource_list {
#define IORESOURCE_IRQ_LOWEDGE (1<<1)
#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
+#define IORESOURCE_IRQ_SHAREABLE (1<<4)
/* ISA PnP DMA specific bits (IORESOURCE_BITS) */
#define IORESOURCE_DMA_TYPE_MASK (3<<0)
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
new file mode 100644
index 00000000000..412e025bc5c
--- /dev/null
+++ b/include/linux/irqflags.h
@@ -0,0 +1,96 @@
+/*
+ * include/linux/irqflags.h
+ *
+ * IRQ flags tracing: follow the state of the hardirq and softirq flags and
+ * provide callbacks for transitions between ON and OFF states.
+ *
+ * This file gets included from lowlevel asm headers too, to provide
+ * wrapped versions of the local_irq_*() APIs, based on the
+ * raw_local_irq_*() macros from the lowlevel headers.
+ */
+#ifndef _LINUX_TRACE_IRQFLAGS_H
+#define _LINUX_TRACE_IRQFLAGS_H
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ extern void trace_hardirqs_on(void);
+ extern void trace_hardirqs_off(void);
+ extern void trace_softirqs_on(unsigned long ip);
+ extern void trace_softirqs_off(unsigned long ip);
+# define trace_hardirq_context(p) ((p)->hardirq_context)
+# define trace_softirq_context(p) ((p)->softirq_context)
+# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled)
+# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
+# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
+# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
+# define trace_softirq_enter() do { current->softirq_context++; } while (0)
+# define trace_softirq_exit() do { current->softirq_context--; } while (0)
+# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
+#else
+# define trace_hardirqs_on() do { } while (0)
+# define trace_hardirqs_off() do { } while (0)
+# define trace_softirqs_on(ip) do { } while (0)
+# define trace_softirqs_off(ip) do { } while (0)
+# define trace_hardirq_context(p) 0
+# define trace_softirq_context(p) 0
+# define trace_hardirqs_enabled(p) 0
+# define trace_softirqs_enabled(p) 0
+# define trace_hardirq_enter() do { } while (0)
+# define trace_hardirq_exit() do { } while (0)
+# define trace_softirq_enter() do { } while (0)
+# define trace_softirq_exit() do { } while (0)
+# define INIT_TRACE_IRQFLAGS
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+
+#include <asm/irqflags.h>
+
+#define local_irq_enable() \
+ do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
+#define local_irq_disable() \
+ do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
+#define local_irq_save(flags) \
+ do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)
+
+#define local_irq_restore(flags) \
+ do { \
+ if (raw_irqs_disabled_flags(flags)) { \
+ raw_local_irq_restore(flags); \
+ trace_hardirqs_off(); \
+ } else { \
+ trace_hardirqs_on(); \
+ raw_local_irq_restore(flags); \
+ } \
+ } while (0)
+#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
+/*
+ * The local_irq_*() APIs are equal to the raw_local_irq*()
+ * if !TRACE_IRQFLAGS.
+ */
+# define raw_local_irq_disable() local_irq_disable()
+# define raw_local_irq_enable() local_irq_enable()
+# define raw_local_irq_save(flags) local_irq_save(flags)
+# define raw_local_irq_restore(flags) local_irq_restore(flags)
+#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
+
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+#define safe_halt() \
+ do { \
+ trace_hardirqs_on(); \
+ raw_safe_halt(); \
+ } while (0)
+
+#define local_save_flags(flags) raw_local_save_flags(flags)
+
+#define irqs_disabled() \
+({ \
+ unsigned long flags; \
+ \
+ raw_local_save_flags(flags); \
+ raw_irqs_disabled_flags(flags); \
+})
+
+#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
+#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
+
+#endif
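
Usage is unchanged by the wrappers above; a sketch with the tracing side effects noted in comments (my_update_stats() is hypothetical):

static void my_update_stats(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* raw save+disable, then trace_hardirqs_off() */
        /* ... touch state shared with an interrupt handler ... */
        local_irq_restore(flags);       /* traces hardirqs ON only if 'flags' had them enabled */
}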
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 54e2549f96b..849043ce4ed 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -57,10 +57,25 @@ do { \
#define print_fn_descriptor_symbol(fmt, addr) print_symbol(fmt, addr)
#endif
-#define print_symbol(fmt, addr) \
-do { \
- __check_printsym_format(fmt, ""); \
- __print_symbol(fmt, addr); \
+static inline void print_symbol(const char *fmt, unsigned long addr)
+{
+ __check_printsym_format(fmt, "");
+ __print_symbol(fmt, (unsigned long)
+ __builtin_extract_return_addr((void *)addr));
+}
+
+#ifndef CONFIG_64BIT
+#define print_ip_sym(ip) \
+do { \
+ printk("[<%08lx>]", ip); \
+ print_symbol(" %s\n", ip); \
} while(0)
+#else
+#define print_ip_sym(ip) \
+do { \
+ printk("[<%016lx>]", ip); \
+ print_symbol(" %s\n", ip); \
+} while(0)
+#endif
#endif /*_LINUX_KALLSYMS_H*/
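
A small sketch of the new print_ip_sym() helper together with _RET_IP_ from <linux/debug_locks.h>; the surrounding function is hypothetical.

static void my_report_caller(void)
{
        printk(KERN_DEBUG "called from:\n");
        print_ip_sym(_RET_IP_);         /* prints "[<address>] symbol+offset/size" */
}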
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
new file mode 100644
index 00000000000..316e0fb8d7b
--- /dev/null
+++ b/include/linux/lockdep.h
@@ -0,0 +1,353 @@
+/*
+ * Runtime locking correctness validator
+ *
+ * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * see Documentation/lockdep-design.txt for more details.
+ */
+#ifndef __LINUX_LOCKDEP_H
+#define __LINUX_LOCKDEP_H
+
+#include <linux/linkage.h>
+#include <linux/list.h>
+#include <linux/debug_locks.h>
+#include <linux/stacktrace.h>
+
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Lock-class usage-state bits:
+ */
+enum lock_usage_bit
+{
+ LOCK_USED = 0,
+ LOCK_USED_IN_HARDIRQ,
+ LOCK_USED_IN_SOFTIRQ,
+ LOCK_ENABLED_SOFTIRQS,
+ LOCK_ENABLED_HARDIRQS,
+ LOCK_USED_IN_HARDIRQ_READ,
+ LOCK_USED_IN_SOFTIRQ_READ,
+ LOCK_ENABLED_SOFTIRQS_READ,
+ LOCK_ENABLED_HARDIRQS_READ,
+ LOCK_USAGE_STATES
+};
+
+/*
+ * Usage-state bitmasks:
+ */
+#define LOCKF_USED (1 << LOCK_USED)
+#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ)
+#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ)
+#define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS)
+#define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS)
+
+#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
+#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
+
+#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ)
+#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ)
+#define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ)
+#define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ)
+
+#define LOCKF_ENABLED_IRQS_READ \
+ (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
+#define LOCKF_USED_IN_IRQ_READ \
+ (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+
+#define MAX_LOCKDEP_SUBCLASSES 8UL
+
+/*
+ * Lock-classes are keyed via unique addresses, by embedding the
+ * lockclass-key into the kernel (or module) .data section. (For
+ * static locks we use the lock address itself as the key.)
+ */
+struct lockdep_subclass_key {
+ char __one_byte;
+} __attribute__ ((__packed__));
+
+struct lock_class_key {
+ struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
+};
+
+/*
+ * The lock-class itself:
+ */
+struct lock_class {
+ /*
+ * class-hash:
+ */
+ struct list_head hash_entry;
+
+ /*
+ * global list of all lock-classes:
+ */
+ struct list_head lock_entry;
+
+ struct lockdep_subclass_key *key;
+ unsigned int subclass;
+
+ /*
+ * IRQ/softirq usage tracking bits:
+ */
+ unsigned long usage_mask;
+ struct stack_trace usage_traces[LOCK_USAGE_STATES];
+
+ /*
+ * These fields represent a directed graph of lock dependencies,
+ * to every node we attach a list of "forward" and a list of
+ * "backward" graph nodes.
+ */
+ struct list_head locks_after, locks_before;
+
+ /*
+ * Generation counter, when doing certain classes of graph walking,
+ * to ensure that we check one node only once:
+ */
+ unsigned int version;
+
+ /*
+ * Statistics counter:
+ */
+ unsigned long ops;
+
+ const char *name;
+ int name_version;
+};
+
+/*
+ * Map the lock object (the lock instance) to the lock-class object.
+ * This is embedded into specific lock instances:
+ */
+struct lockdep_map {
+ struct lock_class_key *key;
+ struct lock_class *class[MAX_LOCKDEP_SUBCLASSES];
+ const char *name;
+};
+
+/*
+ * Every lock has a list of other locks that were taken after it.
+ * We only grow the list, never remove from it:
+ */
+struct lock_list {
+ struct list_head entry;
+ struct lock_class *class;
+ struct stack_trace trace;
+};
+
+/*
+ * We record lock dependency chains, so that we can cache them:
+ */
+struct lock_chain {
+ struct list_head entry;
+ u64 chain_key;
+};
+
+struct held_lock {
+ /*
+ * One-way hash of the dependency chain up to this point. We
+ * hash the hashes step by step as the dependency chain grows.
+ *
+ * We use it for dependency-caching and we skip detection
+ * passes and dependency-updates if there is a cache-hit, so
+ * it is absolutely critical for 100% coverage of the validator
+ * to have a unique key value for every unique dependency path
+ * that can occur in the system, to make a unique hash value
+ * as likely as possible - hence the 64-bit width.
+ *
+ * The task struct holds the current hash value (initialized
+ * with zero), here we store the previous hash value:
+ */
+ u64 prev_chain_key;
+ struct lock_class *class;
+ unsigned long acquire_ip;
+ struct lockdep_map *instance;
+
+ /*
+ * The lock-stack is unified in that the lock chains of interrupt
+ * contexts nest on top of process context chains, but we 'separate'
+ * the hashes by starting with 0 if we cross into an interrupt
+ * context, and we also do not add cross-context lock
+ * dependencies - the lock usage graph walking covers that area
+ * anyway, and we'd just unnecessarily increase the number of
+ * dependencies otherwise. [Note: hardirq and softirq contexts
+ * are separated from each other too.]
+ *
+ * The following field is used to detect when we cross into an
+ * interrupt context:
+ */
+ int irq_context;
+ int trylock;
+ int read;
+ int check;
+ int hardirqs_off;
+};
+
+/*
+ * Initialization, self-test and debugging-output methods:
+ */
+extern void lockdep_init(void);
+extern void lockdep_info(void);
+extern void lockdep_reset(void);
+extern void lockdep_reset_lock(struct lockdep_map *lock);
+extern void lockdep_free_key_range(void *start, unsigned long size);
+
+extern void lockdep_off(void);
+extern void lockdep_on(void);
+extern int lockdep_internal(void);
+
+/*
+ * These methods are used by specific locking variants (spinlocks,
+ * rwlocks, mutexes and rwsems) to pass init/acquire/release events
+ * to lockdep:
+ */
+
+extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key);
+
+/*
+ * Reinitialize a lock key - for cases where special locking or special
+ * initialization of locks would make the validator get the scope
+ * of dependencies wrong: they are either too broad (they need a class-split)
+ * or they are too narrow (they suffer from a false class-split):
+ */
+#define lockdep_set_class(lock, key) \
+ lockdep_init_map(&(lock)->dep_map, #key, key)
+#define lockdep_set_class_and_name(lock, key, name) \
+ lockdep_init_map(&(lock)->dep_map, name, key)
+
+/*
+ * Acquire a lock.
+ *
+ * Values for "read":
+ *
+ * 0: exclusive (write) acquire
+ * 1: read-acquire (no recursion allowed)
+ * 2: read-acquire with same-instance recursion allowed
+ *
+ * Values for check:
+ *
+ * 0: disabled
+ * 1: simple checks (freeing, held-at-exit-time, etc.)
+ * 2: full validation
+ */
+extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+ int trylock, int read, int check, unsigned long ip);
+
+extern void lock_release(struct lockdep_map *lock, int nested,
+ unsigned long ip);
+
+# define INIT_LOCKDEP .lockdep_recursion = 0,
+
+#else /* !LOCKDEP */
+
+static inline void lockdep_off(void)
+{
+}
+
+static inline void lockdep_on(void)
+{
+}
+
+static inline int lockdep_internal(void)
+{
+ return 0;
+}
+
+# define lock_acquire(l, s, t, r, c, i) do { } while (0)
+# define lock_release(l, n, i) do { } while (0)
+# define lockdep_init() do { } while (0)
+# define lockdep_info() do { } while (0)
+# define lockdep_init_map(lock, name, key) do { (void)(key); } while (0)
+# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
+# define lockdep_set_class_and_name(lock, key, name) \
+ do { (void)(key); } while (0)
+# define INIT_LOCKDEP
+# define lockdep_reset() do { debug_locks = 1; } while (0)
+# define lockdep_free_key_range(start, size) do { } while (0)
+/*
+ * The class key takes no space if lockdep is disabled:
+ */
+struct lock_class_key { };
+#endif /* !LOCKDEP */
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
+extern void early_init_irq_lock_class(void);
+#else
+# define early_init_irq_lock_class() do { } while (0)
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+extern void early_boot_irqs_off(void);
+extern void early_boot_irqs_on(void);
+#else
+# define early_boot_irqs_off() do { } while (0)
+# define early_boot_irqs_on() do { } while (0)
+#endif
+
+/*
+ * For trivial one-depth nesting of a lock-class, the following
+ * global define can be used. (Subsystems with multiple levels
+ * of nesting should define their own lock-nesting subclasses.)
+ */
+#define SINGLE_DEPTH_NESTING 1
+
+/*
+ * Map the dependency ops to NOP or to real lockdep ops, depending
+ * on the per lock-class debug mode:
+ */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# else
+# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# endif
+# define spin_release(l, n, i) lock_release(l, n, i)
+#else
+# define spin_acquire(l, s, t, i) do { } while (0)
+# define spin_release(l, n, i) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i)
+# else
+# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i)
+# endif
+# define rwlock_release(l, n, i) lock_release(l, n, i)
+#else
+# define rwlock_acquire(l, s, t, i) do { } while (0)
+# define rwlock_acquire_read(l, s, t, i) do { } while (0)
+# define rwlock_release(l, n, i) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# else
+# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# endif
+# define mutex_release(l, n, i) lock_release(l, n, i)
+#else
+# define mutex_acquire(l, s, t, i) do { } while (0)
+# define mutex_release(l, n, i) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i)
+# else
+# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i)
+# endif
+# define rwsem_release(l, n, i) lock_release(l, n, i)
+#else
+# define rwsem_acquire(l, s, t, i) do { } while (0)
+# define rwsem_acquire_read(l, s, t, i) do { } while (0)
+# define rwsem_release(l, n, i) do { } while (0)
+#endif
+
+#endif /* __LINUX_LOCKDEP_H */
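
A sketch of how a (hypothetical) locking primitive outside the stock spinlock/rwlock/mutex/rwsem families would hook into the validator: embed a lockdep_map, give every init call site its own lock_class_key, and forward acquire/release events. Everything named my_* is illustrative, and the usual CONFIG_DEBUG_LOCK_ALLOC #ifdefs are omitted for brevity (the sketch assumes that option is enabled).

struct my_lock {
        raw_spinlock_t          raw_lock;
        struct lockdep_map      dep_map;
};

#define my_lock_init(l)                                         \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        /* one class per init call site, keyed on &__key */     \
        lockdep_init_map(&(l)->dep_map, #l, &__key);            \
        /* ... initialize (l)->raw_lock itself ... */           \
} while (0)

static inline void my_lock(struct my_lock *l)
{
        /* subclass 0, not a trylock; full validation under PROVE_LOCKING */
        spin_acquire(&l->dep_map, 0, 0, _RET_IP_);
        __raw_spin_lock(&l->raw_lock);
}

static inline void my_unlock(struct my_lock *l)
{
        spin_release(&l->dep_map, 1, _RET_IP_);  /* nested == 1: released in order */
        __raw_spin_unlock(&l->raw_lock);
}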
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 75179529e39..990957e0929 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -14,6 +14,7 @@
#include <linux/prio_tree.h>
#include <linux/fs.h>
#include <linux/mutex.h>
+#include <linux/debug_locks.h>
struct mempolicy;
struct anon_vma;
@@ -1034,13 +1035,6 @@ static inline void vm_stat_account(struct mm_struct *mm,
}
#endif /* CONFIG_PROC_FS */
-static inline void
-debug_check_no_locks_freed(const void *from, unsigned long len)
-{
- mutex_debug_check_no_locks_freed(from, len);
- rt_mutex_debug_check_no_locks_freed(from, len);
-}
-
#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 27e748eb72b..656b588a9f9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -150,6 +150,10 @@ struct zone {
unsigned long lowmem_reserve[MAX_NR_ZONES];
#ifdef CONFIG_NUMA
+ /*
+ * zone reclaim becomes active if more unmapped pages than this exist.
+ */
+ unsigned long min_unmapped_ratio;
struct per_cpu_pageset *pageset[NR_CPUS];
#else
struct per_cpu_pageset pageset[NR_CPUS];
@@ -414,6 +418,8 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
+int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
+ struct file *, void __user *, size_t *, loff_t *);
#include <linux/topology.h>
/* Returns the number of the current Node. */
diff --git a/include/linux/module.h b/include/linux/module.h
index 9e9dc7c24d9..d06c74fb8c2 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -358,6 +358,7 @@ static inline int module_is_live(struct module *mod)
/* Is this address in a module? (second is with no locks, for oops) */
struct module *module_text_address(unsigned long addr);
struct module *__module_text_address(unsigned long addr);
+int is_module_address(unsigned long addr);
/* Returns module and fills in value, defined and namebuf, or NULL if
symnum out of range. */
@@ -496,6 +497,11 @@ static inline struct module *__module_text_address(unsigned long addr)
return NULL;
}
+static inline int is_module_address(unsigned long addr)
+{
+ return 0;
+}
+
/* Get/put a kernel symbol (calls should be symmetric) */
#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); })
#define symbol_put(x) do { } while(0)
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
index 8b5769f0046..2537285e106 100644
--- a/include/linux/mutex-debug.h
+++ b/include/linux/mutex-debug.h
@@ -2,22 +2,22 @@
#define __LINUX_MUTEX_DEBUG_H
#include <linux/linkage.h>
+#include <linux/lockdep.h>
/*
* Mutexes - debugging helpers:
*/
-#define __DEBUG_MUTEX_INITIALIZER(lockname) \
- , .held_list = LIST_HEAD_INIT(lockname.held_list), \
- .name = #lockname , .magic = &lockname
+#define __DEBUG_MUTEX_INITIALIZER(lockname) \
+ , .magic = &lockname
-#define mutex_init(sem) __mutex_init(sem, __FUNCTION__)
+#define mutex_init(mutex) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __mutex_init((mutex), #mutex, &__key); \
+} while (0)
extern void FASTCALL(mutex_destroy(struct mutex *lock));
-extern void mutex_debug_show_all_locks(void);
-extern void mutex_debug_show_held_locks(struct task_struct *filter);
-extern void mutex_debug_check_no_locks_held(struct task_struct *task);
-extern void mutex_debug_check_no_locks_freed(const void *from, unsigned long len);
-
#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index f1ac507fa20..27c48daa318 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -13,6 +13,7 @@
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/linkage.h>
+#include <linux/lockdep.h>
#include <asm/atomic.h>
@@ -50,11 +51,12 @@ struct mutex {
struct list_head wait_list;
#ifdef CONFIG_DEBUG_MUTEXES
struct thread_info *owner;
- struct list_head held_list;
- unsigned long acquire_ip;
const char *name;
void *magic;
#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
};
/*
@@ -74,24 +76,34 @@ struct mutex_waiter {
# include <linux/mutex-debug.h>
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
-# define mutex_init(mutex) __mutex_init(mutex, NULL)
+# define mutex_init(mutex) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __mutex_init((mutex), #mutex, &__key); \
+} while (0)
# define mutex_destroy(mutex) do { } while (0)
-# define mutex_debug_show_all_locks() do { } while (0)
-# define mutex_debug_show_held_locks(p) do { } while (0)
-# define mutex_debug_check_no_locks_held(task) do { } while (0)
-# define mutex_debug_check_no_locks_freed(from, len) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+ , .dep_map = { .name = #lockname }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif
#define __MUTEX_INITIALIZER(lockname) \
{ .count = ATOMIC_INIT(1) \
, .wait_lock = SPIN_LOCK_UNLOCKED \
, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
- __DEBUG_MUTEX_INITIALIZER(lockname) }
+ __DEBUG_MUTEX_INITIALIZER(lockname) \
+ __DEP_MAP_MUTEX_INITIALIZER(lockname) }
#define DEFINE_MUTEX(mutexname) \
struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
-extern void fastcall __mutex_init(struct mutex *lock, const char *name);
+extern void __mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key);
/***
* mutex_is_locked - is the mutex locked
@@ -110,6 +122,13 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
*/
extern void fastcall mutex_lock(struct mutex *lock);
extern int fastcall mutex_lock_interruptible(struct mutex *lock);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
+#else
+# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
+#endif
+
/*
* NOTE: mutex_trylock() follows the spin_trylock() convention,
* not the down_trylock() convention!
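
A sketch of mutex_lock_nested() with the SINGLE_DEPTH_NESTING subclass from <linux/lockdep.h>, for the classic "take two locks of the same class in a fixed order" case; double_lock() is a hypothetical helper.

static void double_lock(struct mutex *a, struct mutex *b)
{
        if (a > b) {                    /* impose a stable ordering, e.g. by address */
                struct mutex *tmp = a;
                a = b;
                b = tmp;
        }
        mutex_lock(a);
        /* same class as 'a', so annotate the second acquisition as nested */
        mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
}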
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 51dbab9710c..7ff386a6ae8 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -65,7 +65,7 @@ struct raw_notifier_head {
} while (0)
#define ATOMIC_NOTIFIER_INIT(name) { \
- .lock = SPIN_LOCK_UNLOCKED, \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.head = NULL }
#define BLOCKING_NOTIFIER_INIT(name) { \
.rwsem = __RWSEM_INITIALIZER((name).rwsem), \
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index fa4a3b82ba7..5d41dee82f8 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -29,8 +29,6 @@ struct rt_mutex {
struct task_struct *owner;
#ifdef CONFIG_DEBUG_RT_MUTEXES
int save_state;
- struct list_head held_list_entry;
- unsigned long acquire_ip;
const char *name, *file;
int line;
void *magic;
@@ -98,14 +96,6 @@ extern int rt_mutex_trylock(struct rt_mutex *lock);
extern void rt_mutex_unlock(struct rt_mutex *lock);
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-# define INIT_RT_MUTEX_DEBUG(tsk) \
- .held_list_head = LIST_HEAD_INIT(tsk.held_list_head), \
- .held_list_lock = SPIN_LOCK_UNLOCKED
-#else
-# define INIT_RT_MUTEX_DEBUG(tsk)
-#endif
-
#ifdef CONFIG_RT_MUTEXES
# define INIT_RT_MUTEXES(tsk) \
.pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index f30f805080a..ae1fcadd598 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -32,30 +32,37 @@ struct rw_semaphore {
__s32 activity;
spinlock_t wait_lock;
struct list_head wait_list;
-#if RWSEM_DEBUG
- int debug;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
#endif
};
-/*
- * initialisation
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT , 0
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
-#define __RWSEM_DEBUG_INIT /* */
+# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
#define __RWSEM_INITIALIZER(name) \
-{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT }
+{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-extern void FASTCALL(init_rwsem(struct rw_semaphore *sem));
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key);
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
+
extern void FASTCALL(__down_read(struct rw_semaphore *sem));
extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem));
extern void FASTCALL(__down_write(struct rw_semaphore *sem));
+extern void FASTCALL(__down_write_nested(struct rw_semaphore *sem, int subclass));
extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem));
extern void FASTCALL(__up_read(struct rw_semaphore *sem));
extern void FASTCALL(__up_write(struct rw_semaphore *sem));
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index f99fe90732a..658afb37c3f 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -9,8 +9,6 @@
#include <linux/linkage.h>
-#define RWSEM_DEBUG 0
-
#ifdef __KERNEL__
#include <linux/types.h>
@@ -26,89 +24,58 @@ struct rw_semaphore;
#include <asm/rwsem.h> /* use an arch-specific implementation */
#endif
-#ifndef rwsemtrace
-#if RWSEM_DEBUG
-extern void FASTCALL(rwsemtrace(struct rw_semaphore *sem, const char *str));
-#else
-#define rwsemtrace(SEM,FMT)
-#endif
-#endif
-
/*
* lock for reading
*/
-static inline void down_read(struct rw_semaphore *sem)
-{
- might_sleep();
- rwsemtrace(sem,"Entering down_read");
- __down_read(sem);
- rwsemtrace(sem,"Leaving down_read");
-}
+extern void down_read(struct rw_semaphore *sem);
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
-static inline int down_read_trylock(struct rw_semaphore *sem)
-{
- int ret;
- rwsemtrace(sem,"Entering down_read_trylock");
- ret = __down_read_trylock(sem);
- rwsemtrace(sem,"Leaving down_read_trylock");
- return ret;
-}
+extern int down_read_trylock(struct rw_semaphore *sem);
/*
* lock for writing
*/
-static inline void down_write(struct rw_semaphore *sem)
-{
- might_sleep();
- rwsemtrace(sem,"Entering down_write");
- __down_write(sem);
- rwsemtrace(sem,"Leaving down_write");
-}
+extern void down_write(struct rw_semaphore *sem);
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
-static inline int down_write_trylock(struct rw_semaphore *sem)
-{
- int ret;
- rwsemtrace(sem,"Entering down_write_trylock");
- ret = __down_write_trylock(sem);
- rwsemtrace(sem,"Leaving down_write_trylock");
- return ret;
-}
+extern int down_write_trylock(struct rw_semaphore *sem);
/*
* release a read lock
*/
-static inline void up_read(struct rw_semaphore *sem)
-{
- rwsemtrace(sem,"Entering up_read");
- __up_read(sem);
- rwsemtrace(sem,"Leaving up_read");
-}
+extern void up_read(struct rw_semaphore *sem);
/*
* release a write lock
*/
-static inline void up_write(struct rw_semaphore *sem)
-{
- rwsemtrace(sem,"Entering up_write");
- __up_write(sem);
- rwsemtrace(sem,"Leaving up_write");
-}
+extern void up_write(struct rw_semaphore *sem);
/*
* downgrade write lock to read lock
*/
-static inline void downgrade_write(struct rw_semaphore *sem)
-{
- rwsemtrace(sem,"Entering downgrade_write");
- __downgrade_write(sem);
- rwsemtrace(sem,"Leaving downgrade_write");
-}
+extern void downgrade_write(struct rw_semaphore *sem);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * nested locking:
+ */
+extern void down_read_nested(struct rw_semaphore *sem, int subclass);
+extern void down_write_nested(struct rw_semaphore *sem, int subclass);
+/*
+ * Take/release a lock when the task releasing it is not the one that acquired it:
+ */
+extern void down_read_non_owner(struct rw_semaphore *sem);
+extern void up_read_non_owner(struct rw_semaphore *sem);
+#else
+# define down_read_nested(sem, subclass) down_read(sem)
+# define down_write_nested(sem, subclass) down_write(sem)
+# define down_read_non_owner(sem) down_read(sem)
+# define up_read_non_owner(sem) up_read(sem)
+#endif
#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_H */
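
A sketch of the non-owner variants above, for the pattern where a semaphore is read-acquired in one context and released from another (e.g. on I/O completion); both functions are hypothetical.

static void my_submit_async(struct rw_semaphore *sem)
{
        down_read_non_owner(sem);       /* the completion path, not this task, releases it */
        /* ... queue the asynchronous work ... */
}

static void my_complete_async(struct rw_semaphore *sem)
{
        /* releaser != acquirer, so the strict owner checks are skipped */
        up_read_non_owner(sem);
}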
diff --git a/include/linux/sched.h b/include/linux/sched.h
index aaf723308ed..1c876e27ff9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -184,11 +184,11 @@ extern unsigned long weighted_cpuload(const int cpu);
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;
-typedef struct task_struct task_t;
+struct task_struct;
extern void sched_init(void);
extern void sched_init_smp(void);
-extern void init_idle(task_t *idle, int cpu);
+extern void init_idle(struct task_struct *idle, int cpu);
extern cpumask_t nohz_cpu_mask;
@@ -383,7 +383,7 @@ struct signal_struct {
wait_queue_head_t wait_chldexit; /* for wait4() */
/* current thread group signal load-balancing target: */
- task_t *curr_target;
+ struct task_struct *curr_target;
/* shared signal handling: */
struct sigpending shared_pending;
@@ -534,7 +534,6 @@ extern struct user_struct *find_user(uid_t);
extern struct user_struct root_user;
#define INIT_USER (&root_user)
-typedef struct prio_array prio_array_t;
struct backing_dev_info;
struct reclaim_state;
@@ -699,7 +698,7 @@ extern int groups_search(struct group_info *group_info, gid_t grp);
((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
-extern void prefetch_stack(struct task_struct*);
+extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif
@@ -715,6 +714,8 @@ enum sleep_type {
SLEEP_INTERRUPTED,
};
+struct prio_array;
+
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
struct thread_info *thread_info;
@@ -732,7 +733,7 @@ struct task_struct {
int load_weight; /* for niceness load balancing purposes */
int prio, static_prio, normal_prio;
struct list_head run_list;
- prio_array_t *array;
+ struct prio_array *array;
unsigned short ioprio;
unsigned int btrace_seq;
@@ -865,16 +866,34 @@ struct task_struct {
struct plist_head pi_waiters;
/* Deadlock detection and priority inheritance handling */
struct rt_mutex_waiter *pi_blocked_on;
-# ifdef CONFIG_DEBUG_RT_MUTEXES
- spinlock_t held_list_lock;
- struct list_head held_list_head;
-# endif
#endif
#ifdef CONFIG_DEBUG_MUTEXES
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+ unsigned int irq_events;
+ int hardirqs_enabled;
+ unsigned long hardirq_enable_ip;
+ unsigned int hardirq_enable_event;
+ unsigned long hardirq_disable_ip;
+ unsigned int hardirq_disable_event;
+ int softirqs_enabled;
+ unsigned long softirq_disable_ip;
+ unsigned int softirq_disable_event;
+ unsigned long softirq_enable_ip;
+ unsigned int softirq_enable_event;
+ int hardirq_context;
+ int softirq_context;
+#endif
+#ifdef CONFIG_LOCKDEP
+# define MAX_LOCK_DEPTH 30UL
+ u64 curr_chain_key;
+ int lockdep_depth;
+ struct held_lock held_locks[MAX_LOCK_DEPTH];
+ unsigned int lockdep_recursion;
+#endif
/* journalling filesystem info */
void *journal_info;
@@ -1013,9 +1032,9 @@ static inline void put_task_struct(struct task_struct *t)
#define used_math() tsk_used_math(current)
#ifdef CONFIG_SMP
-extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
+extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
#else
-static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
if (!cpu_isset(0, new_mask))
return -EINVAL;
@@ -1024,7 +1043,8 @@ static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
#endif
extern unsigned long long sched_clock(void);
-extern unsigned long long current_sched_time(const task_t *current_task);
+extern unsigned long long
+current_sched_time(const struct task_struct *current_task);
/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
@@ -1042,27 +1062,27 @@ static inline void idle_task_exit(void) {}
extern void sched_idle_next(void);
#ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(task_t *p);
-extern void rt_mutex_setprio(task_t *p, int prio);
-extern void rt_mutex_adjust_pi(task_t *p);
+extern int rt_mutex_getprio(struct task_struct *p);
+extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern void rt_mutex_adjust_pi(struct task_struct *p);
#else
-static inline int rt_mutex_getprio(task_t *p)
+static inline int rt_mutex_getprio(struct task_struct *p)
{
return p->normal_prio;
}
# define rt_mutex_adjust_pi(p) do { } while (0)
#endif
-extern void set_user_nice(task_t *p, long nice);
-extern int task_prio(const task_t *p);
-extern int task_nice(const task_t *p);
-extern int can_nice(const task_t *p, const int nice);
-extern int task_curr(const task_t *p);
+extern void set_user_nice(struct task_struct *p, long nice);
+extern int task_prio(const struct task_struct *p);
+extern int task_nice(const struct task_struct *p);
+extern int can_nice(const struct task_struct *p, const int nice);
+extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
-extern task_t *idle_task(int cpu);
-extern task_t *curr_task(int cpu);
-extern void set_curr_task(int cpu, task_t *p);
+extern struct task_struct *idle_task(int cpu);
+extern struct task_struct *curr_task(int cpu);
+extern void set_curr_task(int cpu, struct task_struct *p);
void yield(void);
@@ -1119,8 +1139,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
-extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
-extern void FASTCALL(sched_exit(task_t * p));
+extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
+extern void FASTCALL(sched_exit(struct task_struct * p));
extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);
@@ -1225,17 +1245,17 @@ extern NORET_TYPE void do_group_exit(int);
extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);
-extern task_t *child_reaper;
+extern struct task_struct *child_reaper;
extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
-task_t *fork_idle(int);
+struct task_struct *fork_idle(int);
extern void set_task_comm(struct task_struct *tsk, char *from);
extern void get_task_comm(char *to, struct task_struct *tsk);
#ifdef CONFIG_SMP
-extern void wait_task_inactive(task_t * p);
+extern void wait_task_inactive(struct task_struct * p);
#else
#define wait_task_inactive(p) do { } while (0)
#endif
@@ -1261,13 +1281,13 @@ extern void wait_task_inactive(task_t * p);
/* de_thread depends on thread_group_leader not being a pid based check */
#define thread_group_leader(p) (p == p->group_leader)
-static inline task_t *next_thread(const task_t *p)
+static inline struct task_struct *next_thread(const struct task_struct *p)
{
return list_entry(rcu_dereference(p->thread_group.next),
- task_t, thread_group);
+ struct task_struct, thread_group);
}
-static inline int thread_group_empty(task_t *p)
+static inline int thread_group_empty(struct task_struct *p)
{
return list_empty(&p->thread_group);
}
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 7bc5c7c12b5..46000936f8f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -38,9 +38,17 @@ typedef struct {
* These macros triggered gcc-3.x compile-time problems. We think these are
* OK now. Be cautious.
*/
-#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED }
-#define seqlock_init(x) do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0)
+#define __SEQLOCK_UNLOCKED(lockname) \
+ { 0, __SPIN_LOCK_UNLOCKED(lockname) }
+#define SEQLOCK_UNLOCKED \
+ __SEQLOCK_UNLOCKED(old_style_seqlock_init)
+
+#define seqlock_init(x) \
+ do { *(x) = (seqlock_t) __SEQLOCK_UNLOCKED(x); } while (0)
+
+#define DEFINE_SEQLOCK(x) \
+ seqlock_t x = __SEQLOCK_UNLOCKED(x)
/* Lock out other writers and update the count.
* Acts like a normal spin_lock/unlock.
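
A sketch of the new DEFINE_SEQLOCK() static initializer together with the usual read-side retry loop; the my_* names are illustrative.

static DEFINE_SEQLOCK(my_time_seqlock);
static u64 my_time_ns;

static u64 my_read_time(void)
{
        unsigned long seq;
        u64 ret;

        do {
                seq = read_seqbegin(&my_time_seqlock);
                ret = my_time_ns;
        } while (read_seqretry(&my_time_seqlock, seq));

        return ret;
}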
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 57d7d4965f9..3597b4f1438 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -604,9 +604,12 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
return list_->qlen;
}
+extern struct lock_class_key skb_queue_lock_key;
+
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
spin_lock_init(&list->lock);
+ lockdep_set_class(&list->lock, &skb_queue_lock_key);
list->prev = list->next = (struct sk_buff *)list;
list->qlen = 0;
}
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index ae23beef9cc..31473db92d3 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -82,14 +82,40 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
/*
* Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
*/
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif
-#define spin_lock_init(lock) do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
-#define rwlock_init(lock) do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define spin_lock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __spin_lock_init((lock), #lock, &__key); \
+} while (0)
+
+#else
+# define spin_lock_init(lock) \
+ do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define rwlock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __rwlock_init((lock), #lock, &__key); \
+} while (0)
+#else
+# define rwlock_init(lock) \
+ do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+#endif
#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
@@ -113,7 +139,6 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
extern int _raw_spin_trylock(spinlock_t *lock);
extern void _raw_spin_unlock(spinlock_t *lock);
-
extern void _raw_read_lock(rwlock_t *lock);
extern int _raw_read_trylock(rwlock_t *lock);
extern void _raw_read_unlock(rwlock_t *lock);
@@ -121,17 +146,17 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
extern int _raw_write_trylock(rwlock_t *lock);
extern void _raw_write_unlock(rwlock_t *lock);
#else
-# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
-# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
# define _raw_spin_lock_flags(lock, flags) \
__raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
+# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
#endif
#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
@@ -147,6 +172,13 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
#define write_trylock(lock) __cond_lock(_write_trylock(lock))
#define spin_lock(lock) _spin_lock(lock)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+#else
+# define spin_lock_nested(lock, subclass) _spin_lock(lock)
+#endif
+
#define write_lock(lock) _write_lock(lock)
#define read_lock(lock) _read_lock(lock)
@@ -172,21 +204,18 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
/*
* We inline the unlock functions in the nondebug case:
*/
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
+#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
+ !defined(CONFIG_SMP)
# define spin_unlock(lock) _spin_unlock(lock)
# define read_unlock(lock) _read_unlock(lock)
# define write_unlock(lock) _write_unlock(lock)
-#else
-# define spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
-# define read_unlock(lock) __raw_read_unlock(&(lock)->raw_lock)
-# define write_unlock(lock) __raw_write_unlock(&(lock)->raw_lock)
-#endif
-
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
# define spin_unlock_irq(lock) _spin_unlock_irq(lock)
# define read_unlock_irq(lock) _read_unlock_irq(lock)
# define write_unlock_irq(lock) _write_unlock_irq(lock)
#else
+# define spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
+# define read_unlock(lock) __raw_read_unlock(&(lock)->raw_lock)
+# define write_unlock(lock) __raw_write_unlock(&(lock)->raw_lock)
# define spin_unlock_irq(lock) \
do { __raw_spin_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
# define read_unlock_irq(lock) \
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 78e6989ffb5..b2c4f829946 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -20,6 +20,8 @@ int in_lock_functions(unsigned long addr);
#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x))
void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t);
+void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+ __acquires(spinlock_t);
void __lockfunc _read_lock(rwlock_t *lock) __acquires(rwlock_t);
void __lockfunc _write_lock(rwlock_t *lock) __acquires(rwlock_t);
void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(spinlock_t);
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index cd81cee566f..67faa044c5f 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -49,6 +49,7 @@
do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
#define _spin_lock(lock) __LOCK(lock)
+#define _spin_lock_nested(lock, subclass) __LOCK(lock)
#define _read_lock(lock) __LOCK(lock)
#define _write_lock(lock) __LOCK(lock)
#define _spin_lock_bh(lock) __LOCK_BH(lock)
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 9cb51e07039..dc5fb69e4de 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,6 +9,8 @@
* Released under the General Public License (GPL).
*/
+#include <linux/lockdep.h>
+
#if defined(CONFIG_SMP)
# include <asm/spinlock_types.h>
#else
@@ -24,6 +26,9 @@ typedef struct {
unsigned int magic, owner_cpu;
void *owner;
#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
} spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
@@ -37,31 +42,53 @@ typedef struct {
unsigned int magic, owner_cpu;
void *owner;
#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
} rwlock_t;
#define RWLOCK_MAGIC 0xdeaf1eed
#define SPINLOCK_OWNER_INIT ((void *)-1L)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
#ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_LOCK_UNLOCKED \
+# define __SPIN_LOCK_UNLOCKED(lockname) \
(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
.magic = SPINLOCK_MAGIC, \
.owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1 }
-#define RW_LOCK_UNLOCKED \
+ .owner_cpu = -1, \
+ SPIN_DEP_MAP_INIT(lockname) }
+#define __RW_LOCK_UNLOCKED(lockname) \
(rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
.magic = RWLOCK_MAGIC, \
.owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1 }
+ .owner_cpu = -1, \
+ RW_DEP_MAP_INIT(lockname) }
#else
-# define SPIN_LOCK_UNLOCKED \
- (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED }
-#define RW_LOCK_UNLOCKED \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED }
+# define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEP_MAP_INIT(lockname) }
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
+ RW_DEP_MAP_INIT(lockname) }
#endif
-#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
-#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
+#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
+#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
+
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
#endif /* __LINUX_SPINLOCK_TYPES_H */
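
The conversion pattern used throughout this patch, sketched with illustrative lock names: the old SPIN_LOCK_UNLOCKED initializer lumps every user into the single old_style_spin_init class, while the per-name initializers give each lock its own class.

/* old style: all such locks share a single lock class */
static spinlock_t legacy_lock = SPIN_LOCK_UNLOCKED;

/* new style: the class is keyed on the lock's own name */
static spinlock_t my_lock = __SPIN_LOCK_UNLOCKED(my_lock);
static DEFINE_SPINLOCK(my_other_lock);          /* equivalent shorthand */
static DEFINE_RWLOCK(my_rwlock);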
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198..27644af20b7 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -12,10 +12,14 @@
* Released under the General Public License (GPL).
*/
-#ifdef CONFIG_DEBUG_SPINLOCK
+#if defined(CONFIG_DEBUG_SPINLOCK) || \
+ defined(CONFIG_DEBUG_LOCK_ALLOC)
typedef struct {
volatile unsigned int slock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
} raw_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
@@ -30,6 +34,9 @@ typedef struct { } raw_spinlock_t;
typedef struct {
/* no debug version on UP */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
} raw_rwlock_t;
#define __RAW_RW_LOCK_UNLOCKED { }
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 31accf2f0b1..ea54c4c9a4e 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,7 +18,6 @@
*/
#ifdef CONFIG_DEBUG_SPINLOCK
-
#define __raw_spin_is_locked(x) ((x)->slock == 0)
static inline void __raw_spin_lock(raw_spinlock_t *lock)
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
new file mode 100644
index 00000000000..9cc81e57222
--- /dev/null
+++ b/include/linux/stacktrace.h
@@ -0,0 +1,20 @@
+#ifndef __LINUX_STACKTRACE_H
+#define __LINUX_STACKTRACE_H
+
+#ifdef CONFIG_STACKTRACE
+struct stack_trace {
+ unsigned int nr_entries, max_entries;
+ unsigned long *entries;
+};
+
+extern void save_stack_trace(struct stack_trace *trace,
+ struct task_struct *task, int all_contexts,
+ unsigned int skip);
+
+extern void print_stack_trace(struct stack_trace *trace, int spaces);
+#else
+# define save_stack_trace(trace, task, all, skip) do { } while (0)
+# define print_stack_trace(trace, spaces) do { } while (0)
+#endif
+
+#endif
diff --git a/include/linux/swap.h b/include/linux/swap.h
index cf6ca6e377b..5e59184c909 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -189,6 +189,7 @@ extern long vm_total_pages;
#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
+extern int sysctl_min_unmapped_ratio;
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
#else
#define zone_reclaim_mode 0
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 46e4d8f2771..e4b1a4d4dcf 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -188,7 +188,7 @@ enum
VM_DROP_PAGECACHE=29, /* int: nuke lots of pagecache */
VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */
VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */
- VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */
+ VM_MIN_UNMAPPED=32, /* Set min percent of unmapped pages */
VM_PANIC_ON_OOM=33, /* panic at out-of-memory */
VM_VDSO_ENABLED=34, /* map VDSO into new processes? */
};
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 544e855c7c0..794be7af58a 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -68,7 +68,7 @@ struct task_struct;
wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
- .lock = SPIN_LOCK_UNLOCKED, \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.task_list = { &(name).task_list, &(name).task_list } }
#define DECLARE_WAIT_QUEUE_HEAD(name) \
@@ -77,9 +77,15 @@ struct task_struct;
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
{ .flags = word, .bit_nr = bit, }
+/*
+ * lockdep: we want one lock-class for all waitqueue locks.
+ */
+extern struct lock_class_key waitqueue_lock_key;
+
static inline void init_waitqueue_head(wait_queue_head_t *q)
{
spin_lock_init(&q->lock);
+ lockdep_set_class(&q->lock, &waitqueue_lock_key);
INIT_LIST_HEAD(&q->task_list);
}