Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug        26
-rw-r--r--  lib/Makefile              9
-rw-r--r--  lib/bitrev.c              3
-rw-r--r--  lib/bug.c                 2
-rw-r--r--  lib/debugobjects.c       15
-rw-r--r--  lib/div64.c              10
-rw-r--r--  lib/percpu_counter.c      7
-rw-r--r--  lib/radix-tree.c        122
-rw-r--r--  lib/smp_processor_id.c    6
-rw-r--r--  lib/textsearch.c          1
-rw-r--r--  lib/ts_bm.c               2
-rw-r--r--  lib/vsprintf.c          128
12 files changed, 220 insertions(+), 111 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d2099f41aa1..df27132a56f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -419,7 +419,6 @@ config DEBUG_LOCKING_API_SELFTESTS
config STACKTRACE
bool
- depends on DEBUG_KERNEL
depends on STACKTRACE_SUPPORT
config DEBUG_KOBJECT
@@ -531,16 +530,34 @@ config BOOT_PRINTK_DELAY
config RCU_TORTURE_TEST
tristate "torture tests for RCU"
depends on DEBUG_KERNEL
- depends on m
default n
help
This option provides a kernel module that runs torture tests
on the RCU infrastructure. The kernel module may be built
after the fact on the running kernel to be tested, if desired.
+ Say Y here if you want RCU torture tests to be built into
+ the kernel.
Say M if you want the RCU torture tests to build as a module.
Say N if you are unsure.
+config RCU_TORTURE_TEST_RUNNABLE
+ bool "torture tests for RCU runnable by default"
+ depends on RCU_TORTURE_TEST = y
+ default n
+ help
+ This option provides a way to build the RCU torture tests
+ directly into the kernel without them starting up at boot
+ time. You can use /proc/sys/kernel/rcutorture_runnable
+ to manually override this setting. This /proc file is
+ available only when the RCU torture tests have been built
+ into the kernel.
+
+ Say Y here if you want the RCU torture tests to start during
+ boot (you probably don't).
+ Say N here if you want the RCU torture tests to start only
+ after being manually enabled via /proc.
+
config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
depends on DEBUG_KERNEL
@@ -563,6 +580,9 @@ config BACKTRACE_SELF_TEST
for distributions or general kernels, but only for kernel
developers working on architecture code.
+ Note that if you want to also test saved backtraces, you will
+ have to enable STACKTRACE as well.
+
Say N if you are unsure.
config LKDTM
@@ -634,6 +654,8 @@ config LATENCYTOP
Enable this option if you want to use the LatencyTOP tool
to find out which userspace is blocking on what kernel operations.
+source kernel/trace/Kconfig
+
config PROVIDE_OHCI1394_DMA_INIT
bool "Remote debugging over FireWire early on boot"
depends on PCI && X86
diff --git a/lib/Makefile b/lib/Makefile
index 237a8298f8c..2c62a9c06fb 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -8,6 +8,15 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
sha1.o irq_regs.o reciprocal_div.o argv_split.o \
proportions.o prio_heap.o ratelimit.o
+ifdef CONFIG_FTRACE
+# Do not profile string.o, since it may be used in early boot or vdso
+CFLAGS_REMOVE_string.o = -pg
+# Also do not profile any debug utilities
+CFLAGS_REMOVE_spinlock_debug.o = -pg
+CFLAGS_REMOVE_list_debug.o = -pg
+CFLAGS_REMOVE_debugobjects.o = -pg
+endif
+
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
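
With CONFIG_FTRACE, -pg makes gcc plant a call to the profiling hook in every function prologue; string.o can run in early boot and in the vdso, where no kernel hook is reachable yet, hence the flag is stripped for it and for the debug helpers the tracer itself may use. A userspace sketch of what the instrumentation amounts to; the explicit mcount() call and its stub body are illustrative only (gcc inserts the call itself under -pg, so build this demo without -pg):

```c
#include <stdio.h>

/* Stand-in for the profiling hook that -pg makes gcc call on
 * function entry; in the kernel this is the mcount/ftrace hook. */
static void mcount(void)
{
        puts("profiling hook fired");
}

/* Under -pg the compiler emits the equivalent of this call in the
 * prologue; it is written out here only to show the effect. */
static void traced_function(void)
{
        mcount();
        /* ... real work ... */
}

int main(void)
{
        traced_function();
        return 0;
}
```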
diff --git a/lib/bitrev.c b/lib/bitrev.c
index 989aff73f88..3956203456d 100644
--- a/lib/bitrev.c
+++ b/lib/bitrev.c
@@ -42,10 +42,11 @@ const u8 byte_rev_table[256] = {
};
EXPORT_SYMBOL_GPL(byte_rev_table);
-static __always_inline u16 bitrev16(u16 x)
+u16 bitrev16(u16 x)
{
return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8);
}
+EXPORT_SYMBOL(bitrev16);
/**
* bitrev32 - reverse the order of bits in a u32 value
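
bitrev16() loses __always_inline and gains an export so modules can call it; it still just reverses each byte and swaps the halves. A self-contained demo of the same composition, with a loop-based byte reversal standing in for the kernel's byte_rev_table lookup:

```c
#include <stdio.h>
#include <stdint.h>

/* Bit-by-bit byte reversal; the kernel uses the 256-entry
 * byte_rev_table instead, this loop just keeps the demo standalone. */
static uint8_t bitrev8_demo(uint8_t x)
{
        uint8_t r = 0;
        int i;

        for (i = 0; i < 8; i++)
                r = (r << 1) | ((x >> i) & 1);
        return r;
}

/* Same composition as the kernel's bitrev16: reverse each byte,
 * then swap the two halves. */
static uint16_t bitrev16_demo(uint16_t x)
{
        return (bitrev8_demo(x & 0xff) << 8) | bitrev8_demo(x >> 8);
}

int main(void)
{
        printf("%#06x\n", bitrev16_demo(0x8001));   /* prints 0x8001 */
        printf("%#06x\n", bitrev16_demo(0x0001));   /* prints 0x8000 */
        return 0;
}
```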
diff --git a/lib/bug.c b/lib/bug.c
index 530f38f5578..bfeafd60ee9 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -37,6 +37,7 @@
*/
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/sched.h>
@@ -149,6 +150,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
(void *)bugaddr);
show_regs(regs);
+ add_taint(TAINT_WARN);
return BUG_TRAP_TYPE_WARN;
}
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a76a5e122ae..85b18d79be8 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -68,6 +68,7 @@ static int fill_pool(void)
{
gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
struct debug_obj *new;
+ unsigned long flags;
if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
return obj_pool_free;
@@ -81,10 +82,10 @@ static int fill_pool(void)
if (!new)
return obj_pool_free;
- spin_lock(&pool_lock);
+ spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&new->node, &obj_pool);
obj_pool_free++;
- spin_unlock(&pool_lock);
+ spin_unlock_irqrestore(&pool_lock, flags);
}
return obj_pool_free;
}
@@ -110,16 +111,13 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
}
/*
- * Allocate a new object. If the pool is empty and no refill possible,
- * switch off the debugger.
+ * Allocate a new object. If the pool is empty, switch off the debugger.
*/
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
struct debug_obj *obj = NULL;
- int retry = 0;
-repeat:
spin_lock(&pool_lock);
if (obj_pool.first) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -141,9 +139,6 @@ repeat:
}
spin_unlock(&pool_lock);
- if (fill_pool() && !obj && !retry++)
- goto repeat;
-
return obj;
}
@@ -261,6 +256,8 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
struct debug_obj *obj;
unsigned long flags;
+ fill_pool();
+
db = get_bucket((unsigned long) addr);
spin_lock_irqsave(&db->lock, flags);
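
Two related changes here: fill_pool() moves to the front of __debug_object_init(), so the GFP_ATOMIC refill happens before any lock is held (which is why the retry loop in alloc_object() can go), and pool_lock switches to the irqsave variants because the lock is also taken from contexts that may run with interrupts disabled; a plain spin_lock could deadlock if an interrupt took the same lock on this CPU. A condensed outline of the resulting init path, using the file's own names (a kernel-context sketch, not a drop-in):

```c
/* Outline of the reworked __debug_object_init() flow:
 * refill the pool first, then do the locked lookup/allocation. */
static void debug_object_init_sketch(void *addr, struct debug_obj_descr *descr)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        fill_pool();    /* GFP_ATOMIC refill, no locks held yet */

        db = get_bucket((unsigned long)addr);
        spin_lock_irqsave(&db->lock, flags);
        obj = lookup_object(addr, db);
        if (!obj)
                obj = alloc_object(addr, db, descr); /* pool-only now */
        spin_unlock_irqrestore(&db->lock, flags);
}
```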
diff --git a/lib/div64.c b/lib/div64.c
index bb5bd0c0f03..a111eb8de9c 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -98,3 +98,13 @@ EXPORT_SYMBOL(div64_u64);
#endif
#endif /* BITS_PER_LONG == 32 */
+
+/*
+ * Iterative div/mod for use when dividend is not expected to be much
+ * bigger than divisor.
+ */
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+ return __iter_div_u64_rem(dividend, divisor, remainder);
+}
+EXPORT_SYMBOL(iter_div_u64_rem);
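
The new export wraps the inline __iter_div_u64_rem(), which replaces a hardware 64-bit division with repeated subtraction; that is a win only when the quotient is known to stay small. A userspace rendering of the same loop:

```c
#include <stdio.h>
#include <stdint.h>

/* Same idea as __iter_div_u64_rem(): subtract the divisor until the
 * dividend drops below it. Only sensible when the quotient is small. */
static uint32_t iter_div_u64_rem_demo(uint64_t dividend, uint32_t divisor,
                                      uint64_t *remainder)
{
        uint32_t quot = 0;

        while (dividend >= divisor) {
                dividend -= divisor;
                quot++;
        }
        *remainder = dividend;
        return quot;
}

int main(void)
{
        uint64_t rem;
        uint32_t q = iter_div_u64_rem_demo(10000000007ULL, 1000000000u, &rem);

        printf("quot=%u rem=%llu\n", q, (unsigned long long)rem); /* 10, 7 */
        return 0;
}
```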
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 119174494cb..4a8ba4bf5f6 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive()
*/
-s64 __percpu_counter_sum(struct percpu_counter *fbc)
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
{
s64 ret;
int cpu;
@@ -62,7 +62,12 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
+ if (set)
+ *pcount = 0;
}
+ if (set)
+ fbc->count = ret;
+
spin_unlock(&fbc->lock);
return ret;
}
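
With set != 0 the slow sum also folds every per-cpu delta back into fbc->count and zeroes each slot, leaving an exact global value behind. A toy single-threaded model of that fold (plain array instead of per-cpu storage, locking omitted):

```c
#include <stdio.h>
#include <stdint.h>

#define NR_CPUS_DEMO 4

struct toy_counter {
        int64_t count;                  /* global approximation */
        int32_t counters[NR_CPUS_DEMO]; /* per-"cpu" deltas */
};

/* Mirrors the set != 0 behaviour: accumulate every slot, zero it,
 * and write the exact total back into ->count. */
static int64_t toy_counter_sum_set(struct toy_counter *c)
{
        int64_t ret = c->count;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++) {
                ret += c->counters[cpu];
                c->counters[cpu] = 0;
        }
        c->count = ret;
        return ret;
}

int main(void)
{
        struct toy_counter c = { .count = 100, .counters = { 3, -1, 7, 0 } };

        printf("%lld\n", (long long)toy_counter_sum_set(&c)); /* 109 */
        printf("%lld\n", (long long)c.count);                 /* 109 */
        return 0;
}
```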
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index bd521716ab1..56ec21a7f73 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2001 Momchil Velikov
* Portions Copyright (C) 2001 Christoph Hellwig
- * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
+ * Copyright (C) 2005 SGI, Christoph Lameter
* Copyright (C) 2006 Nick Piggin
*
* This program is free software; you can redistribute it and/or
@@ -88,6 +88,57 @@ static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
return root->gfp_mask & __GFP_BITS_MASK;
}
+static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
+ int offset)
+{
+ __set_bit(offset, node->tags[tag]);
+}
+
+static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
+ int offset)
+{
+ __clear_bit(offset, node->tags[tag]);
+}
+
+static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
+ int offset)
+{
+ return test_bit(offset, node->tags[tag]);
+}
+
+static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
+{
+ root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
+{
+ root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear_all(struct radix_tree_root *root)
+{
+ root->gfp_mask &= __GFP_BITS_MASK;
+}
+
+static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
+{
+ return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
+}
+
+/*
+ * Returns 1 if any slot in the node has this tag set.
+ * Otherwise returns 0.
+ */
+static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
+{
+ int idx;
+ for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+ if (node->tags[tag][idx])
+ return 1;
+ }
+ return 0;
+}
/*
* This assumes that the caller has performed appropriate preallocation, and
* that the caller has pinned this thread of control to the current CPU.
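
The tag helpers move up so the new RCU free callback below can use them. Note the root_tag_* family stores the per-root tag bits in the otherwise unused high bits of root->gfp_mask, above __GFP_BITS_SHIFT, so the root needs no extra word. A standalone demo of the packing trick (the shift value here is arbitrary; the kernel's __GFP_BITS_SHIFT differs):

```c
#include <stdio.h>

#define DEMO_GFP_BITS_SHIFT 20
#define DEMO_GFP_BITS_MASK  ((1u << DEMO_GFP_BITS_SHIFT) - 1)

/* One word doubles as tag storage: allocation flags occupy the low
 * bits, per-root tag bits sit above DEMO_GFP_BITS_SHIFT. */
static unsigned int root_word;

static void demo_root_tag_set(unsigned int tag)
{
        root_word |= 1u << (tag + DEMO_GFP_BITS_SHIFT);
}

static int demo_root_tag_get(unsigned int tag)
{
        return !!(root_word & (1u << (tag + DEMO_GFP_BITS_SHIFT)));
}

int main(void)
{
        root_word = 0xd0;               /* pretend these are gfp flags */
        demo_root_tag_set(1);

        printf("tag0=%d tag1=%d flags=%#x\n",
               demo_root_tag_get(0), demo_root_tag_get(1),
               root_word & DEMO_GFP_BITS_MASK); /* tag0=0 tag1=1 flags=0xd0 */
        return 0;
}
```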
@@ -124,6 +175,17 @@ static void radix_tree_node_rcu_free(struct rcu_head *head)
{
struct radix_tree_node *node =
container_of(head, struct radix_tree_node, rcu_head);
+
+ /*
+ * must only free zeroed nodes into the slab. radix_tree_shrink
+ * can leave us with a non-NULL entry in the first slot, so clear
+ * that here to make sure.
+ */
+ tag_clear(node, 0, 0);
+ tag_clear(node, 1, 0);
+ node->slots[0] = NULL;
+ node->count = 0;
+
kmem_cache_free(radix_tree_node_cachep, node);
}
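
The scrubbing moves here from radix_tree_shrink() because lockless readers may still be walking the node until the RCU grace period ends; only inside the callback is it safe to clear the stale slots[0] before handing the node back to the slab. The deferral is the usual call_rcu() pairing, outlined below (kernel-context sketch; radix_tree_node_free() in this file queues the callback the same way):

```c
/* Outline of the free side of this scheme: the node is queued with
 * call_rcu() and only scrubbed once no reader can still see it. */
static void node_rcu_free_sketch(struct rcu_head *head)
{
        struct radix_tree_node *node =
                container_of(head, struct radix_tree_node, rcu_head);

        node->slots[0] = NULL;  /* safe now: grace period elapsed */
        node->count = 0;
        kmem_cache_free(radix_tree_node_cachep, node);
}

static void node_free_sketch(struct radix_tree_node *node)
{
        call_rcu(&node->rcu_head, node_rcu_free_sketch);
}
```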
@@ -165,59 +227,6 @@ out:
}
EXPORT_SYMBOL(radix_tree_preload);
-static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
- int offset)
-{
- __set_bit(offset, node->tags[tag]);
-}
-
-static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
- int offset)
-{
- __clear_bit(offset, node->tags[tag]);
-}
-
-static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
- int offset)
-{
- return test_bit(offset, node->tags[tag]);
-}
-
-static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
-{
- root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
-}
-
-
-static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
-{
- root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
-}
-
-static inline void root_tag_clear_all(struct radix_tree_root *root)
-{
- root->gfp_mask &= __GFP_BITS_MASK;
-}
-
-static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
-{
- return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
-}
-
-/*
- * Returns 1 if any slot in the node has this tag set.
- * Otherwise returns 0.
- */
-static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
-{
- int idx;
- for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
- if (node->tags[tag][idx])
- return 1;
- }
- return 0;
-}
-
/*
* Return the maximum key which can be stored into a
* radix tree with height HEIGHT.
@@ -930,11 +939,6 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
newptr = radix_tree_ptr_to_indirect(newptr);
root->rnode = newptr;
root->height--;
- /* must only free zeroed nodes into the slab */
- tag_clear(to_free, 0, 0);
- tag_clear(to_free, 1, 0);
- to_free->slots[0] = NULL;
- to_free->count = 0;
radix_tree_node_free(to_free);
}
}
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 6c90fb90e19..3b4dc098181 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -7,7 +7,7 @@
#include <linux/kallsyms.h>
#include <linux/sched.h>
-unsigned int debug_smp_processor_id(void)
+notrace unsigned int debug_smp_processor_id(void)
{
unsigned long preempt_count = preempt_count();
int this_cpu = raw_smp_processor_id();
@@ -37,7 +37,7 @@ unsigned int debug_smp_processor_id(void)
/*
* Avoid recursion:
*/
- preempt_disable();
+ preempt_disable_notrace();
if (!printk_ratelimit())
goto out_enable;
@@ -49,7 +49,7 @@ unsigned int debug_smp_processor_id(void)
dump_stack();
out_enable:
- preempt_enable_no_resched();
+ preempt_enable_no_resched_notrace();
out:
return this_cpu;
}
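
debug_smp_processor_id() sits on paths the tracer itself uses, so letting ftrace instrument it (or letting its preempt helpers call into traced scheduler code) would recurse endlessly; notrace plus the _notrace preempt variants break that cycle. The kernel defines notrace as the gcc attribute shown below, which suppresses the compiler-inserted profiling call for that one function:

```c
#include <stdio.h>

/* The kernel's notrace expands to this attribute; a function carrying
 * it gets no profiling hook even in a file compiled with -pg. */
#define notrace __attribute__((no_instrument_function))

static notrace int hook_free_helper(void)
{
        /* safe to call from inside the tracer: nothing to recurse into */
        return 42;
}

int main(void)
{
        printf("%d\n", hook_free_helper());
        return 0;
}
```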
diff --git a/lib/textsearch.c b/lib/textsearch.c
index be8bda3862f..a3e500ad51d 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -97,6 +97,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/err.h>
#include <linux/textsearch.h>
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index d90822c378a..4a7fce72898 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -63,7 +63,7 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
struct ts_bm *bm = ts_config_priv(conf);
unsigned int i, text_len, consumed = state->offset;
const u8 *text;
- int shift = bm->patlen, bs;
+ int shift = bm->patlen - 1, bs;
for (;;) {
text_len = conf->get_next_block(consumed, &text, conf, state);
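
The fix is an off-by-one in the initial alignment: the search inspects the text at offset shift, the position aligned with the pattern's last character, so the first comparison must happen at patlen - 1, not patlen; the old value started one byte too far and could miss a match at the very beginning of the buffer. A compact userspace Boyer-Moore-style skeleton showing the role of that index (bad-character table only, simplified from the kernel version):

```c
#include <stdio.h>
#include <string.h>

#define ASIZE 256

/* Simplified bad-character Boyer-Moore, enough to show why the first
 * alignment is patlen - 1 (the index of the pattern's last byte). */
static int bm_search(const unsigned char *text, int text_len,
                     const unsigned char *pat, int patlen)
{
        int bad[ASIZE];
        int i, shift;

        for (i = 0; i < ASIZE; i++)
                bad[i] = patlen;
        for (i = 0; i < patlen - 1; i++)
                bad[pat[i]] = patlen - 1 - i;

        shift = patlen - 1;     /* the fixed initialization */
        while (shift < text_len) {
                for (i = 0; i < patlen; i++)
                        if (text[shift - i] != pat[patlen - 1 - i])
                                break;
                if (i == patlen)
                        return shift - (patlen - 1);    /* match start */
                shift += bad[text[shift]];
        }
        return -1;
}

int main(void)
{
        const char *t = "here is a simple example";
        const char *p = "example";

        printf("%d\n", bm_search((const unsigned char *)t, strlen(t),
                                 (const unsigned char *)p, strlen(p)));
        return 0;       /* prints 17 */
}
```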
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 6021757a449..1dc2d1d18fa 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -22,6 +22,8 @@
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
#include <asm/page.h> /* for PAGE_SIZE */
#include <asm/div64.h>
@@ -482,6 +484,89 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
return buf;
}
+static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags)
+{
+ int len, i;
+
+ if ((unsigned long)s < PAGE_SIZE)
+ s = "<NULL>";
+
+ len = strnlen(s, precision);
+
+ if (!(flags & LEFT)) {
+ while (len < field_width--) {
+ if (buf < end)
+ *buf = ' ';
+ ++buf;
+ }
+ }
+ for (i = 0; i < len; ++i) {
+ if (buf < end)
+ *buf = *s;
+ ++buf; ++s;
+ }
+ while (len < field_width--) {
+ if (buf < end)
+ *buf = ' ';
+ ++buf;
+ }
+ return buf;
+}
+
+static inline void *dereference_function_descriptor(void *ptr)
+{
+#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
+ void *p;
+ if (!probe_kernel_address(ptr, p))
+ ptr = p;
+#endif
+ return ptr;
+}
+
+static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags)
+{
+ unsigned long value = (unsigned long) ptr;
+#ifdef CONFIG_KALLSYMS
+ char sym[KSYM_SYMBOL_LEN];
+ sprint_symbol(sym, value);
+ return string(buf, end, sym, field_width, precision, flags);
+#else
+ field_width = 2*sizeof(void *);
+ flags |= SPECIAL | SMALL | ZEROPAD;
+ return number(buf, end, value, 16, field_width, precision, flags);
+#endif
+}
+
+/*
+ * Show a '%p' thing. A kernel extension is that the '%p' is followed
+ * by an extra set of alphanumeric characters that are extended format
+ * specifiers.
+ *
+ * Right now we just handle 'F' (for symbolic Function descriptor pointers)
+ * and 'S' (for Symbolic direct pointers), but this can easily be
+ * extended in the future (network address types etc).
+ *
+ * The difference between 'S' and 'F' is that on ia64 and ppc64 function
+ * pointers are really function descriptors, which contain a pointer to
+ * the real address.
+ */
+static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags)
+{
+ switch (*fmt) {
+ case 'F':
+ ptr = dereference_function_descriptor(ptr);
+ /* Fallthrough */
+ case 'S':
+ return symbol_string(buf, end, ptr, field_width, precision, flags);
+ }
+ flags |= SMALL;
+ if (field_width == -1) {
+ field_width = 2*sizeof(void *);
+ flags |= ZEROPAD;
+ }
+ return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags);
+}
+
/**
* vsnprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
@@ -502,11 +587,9 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
*/
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
- int len;
unsigned long long num;
- int i, base;
+ int base;
char *str, *end, c;
- const char *s;
int flags; /* flags to number() */
@@ -622,43 +705,18 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
continue;
case 's':
- s = va_arg(args, char *);
- if ((unsigned long)s < PAGE_SIZE)
- s = "<NULL>";
-
- len = strnlen(s, precision);
-
- if (!(flags & LEFT)) {
- while (len < field_width--) {
- if (str < end)
- *str = ' ';
- ++str;
- }
- }
- for (i = 0; i < len; ++i) {
- if (str < end)
- *str = *s;
- ++str; ++s;
- }
- while (len < field_width--) {
- if (str < end)
- *str = ' ';
- ++str;
- }
+ str = string(str, end, va_arg(args, char *), field_width, precision, flags);
continue;
case 'p':
- flags |= SMALL;
- if (field_width == -1) {
- field_width = 2*sizeof(void *);
- flags |= ZEROPAD;
- }
- str = number(str, end,
- (unsigned long) va_arg(args, void *),
- 16, field_width, precision, flags);
+ str = pointer(fmt+1, str, end,
+ va_arg(args, void *),
+ field_width, precision, flags);
+ /* Skip all alphanumeric pointer suffixes */
+ while (isalnum(fmt[1]))
+ fmt++;
continue;
-
case 'n':
/* FIXME:
* What does C99 say about the overflow case here? */
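
With the dispatcher skipping alphanumeric suffixes after %p, callers can now print resolved symbols directly instead of calling sprint_symbol() by hand. A kernel-context fragment showing typical use (not standalone; the handler parameter is illustrative):

```c
/* After this patch: %pS resolves a text address to a symbol name,
 * %pF first dereferences the function descriptor on ia64/ppc64.
 * Without CONFIG_KALLSYMS both fall back to zero-padded hex. */
static void report(void (*handler)(void))
{
        printk(KERN_DEBUG "caller:  %pS\n", __builtin_return_address(0));
        printk(KERN_DEBUG "handler: %pF\n", handler);
}
```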