Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug | 23
-rw-r--r--  lib/radix-tree.c  | 11
-rw-r--r--  lib/rbtree.c      | 12
-rw-r--r--  lib/sort.c        | 30
4 files changed, 43 insertions(+), 33 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2e75478e9c6..4c9ae6085c7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -512,6 +512,13 @@ config DEBUG_VIRTUAL
If unsure, say N.
+config DEBUG_NOMMU_REGIONS
+ bool "Debug the global anon/private NOMMU mapping region tree"
+ depends on DEBUG_KERNEL && !MMU
+ help
+ This option causes the global tree of anonymous and private mapping
+ regions to be regularly checked for invalid topology.
+
config DEBUG_WRITECOUNT
bool "Debug filesystem writers count"
depends on DEBUG_KERNEL
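A note on the DEBUG_NOMMU_REGIONS addition above: exercising the new check is purely a configuration matter. A sketch of the relevant .config fragment (only meaningful on a !MMU build, i.e. with CONFIG_MMU unset):

    CONFIG_DEBUG_KERNEL=y
    CONFIG_DEBUG_NOMMU_REGIONS=y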
@@ -566,14 +573,14 @@ config DEBUG_NOTIFIERS
config FRAME_POINTER
bool "Compile the kernel with frame pointers"
depends on DEBUG_KERNEL && \
- (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || \
- AVR32 || SUPERH || BLACKFIN || MN10300)
- default y if DEBUG_INFO && UML
- help
- If you say Y here the resulting kernel image will be slightly larger
- and slower, but it might give very useful debugging information on
- some architectures or if you use external debuggers.
- If you don't debug the kernel, you can say N.
+ (CRIS || M68K || M68KNOMMU || FRV || UML || S390 || \
+ AVR32 || SUPERH || BLACKFIN || MN10300) || \
+ ARCH_WANT_FRAME_POINTERS
+ default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
+ help
+ If you say Y here the resulting kernel image will be slightly
+ larger and slower, but it gives very useful debugging information
+ in case of kernel bugs (precise oopses/stacktraces/warnings).
config BOOT_PRINTK_DELAY
bool "Delay each boot printk message by N milliseconds"
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8d3fb0bd128..4bb42a0344e 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -640,13 +640,14 @@ EXPORT_SYMBOL(radix_tree_tag_get);
*
* Returns: the index of the hole if found, otherwise returns an index
* outside of the set specified (in which case 'return - index >= max_scan'
- * will be true).
+ * will be true). In rare cases of index wrap-around, 0 will be returned.
*
* radix_tree_next_hole may be called under rcu_read_lock. However, like
- * radix_tree_gang_lookup, this will not atomically search a snapshot of the
- * tree at a single point in time. For example, if a hole is created at index
- * 5, then subsequently a hole is created at index 10, radix_tree_next_hole
- * covering both indexes may return 10 if called under rcu_read_lock.
+ * radix_tree_gang_lookup, this will not atomically search a snapshot of
+ * the tree at a single point in time. For example, if a hole is created
+ * at index 5, then subsequently a hole is created at index 10,
+ * radix_tree_next_hole covering both indexes may return 10 if called
+ * under rcu_read_lock.
*/
unsigned long radix_tree_next_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan)
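The wrap-around note added above matters for callers: both an out-of-range return and, after index wrap-around, a return of 0 can mean "no hole found". A minimal sketch of a caller handling both cases (first_hole() is a hypothetical helper, not part of this diff):

    #include <linux/radix-tree.h>
    #include <linux/rcupdate.h>
    #include <linux/kernel.h>

    /* Hypothetical: first hole at or after 'start', or ULONG_MAX if no
     * hole was found within 'max_scan' slots. */
    static unsigned long first_hole(struct radix_tree_root *root,
                                    unsigned long start,
                                    unsigned long max_scan)
    {
            unsigned long hole;

            rcu_read_lock();
            hole = radix_tree_next_hole(root, start, max_scan);
            rcu_read_unlock();

            if (hole - start >= max_scan)   /* scanned past the range */
                    return ULONG_MAX;
            if (hole == 0 && start != 0)    /* index wrap-around case */
                    return ULONG_MAX;
            return hole;
    }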
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 48499c2d88c..9956b99649f 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -292,7 +292,7 @@ EXPORT_SYMBOL(rb_erase);
/*
* This function returns the first node (in sort order) of the tree.
*/
-struct rb_node *rb_first(struct rb_root *root)
+struct rb_node *rb_first(const struct rb_root *root)
{
struct rb_node *n;
@@ -305,7 +305,7 @@ struct rb_node *rb_first(struct rb_root *root)
}
EXPORT_SYMBOL(rb_first);
-struct rb_node *rb_last(struct rb_root *root)
+struct rb_node *rb_last(const struct rb_root *root)
{
struct rb_node *n;
@@ -318,7 +318,7 @@ struct rb_node *rb_last(struct rb_root *root)
}
EXPORT_SYMBOL(rb_last);
-struct rb_node *rb_next(struct rb_node *node)
+struct rb_node *rb_next(const struct rb_node *node)
{
struct rb_node *parent;
@@ -331,7 +331,7 @@ struct rb_node *rb_next(struct rb_node *node)
node = node->rb_right;
while (node->rb_left)
node=node->rb_left;
- return node;
+ return (struct rb_node *)node;
}
/* No right-hand children. Everything down and left is
@@ -347,7 +347,7 @@ struct rb_node *rb_next(struct rb_node *node)
}
EXPORT_SYMBOL(rb_next);
-struct rb_node *rb_prev(struct rb_node *node)
+struct rb_node *rb_prev(const struct rb_node *node)
{
struct rb_node *parent;
@@ -360,7 +360,7 @@ struct rb_node *rb_prev(struct rb_node *node)
node = node->rb_left;
while (node->rb_right)
node=node->rb_right;
- return node;
+ return (struct rb_node *)node;
}
/* No left-hand children. Go up till we find an ancestor which
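The const qualifiers above let read-only code iterate a tree it receives as const, without casting on the caller's side. A minimal sketch (dump_tree() is a hypothetical debugging helper):

    #include <linux/rbtree.h>
    #include <linux/kernel.h>

    /* Walk a tree this function is not allowed to modify. */
    static void dump_tree(const struct rb_root *root)
    {
            struct rb_node *node;

            for (node = rb_first(root); node; node = rb_next(node))
                    printk(KERN_DEBUG "rb_node at %p\n", node);
    }

Note the trade-off the new casts in rb_next()/rb_prev() encode: the iterators accept const nodes but still return a non-const rb_node *, so existing callers keep compiling unchanged.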
diff --git a/lib/sort.c b/lib/sort.c
index 6abbaf3d585..926d00429ed 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -32,11 +32,11 @@ static void generic_swap(void *a, void *b, int size)
* @base: pointer to data to sort
* @num: number of elements
* @size: size of each element
- * @cmp: pointer to comparison function
- * @swap: pointer to swap function or NULL
+ * @cmp_func: pointer to comparison function
+ * @swap_func: pointer to swap function or NULL
*
* This function does a heapsort on the given array. You may provide a
- * swap function optimized to your element type.
+ * swap_func optimized to your element type.
*
* Sorting time is O(n log n) both on average and worst-case. While
* qsort is about 20% faster on average, it suffers from exploitable
@@ -45,37 +45,39 @@ static void generic_swap(void *a, void *b, int size)
*/
void sort(void *base, size_t num, size_t size,
- int (*cmp)(const void *, const void *),
- void (*swap)(void *, void *, int size))
+ int (*cmp_func)(const void *, const void *),
+ void (*swap_func)(void *, void *, int size))
{
/* pre-scale counters for performance */
int i = (num/2 - 1) * size, n = num * size, c, r;
- if (!swap)
- swap = (size == 4 ? u32_swap : generic_swap);
+ if (!swap_func)
+ swap_func = (size == 4 ? u32_swap : generic_swap);
/* heapify */
for ( ; i >= 0; i -= size) {
for (r = i; r * 2 + size < n; r = c) {
c = r * 2 + size;
- if (c < n - size && cmp(base + c, base + c + size) < 0)
+ if (c < n - size &&
+ cmp_func(base + c, base + c + size) < 0)
c += size;
- if (cmp(base + r, base + c) >= 0)
+ if (cmp_func(base + r, base + c) >= 0)
break;
- swap(base + r, base + c, size);
+ swap_func(base + r, base + c, size);
}
}
/* sort */
for (i = n - size; i > 0; i -= size) {
- swap(base, base + i, size);
+ swap_func(base, base + i, size);
for (r = 0; r * 2 + size < i; r = c) {
c = r * 2 + size;
- if (c < i - size && cmp(base + c, base + c + size) < 0)
+ if (c < i - size &&
+ cmp_func(base + c, base + c + size) < 0)
c += size;
- if (cmp(base + r, base + c) >= 0)
+ if (cmp_func(base + r, base + c) >= 0)
break;
- swap(base + r, base + c, size);
+ swap_func(base + r, base + c, size);
}
}
}
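The cmp/swap renames are purely internal; callers pass the same positional arguments as before. A minimal usage sketch (cmp_int() and sort_example() are hypothetical):

    #include <linux/sort.h>
    #include <linux/kernel.h>

    static int cmp_int(const void *a, const void *b)
    {
            const int x = *(const int *)a, y = *(const int *)b;

            return (x > y) - (x < y);   /* avoids subtraction overflow */
    }

    static void sort_example(void)
    {
            int vals[] = { 3, 1, 2 };

            /* NULL swap_func: sort() picks u32_swap since size == 4. */
            sort(vals, ARRAY_SIZE(vals), sizeof(vals[0]), cmp_int, NULL);
    }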