Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig            |   7
-rw-r--r--  lib/Kconfig.debug      |  29
-rw-r--r--  lib/Kconfig.kgdb       |   3
-rw-r--r--  lib/Makefile           |   6
-rw-r--r--  lib/bitrev.c           |   3
-rw-r--r--  lib/bug.c              |   2
-rw-r--r--  lib/crc-t10dif.c       |  67
-rw-r--r--  lib/debugobjects.c     |  15
-rw-r--r--  lib/div64.c            |  10
-rw-r--r--  lib/kobject.c          |  11
-rw-r--r--  lib/kobject_uevent.c   |   3
-rw-r--r--  lib/percpu_counter.c   |   7
-rw-r--r--  lib/radix-tree.c       | 122
-rw-r--r--  lib/smp_processor_id.c |   6
-rw-r--r--  lib/textsearch.c       |  17
-rw-r--r--  lib/ts_bm.c            |  28
-rw-r--r--  lib/ts_fsm.c           |   6
-rw-r--r--  lib/ts_kmp.c           |  29
-rw-r--r--  lib/vsprintf.c         | 128
19 files changed, 360 insertions, 139 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 8cc8e8722a3..c7ad7a5b353 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -29,6 +29,13 @@ config CRC16 the kernel tree does. Such modules that use library CRC16 functions require M here. +config CRC_T10DIF + tristate "CRC calculation for the T10 Data Integrity Field" + help + This option is only needed if a module that's not in the + kernel tree needs to calculate CRC checks for use with the + SCSI data integrity subsystem. + config CRC_ITU_T tristate "CRC ITU-T V.41 functions" help diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index d2099f41aa1..ba106db5a65 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -74,6 +74,9 @@ config DEBUG_FS debugging files into. Enable this option to be able to read and write to these files. + For detailed documentation on the debugfs API, see + Documentation/DocBook/filesystems. + If unsure, say N. config HEADERS_CHECK @@ -419,7 +422,6 @@ config DEBUG_LOCKING_API_SELFTESTS config STACKTRACE bool - depends on DEBUG_KERNEL depends on STACKTRACE_SUPPORT config DEBUG_KOBJECT @@ -531,16 +533,34 @@ config BOOT_PRINTK_DELAY config RCU_TORTURE_TEST tristate "torture tests for RCU" depends on DEBUG_KERNEL - depends on m default n help This option provides a kernel module that runs torture tests on the RCU infrastructure. The kernel module may be built after the fact on the running kernel to be tested, if desired. + Say Y here if you want RCU torture tests to be built into + the kernel. Say M if you want the RCU torture tests to build as a module. Say N if you are unsure. +config RCU_TORTURE_TEST_RUNNABLE + bool "torture tests for RCU runnable by default" + depends on RCU_TORTURE_TEST = y + default n + help + This option provides a way to build the RCU torture tests + directly into the kernel without them starting up at boot + time. You can use /proc/sys/kernel/rcutorture_runnable + to manually override this setting. This /proc file is + available only when the RCU torture tests have been built + into the kernel. + + Say Y here if you want the RCU torture tests to start during + boot (you probably don't). + Say N here if you want the RCU torture tests to start only + after being manually enabled via /proc. + config KPROBES_SANITY_TEST bool "Kprobes sanity tests" depends on DEBUG_KERNEL @@ -563,6 +583,9 @@ config BACKTRACE_SELF_TEST for distributions or general kernels, but only for kernel developers working on architecture code. + Note that if you want to also test saved backtraces, you will + have to enable STACKTRACE as well. + Say N if you are unsure. config LKDTM @@ -634,6 +657,8 @@ config LATENCYTOP Enable this option if you want to use the LatencyTOP tool to find out which userspace is blocking on what kernel operations. +source kernel/trace/Kconfig + config PROVIDE_OHCI1394_DMA_INIT bool "Remote debugging over FireWire early on boot" depends on PCI && X86 diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index a5d4b1dac2a..2cfd2721f7e 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb @@ -1,7 +1,4 @@ -config HAVE_ARCH_KGDB_SHADOW_INFO - bool - config HAVE_ARCH_KGDB bool diff --git a/lib/Makefile b/lib/Makefile index 74b0cfb1fcc..818c4d45551 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -2,6 +2,11 @@ # Makefile for some libs needed in the kernel. 
# +ifdef CONFIG_FTRACE +ORIG_CFLAGS := $(KBUILD_CFLAGS) +KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) +endif + lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o \ idr.o int_sqrt.o extable.o prio_tree.o \ @@ -45,6 +50,7 @@ endif obj-$(CONFIG_BITREVERSE) += bitrev.o obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o obj-$(CONFIG_CRC16) += crc16.o +obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o obj-$(CONFIG_CRC32) += crc32.o obj-$(CONFIG_CRC7) += crc7.o diff --git a/lib/bitrev.c b/lib/bitrev.c index 989aff73f88..3956203456d 100644 --- a/lib/bitrev.c +++ b/lib/bitrev.c @@ -42,10 +42,11 @@ const u8 byte_rev_table[256] = { }; EXPORT_SYMBOL_GPL(byte_rev_table); -static __always_inline u16 bitrev16(u16 x) +u16 bitrev16(u16 x) { return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8); } +EXPORT_SYMBOL(bitrev16); /** * bitrev32 - reverse the order of bits in a u32 value diff --git a/lib/bug.c b/lib/bug.c index 530f38f5578..bfeafd60ee9 100644 --- a/lib/bug.c +++ b/lib/bug.c @@ -37,6 +37,7 @@ */ #include <linux/list.h> #include <linux/module.h> +#include <linux/kernel.h> #include <linux/bug.h> #include <linux/sched.h> @@ -149,6 +150,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) (void *)bugaddr); show_regs(regs); + add_taint(TAINT_WARN); return BUG_TRAP_TYPE_WARN; } diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c new file mode 100644 index 00000000000..fbbd66ed86c --- /dev/null +++ b/lib/crc-t10dif.c @@ -0,0 +1,67 @@ +/* + * T10 Data Integrity Field CRC16 calculation + * + * Copyright (c) 2007 Oracle Corporation. All rights reserved. + * Written by Martin K. Petersen <martin.petersen@oracle.com> + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. 
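The bitrev.c hunk above un-inlines bitrev16() and exports it, so modules can now link against it directly. A minimal sketch of a module using the helper (the demo module is made up and assumes the matching extern declaration in <linux/bitrev.h>; only the u16 bitrev16(u16) definition comes from the patch):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitrev.h>

static int __init bitrev_demo_init(void)
{
	/* bit 15 moves to bit 0, so 0x8000 reverses to 0x0001 */
	printk(KERN_INFO "bitrev16(0x8000) = 0x%04x\n", bitrev16(0x8000));
	return 0;
}
module_init(bitrev_demo_init);

static void __exit bitrev_demo_exit(void)
{
}
module_exit(bitrev_demo_exit);

MODULE_LICENSE("GPL");

Bit reversal like this is typically needed for hardware that shifts fields onto the wire LSB-first.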
+ */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/crc-t10dif.h> + +/* Table generated using the following polynomium: + * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1 + * gt: 0x8bb7 + */ +static const __u16 t10_dif_crc_table[256] = { + 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B, + 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6, + 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6, + 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B, + 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1, + 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C, + 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C, + 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781, + 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8, + 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255, + 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925, + 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698, + 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472, + 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF, + 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF, + 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02, + 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA, + 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067, + 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17, + 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA, + 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640, + 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD, + 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D, + 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30, + 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759, + 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4, + 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394, + 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29, + 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3, + 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E, + 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E, + 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3 +}; + +__u16 crc_t10dif(const unsigned char *buffer, size_t len) +{ + __u16 crc = 0; + unsigned int i; + + for (i = 0 ; i < len ; i++) + crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff]; + + return crc; +} +EXPORT_SYMBOL(crc_t10dif); + +MODULE_DESCRIPTION("T10 DIF CRC calculation"); +MODULE_LICENSE("GPL"); diff --git a/lib/debugobjects.c b/lib/debugobjects.c index a76a5e122ae..85b18d79be8 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -68,6 +68,7 @@ static int fill_pool(void) { gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; struct debug_obj *new; + unsigned long flags; if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL)) return obj_pool_free; @@ -81,10 +82,10 @@ static int fill_pool(void) if (!new) return obj_pool_free; - spin_lock(&pool_lock); + spin_lock_irqsave(&pool_lock, flags); hlist_add_head(&new->node, &obj_pool); obj_pool_free++; - spin_unlock(&pool_lock); + spin_unlock_irqrestore(&pool_lock, flags); } return obj_pool_free; } @@ -110,16 +111,13 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) } /* - * Allocate a new object. 
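The crc-t10dif.c hunk above adds a single entry point, crc_t10dif(), which runs a table-driven CRC16 with the 0x8bb7 polynomial over an arbitrary buffer. A minimal sketch of a caller computing the guard tag for one 512-byte sector (the sector size and wrapper function are illustrative assumptions; only the __u16 crc_t10dif(const unsigned char *, size_t) signature comes from the patch):

#include <linux/types.h>
#include <linux/crc-t10dif.h>

/* T10 DIF guard tag: CRC16 of the sector payload. */
static __u16 sector_guard_tag(const unsigned char *sector)
{
	return crc_t10dif(sector, 512);
}

A kernel user would also need CONFIG_CRC_T10DIF enabled (or selected from its own Kconfig entry) so that lib/crc-t10dif.o is actually built.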
If the pool is empty and no refill possible, - * switch off the debugger. + * Allocate a new object. If the pool is empty, switch off the debugger. */ static struct debug_obj * alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) { struct debug_obj *obj = NULL; - int retry = 0; -repeat: spin_lock(&pool_lock); if (obj_pool.first) { obj = hlist_entry(obj_pool.first, typeof(*obj), node); @@ -141,9 +139,6 @@ repeat: } spin_unlock(&pool_lock); - if (fill_pool() && !obj && !retry++) - goto repeat; - return obj; } @@ -261,6 +256,8 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) struct debug_obj *obj; unsigned long flags; + fill_pool(); + db = get_bucket((unsigned long) addr); spin_lock_irqsave(&db->lock, flags); diff --git a/lib/div64.c b/lib/div64.c index bb5bd0c0f03..a111eb8de9c 100644 --- a/lib/div64.c +++ b/lib/div64.c @@ -98,3 +98,13 @@ EXPORT_SYMBOL(div64_u64); #endif #endif /* BITS_PER_LONG == 32 */ + +/* + * Iterative div/mod for use when dividend is not expected to be much + * bigger than divisor. + */ +u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) +{ + return __iter_div_u64_rem(dividend, divisor, remainder); +} +EXPORT_SYMBOL(iter_div_u64_rem); diff --git a/lib/kobject.c b/lib/kobject.c index 718e5101c26..744401571ed 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -216,13 +216,19 @@ static int kobject_add_internal(struct kobject *kobj) static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list vargs) { - /* Free the old name, if necessary. */ - kfree(kobj->name); + const char *old_name = kobj->name; + char *s; kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); if (!kobj->name) return -ENOMEM; + /* ewww... some of these buggers have '/' in the name ... */ + s = strchr(kobj->name, '/'); + if (s) + s[0] = '!'; + + kfree(old_name); return 0; } @@ -439,6 +445,7 @@ out: return error; } +EXPORT_SYMBOL_GPL(kobject_rename); /** * kobject_move - move object to another parent diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 2fa545a6316..9f8d599459d 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -245,7 +245,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, if (retval) goto exit; - call_usermodehelper(argv[0], argv, env->envp, UMH_WAIT_EXEC); + retval = call_usermodehelper(argv[0], argv, + env->envp, UMH_WAIT_EXEC); } exit: diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 119174494cb..4a8ba4bf5f6 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add); * Add up all the per-cpu counts, return the result. 
This is a more accurate * but much slower version of percpu_counter_read_positive() */ -s64 __percpu_counter_sum(struct percpu_counter *fbc) +s64 __percpu_counter_sum(struct percpu_counter *fbc, int set) { s64 ret; int cpu; @@ -62,7 +62,12 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc) for_each_online_cpu(cpu) { s32 *pcount = per_cpu_ptr(fbc->counters, cpu); ret += *pcount; + if (set) + *pcount = 0; } + if (set) + fbc->count = ret; + spin_unlock(&fbc->lock); return ret; } diff --git a/lib/radix-tree.c b/lib/radix-tree.c index bd521716ab1..56ec21a7f73 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1,7 +1,7 @@ /* * Copyright (C) 2001 Momchil Velikov * Portions Copyright (C) 2001 Christoph Hellwig - * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com> + * Copyright (C) 2005 SGI, Christoph Lameter * Copyright (C) 2006 Nick Piggin * * This program is free software; you can redistribute it and/or @@ -88,6 +88,57 @@ static inline gfp_t root_gfp_mask(struct radix_tree_root *root) return root->gfp_mask & __GFP_BITS_MASK; } +static inline void tag_set(struct radix_tree_node *node, unsigned int tag, + int offset) +{ + __set_bit(offset, node->tags[tag]); +} + +static inline void tag_clear(struct radix_tree_node *node, unsigned int tag, + int offset) +{ + __clear_bit(offset, node->tags[tag]); +} + +static inline int tag_get(struct radix_tree_node *node, unsigned int tag, + int offset) +{ + return test_bit(offset, node->tags[tag]); +} + +static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag) +{ + root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT)); +} + +static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag) +{ + root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT)); +} + +static inline void root_tag_clear_all(struct radix_tree_root *root) +{ + root->gfp_mask &= __GFP_BITS_MASK; +} + +static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag) +{ + return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT)); +} + +/* + * Returns 1 if any slot in the node has this tag set. + * Otherwise returns 0. + */ +static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) +{ + int idx; + for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { + if (node->tags[tag][idx]) + return 1; + } + return 0; +} /* * This assumes that the caller has performed appropriate preallocation, and * that the caller has pinned this thread of control to the current CPU. @@ -124,6 +175,17 @@ static void radix_tree_node_rcu_free(struct rcu_head *head) { struct radix_tree_node *node = container_of(head, struct radix_tree_node, rcu_head); + + /* + * must only free zeroed nodes into the slab. radix_tree_shrink + * can leave us with a non-NULL entry in the first slot, so clear + * that here to make sure. 
+ */ + tag_clear(node, 0, 0); + tag_clear(node, 1, 0); + node->slots[0] = NULL; + node->count = 0; + kmem_cache_free(radix_tree_node_cachep, node); } @@ -165,59 +227,6 @@ out: } EXPORT_SYMBOL(radix_tree_preload); -static inline void tag_set(struct radix_tree_node *node, unsigned int tag, - int offset) -{ - __set_bit(offset, node->tags[tag]); -} - -static inline void tag_clear(struct radix_tree_node *node, unsigned int tag, - int offset) -{ - __clear_bit(offset, node->tags[tag]); -} - -static inline int tag_get(struct radix_tree_node *node, unsigned int tag, - int offset) -{ - return test_bit(offset, node->tags[tag]); -} - -static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag) -{ - root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT)); -} - - -static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag) -{ - root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT)); -} - -static inline void root_tag_clear_all(struct radix_tree_root *root) -{ - root->gfp_mask &= __GFP_BITS_MASK; -} - -static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag) -{ - return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT)); -} - -/* - * Returns 1 if any slot in the node has this tag set. - * Otherwise returns 0. - */ -static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) -{ - int idx; - for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { - if (node->tags[tag][idx]) - return 1; - } - return 0; -} - /* * Return the maximum key which can be store into a * radix tree with height HEIGHT. @@ -930,11 +939,6 @@ static inline void radix_tree_shrink(struct radix_tree_root *root) newptr = radix_tree_ptr_to_indirect(newptr); root->rnode = newptr; root->height--; - /* must only free zeroed nodes into the slab */ - tag_clear(to_free, 0, 0); - tag_clear(to_free, 1, 0); - to_free->slots[0] = NULL; - to_free->count = 0; radix_tree_node_free(to_free); } } diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 6c90fb90e19..3b4dc098181 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -7,7 +7,7 @@ #include <linux/kallsyms.h> #include <linux/sched.h> -unsigned int debug_smp_processor_id(void) +notrace unsigned int debug_smp_processor_id(void) { unsigned long preempt_count = preempt_count(); int this_cpu = raw_smp_processor_id(); @@ -37,7 +37,7 @@ unsigned int debug_smp_processor_id(void) /* * Avoid recursion: */ - preempt_disable(); + preempt_disable_notrace(); if (!printk_ratelimit()) goto out_enable; @@ -49,7 +49,7 @@ unsigned int debug_smp_processor_id(void) dump_stack(); out_enable: - preempt_enable_no_resched(); + preempt_enable_no_resched_notrace(); out: return this_cpu; } diff --git a/lib/textsearch.c b/lib/textsearch.c index be8bda3862f..9fbcb44c554 100644 --- a/lib/textsearch.c +++ b/lib/textsearch.c @@ -54,10 +54,13 @@ * USAGE * * Before a search can be performed, a configuration must be created - * by calling textsearch_prepare() specyfing the searching algorithm and - * the pattern to look for. The returned configuration may then be used - * for an arbitary amount of times and even in parallel as long as a - * separate struct ts_state variable is provided to every instance. + * by calling textsearch_prepare() specifying the searching algorithm, + * the pattern to look for and flags. As a flag, you can set TS_IGNORECASE + * to perform case insensitive matching. But it might slow down + * performance of algorithm, so you should use it at own your risk. 
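The textsearch documentation being updated here introduces the TS_IGNORECASE flag, which the ts_bm and ts_kmp hunks further down implement by upper-casing both the stored pattern and the scanned text. A minimal sketch of a case-insensitive search over a linear buffer (the pattern and the wrapper function are made up; textsearch_prepare(), textsearch_find_continuous() and textsearch_destroy() are the existing API):

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/textsearch.h>

/* Return the offset of the first case-insensitive match, or -1. */
static int find_needle(const char *haystack, unsigned int len)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL,
				  TS_AUTOLOAD | TS_IGNORECASE);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	pos = textsearch_find_continuous(conf, &state, haystack, len);
	textsearch_destroy(conf);

	return pos != UINT_MAX ? pos : -1;
}

Note that the case folding goes through toupper(), so it is ASCII-only, and it adds a comparison per character, which is the slowdown the documentation warns about.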
+ * The returned configuration may then be used for an arbitary + * amount of times and even in parallel as long as a separate struct + * ts_state variable is provided to every instance. * * The actual search is performed by either calling textsearch_find_- * continuous() for linear data or by providing an own get_next_block() @@ -89,7 +92,6 @@ * panic("Oh my god, dancing chickens at %d\n", pos); * * textsearch_destroy(conf); - * * ========================================================================== */ @@ -97,6 +99,7 @@ #include <linux/types.h> #include <linux/string.h> #include <linux/init.h> +#include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/err.h> #include <linux/textsearch.h> @@ -264,7 +267,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern, return ERR_PTR(-EINVAL); ops = lookup_ts_algo(algo); -#ifdef CONFIG_KMOD +#ifdef CONFIG_MODULES /* * Why not always autoload you may ask. Some users are * in a situation where requesting a module may deadlock, @@ -279,7 +282,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern, if (ops == NULL) goto errout; - conf = ops->init(pattern, len, gfp_mask); + conf = ops->init(pattern, len, gfp_mask, flags); if (IS_ERR(conf)) { err = PTR_ERR(conf); goto errout; diff --git a/lib/ts_bm.c b/lib/ts_bm.c index d90822c378a..9e66ee4020e 100644 --- a/lib/ts_bm.c +++ b/lib/ts_bm.c @@ -39,6 +39,7 @@ #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> +#include <linux/ctype.h> #include <linux/textsearch.h> /* Alphabet size, use ASCII */ @@ -63,7 +64,8 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state) struct ts_bm *bm = ts_config_priv(conf); unsigned int i, text_len, consumed = state->offset; const u8 *text; - int shift = bm->patlen, bs; + int shift = bm->patlen - 1, bs; + const u8 icase = conf->flags & TS_IGNORECASE; for (;;) { text_len = conf->get_next_block(consumed, &text, conf, state); @@ -75,7 +77,9 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state) DEBUGP("Searching in position %d (%c)\n", shift, text[shift]); for (i = 0; i < bm->patlen; i++) - if (text[shift-i] != bm->pattern[bm->patlen-1-i]) + if ((icase ? toupper(text[shift-i]) + : text[shift-i]) + != bm->pattern[bm->patlen-1-i]) goto next; /* London calling... 
*/ @@ -111,14 +115,18 @@ static int subpattern(u8 *pattern, int i, int j, int g) return ret; } -static void compute_prefix_tbl(struct ts_bm *bm) +static void compute_prefix_tbl(struct ts_bm *bm, int flags) { int i, j, g; for (i = 0; i < ASIZE; i++) bm->bad_shift[i] = bm->patlen; - for (i = 0; i < bm->patlen - 1; i++) + for (i = 0; i < bm->patlen - 1; i++) { bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i; + if (flags & TS_IGNORECASE) + bm->bad_shift[tolower(bm->pattern[i])] + = bm->patlen - 1 - i; + } /* Compute the good shift array, used to match reocurrences * of a subpattern */ @@ -135,10 +143,11 @@ static void compute_prefix_tbl(struct ts_bm *bm) } static struct ts_config *bm_init(const void *pattern, unsigned int len, - gfp_t gfp_mask) + gfp_t gfp_mask, int flags) { struct ts_config *conf; struct ts_bm *bm; + int i; unsigned int prefix_tbl_len = len * sizeof(unsigned int); size_t priv_size = sizeof(*bm) + len + prefix_tbl_len; @@ -146,11 +155,16 @@ static struct ts_config *bm_init(const void *pattern, unsigned int len, if (IS_ERR(conf)) return conf; + conf->flags = flags; bm = ts_config_priv(conf); bm->patlen = len; bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len; - memcpy(bm->pattern, pattern, len); - compute_prefix_tbl(bm); + if (flags & TS_IGNORECASE) + for (i = 0; i < len; i++) + bm->pattern[i] = toupper(((u8 *)pattern)[i]); + else + memcpy(bm->pattern, pattern, len); + compute_prefix_tbl(bm, flags); return conf; } diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c index af575b61526..5696a35184e 100644 --- a/lib/ts_fsm.c +++ b/lib/ts_fsm.c @@ -257,7 +257,7 @@ found_match: } static struct ts_config *fsm_init(const void *pattern, unsigned int len, - gfp_t gfp_mask) + gfp_t gfp_mask, int flags) { int i, err = -EINVAL; struct ts_config *conf; @@ -269,6 +269,9 @@ static struct ts_config *fsm_init(const void *pattern, unsigned int len, if (len % sizeof(struct ts_fsm_token) || ntokens < 1) goto errout; + if (flags & TS_IGNORECASE) + goto errout; + for (i = 0; i < ntokens; i++) { struct ts_fsm_token *t = &tokens[i]; @@ -284,6 +287,7 @@ static struct ts_config *fsm_init(const void *pattern, unsigned int len, if (IS_ERR(conf)) return conf; + conf->flags = flags; fsm = ts_config_priv(conf); fsm->ntokens = ntokens; memcpy(fsm->tokens, pattern, len); diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c index 3ced628cab4..632f783e65f 100644 --- a/lib/ts_kmp.c +++ b/lib/ts_kmp.c @@ -33,6 +33,7 @@ #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> +#include <linux/ctype.h> #include <linux/textsearch.h> struct ts_kmp @@ -47,6 +48,7 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state) struct ts_kmp *kmp = ts_config_priv(conf); unsigned int i, q = 0, text_len, consumed = state->offset; const u8 *text; + const int icase = conf->flags & TS_IGNORECASE; for (;;) { text_len = conf->get_next_block(consumed, &text, conf, state); @@ -55,9 +57,11 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state) break; for (i = 0; i < text_len; i++) { - while (q > 0 && kmp->pattern[q] != text[i]) + while (q > 0 && kmp->pattern[q] + != (icase ? toupper(text[i]) : text[i])) q = kmp->prefix_tbl[q - 1]; - if (kmp->pattern[q] == text[i]) + if (kmp->pattern[q] + == (icase ? 
toupper(text[i]) : text[i])) q++; if (unlikely(q == kmp->pattern_len)) { state->offset = consumed + i + 1; @@ -72,24 +76,28 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state) } static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len, - unsigned int *prefix_tbl) + unsigned int *prefix_tbl, int flags) { unsigned int k, q; + const u8 icase = flags & TS_IGNORECASE; for (k = 0, q = 1; q < len; q++) { - while (k > 0 && pattern[k] != pattern[q]) + while (k > 0 && (icase ? toupper(pattern[k]) : pattern[k]) + != (icase ? toupper(pattern[q]) : pattern[q])) k = prefix_tbl[k-1]; - if (pattern[k] == pattern[q]) + if ((icase ? toupper(pattern[k]) : pattern[k]) + == (icase ? toupper(pattern[q]) : pattern[q])) k++; prefix_tbl[q] = k; } } static struct ts_config *kmp_init(const void *pattern, unsigned int len, - gfp_t gfp_mask) + gfp_t gfp_mask, int flags) { struct ts_config *conf; struct ts_kmp *kmp; + int i; unsigned int prefix_tbl_len = len * sizeof(unsigned int); size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len; @@ -97,11 +105,16 @@ static struct ts_config *kmp_init(const void *pattern, unsigned int len, if (IS_ERR(conf)) return conf; + conf->flags = flags; kmp = ts_config_priv(conf); kmp->pattern_len = len; - compute_prefix_tbl(pattern, len, kmp->prefix_tbl); + compute_prefix_tbl(pattern, len, kmp->prefix_tbl, flags); kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len; - memcpy(kmp->pattern, pattern, len); + if (flags & TS_IGNORECASE) + for (i = 0; i < len; i++) + kmp->pattern[i] = toupper(((u8 *)pattern)[i]); + else + memcpy(kmp->pattern, pattern, len); return conf; } diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 6021757a449..1dc2d1d18fa 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -22,6 +22,8 @@ #include <linux/string.h> #include <linux/ctype.h> #include <linux/kernel.h> +#include <linux/kallsyms.h> +#include <linux/uaccess.h> #include <asm/page.h> /* for PAGE_SIZE */ #include <asm/div64.h> @@ -482,6 +484,89 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int return buf; } +static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags) +{ + int len, i; + + if ((unsigned long)s < PAGE_SIZE) + s = "<NULL>"; + + len = strnlen(s, precision); + + if (!(flags & LEFT)) { + while (len < field_width--) { + if (buf < end) + *buf = ' '; + ++buf; + } + } + for (i = 0; i < len; ++i) { + if (buf < end) + *buf = *s; + ++buf; ++s; + } + while (len < field_width--) { + if (buf < end) + *buf = ' '; + ++buf; + } + return buf; +} + +static inline void *dereference_function_descriptor(void *ptr) +{ +#if defined(CONFIG_IA64) || defined(CONFIG_PPC64) + void *p; + if (!probe_kernel_address(ptr, p)) + ptr = p; +#endif + return ptr; +} + +static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags) +{ + unsigned long value = (unsigned long) ptr; +#ifdef CONFIG_KALLSYMS + char sym[KSYM_SYMBOL_LEN]; + sprint_symbol(sym, value); + return string(buf, end, sym, field_width, precision, flags); +#else + field_width = 2*sizeof(void *); + flags |= SPECIAL | SMALL | ZEROPAD; + return number(buf, end, value, 16, field_width, precision, flags); +#endif +} + +/* + * Show a '%p' thing. A kernel extension is that the '%p' is followed + * by an extra set of alphanumeric characters that are extended format + * specifiers. 
+ * + * Right now we just handle 'F' (for symbolic Function descriptor pointers) + * and 'S' (for Symbolic direct pointers), but this can easily be + * extended in the future (network address types etc). + * + * The difference between 'S' and 'F' is that on ia64 and ppc64 function + * pointers are really function descriptors, which contain a pointer the + * real address. + */ +static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags) +{ + switch (*fmt) { + case 'F': + ptr = dereference_function_descriptor(ptr); + /* Fallthrough */ + case 'S': + return symbol_string(buf, end, ptr, field_width, precision, flags); + } + flags |= SMALL; + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= ZEROPAD; + } + return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags); +} + /** * vsnprintf - Format a string and place it in a buffer * @buf: The buffer to place the result into @@ -502,11 +587,9 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int */ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) { - int len; unsigned long long num; - int i, base; + int base; char *str, *end, c; - const char *s; int flags; /* flags to number() */ @@ -622,43 +705,18 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) continue; case 's': - s = va_arg(args, char *); - if ((unsigned long)s < PAGE_SIZE) - s = "<NULL>"; - - len = strnlen(s, precision); - - if (!(flags & LEFT)) { - while (len < field_width--) { - if (str < end) - *str = ' '; - ++str; - } - } - for (i = 0; i < len; ++i) { - if (str < end) - *str = *s; - ++str; ++s; - } - while (len < field_width--) { - if (str < end) - *str = ' '; - ++str; - } + str = string(str, end, va_arg(args, char *), field_width, precision, flags); continue; case 'p': - flags |= SMALL; - if (field_width == -1) { - field_width = 2*sizeof(void *); - flags |= ZEROPAD; - } - str = number(str, end, - (unsigned long) va_arg(args, void *), - 16, field_width, precision, flags); + str = pointer(fmt+1, str, end, + va_arg(args, void *), + field_width, precision, flags); + /* Skip all alphanumeric pointer suffixes */ + while (isalnum(fmt[1])) + fmt++; continue; - case 'n': /* FIXME: * What does C99 say about the overflow case here? */ |
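With the pointer() hook in place, '%p' gains the 'S' and 'F' extensions described in the comment above, and vsnprintf() skips the trailing alphanumeric suffix so normal format parsing continues afterwards. A small sketch of how callers use the new specifiers (the surrounding function is hypothetical; the format extensions themselves are exactly what this hunk adds):

#include <linux/kernel.h>

static void report_caller(void (*handler)(void))
{
	/* 'S' resolves a text address to symbol+offset via kallsyms */
	printk(KERN_DEBUG "called from %pS\n", __builtin_return_address(0));

	/* 'F' does the same for a function pointer, dereferencing the
	 * function descriptor first on ia64/ppc64 */
	printk(KERN_DEBUG "handler is %pF\n", handler);
}

Without CONFIG_KALLSYMS, symbol_string() above falls back to printing the raw address as zero-padded hex.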