Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig           |   9
-rw-r--r-- | lib/Kconfig.debug     |  38
-rw-r--r-- | lib/Kconfig.kmemcheck |  91
-rw-r--r-- | lib/Makefile          |   7
-rw-r--r-- | lib/atomic64.c        | 175
-rw-r--r-- | lib/checksum.c        | 193
-rw-r--r-- | lib/cpumask.c         |  23
-rw-r--r-- | lib/dec_and_lock.c    |   3
-rw-r--r-- | lib/extable.c         |  21
-rw-r--r-- | lib/gcd.c             |  18
-rw-r--r-- | lib/genalloc.c        |   1
-rw-r--r-- | lib/hexdump.c         |  15
-rw-r--r-- | lib/kobject.c         |   7
-rw-r--r-- | lib/radix-tree.c      | 110
-rw-r--r-- | lib/rational.c        |  62
-rw-r--r-- | lib/rbtree.c          |  34
-rw-r--r-- | lib/vsprintf.c        |  56
17 files changed, 765 insertions, 98 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 8ade0a7a91e..bb1326d3839 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -10,6 +10,9 @@ menu "Library routines" config BITREVERSE tristate +config RATIONAL + boolean + config GENERIC_FIND_FIRST_BIT bool @@ -191,4 +194,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS config NLATTR bool +# +# Generic 64-bit atomic support is selected if needed +# +config GENERIC_ATOMIC64 + bool + endmenu diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 3be4b7caf5b..23067ab1a73 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -300,7 +300,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT config DEBUG_SLAB bool "Debug slab memory allocations" - depends on DEBUG_KERNEL && SLAB + depends on DEBUG_KERNEL && SLAB && !KMEMCHECK help Say Y here to have the kernel do limited verification on memory allocation as well as poisoning memory on free to catch use of freed @@ -312,7 +312,7 @@ config DEBUG_SLAB_LEAK config SLUB_DEBUG_ON bool "SLUB debugging on by default" - depends on SLUB && SLUB_DEBUG + depends on SLUB && SLUB_DEBUG && !KMEMCHECK default n help Boot with debugging on by default. SLUB boots by default with @@ -336,6 +336,38 @@ config SLUB_STATS out which slabs are relevant to a particular load. Try running: slabinfo -DA +config DEBUG_KMEMLEAK + bool "Kernel memory leak detector" + depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \ + !MEMORY_HOTPLUG + select DEBUG_SLAB if SLAB + select SLUB_DEBUG if SLUB + select DEBUG_FS if SYSFS + select STACKTRACE if STACKTRACE_SUPPORT + select KALLSYMS + help + Say Y here if you want to enable the memory leak + detector. The memory allocation/freeing is traced in a way + similar to the Boehm's conservative garbage collector, the + difference being that the orphan objects are not freed but + only shown in /sys/kernel/debug/kmemleak. Enabling this + feature will introduce an overhead to memory + allocations. See Documentation/kmemleak.txt for more + details. + + In order to access the kmemleak file, debugfs needs to be + mounted (usually at /sys/kernel/debug). + +config DEBUG_KMEMLEAK_TEST + tristate "Simple test for the kernel memory leak detector" + depends on DEBUG_KMEMLEAK + help + Say Y or M here to build a test for the kernel memory leak + detector. This option enables a module that explicitly leaks + memory. + + If unsure, say N. + config DEBUG_PREEMPT bool "Debug preemptible kernel" depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64) @@ -964,3 +996,5 @@ config DMA_API_DEBUG source "samples/Kconfig" source "lib/Kconfig.kgdb" + +source "lib/Kconfig.kmemcheck" diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck new file mode 100644 index 00000000000..603c81b6654 --- /dev/null +++ b/lib/Kconfig.kmemcheck @@ -0,0 +1,91 @@ +config HAVE_ARCH_KMEMCHECK + bool + +menuconfig KMEMCHECK + bool "kmemcheck: trap use of uninitialized memory" + depends on DEBUG_KERNEL + depends on !X86_USE_3DNOW + depends on SLUB || SLAB + depends on !CC_OPTIMIZE_FOR_SIZE + depends on !FUNCTION_TRACER + select FRAME_POINTER + select STACKTRACE + default n + help + This option enables tracing of dynamically allocated kernel memory + to see if memory is used before it has been given an initial value. + Be aware that this requires half of your memory for bookkeeping and + will insert extra code at *every* read and write to tracked memory + thus slow down the kernel code (but user code is unaffected). + + The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable + or enable kmemcheck at boot-time. 
If the kernel is started with + kmemcheck=0, the large memory and CPU overhead is not incurred. + +choice + prompt "kmemcheck: default mode at boot" + depends on KMEMCHECK + default KMEMCHECK_ONESHOT_BY_DEFAULT + help + This option controls the default behaviour of kmemcheck when the + kernel boots and no kmemcheck= parameter is given. + +config KMEMCHECK_DISABLED_BY_DEFAULT + bool "disabled" + depends on KMEMCHECK + +config KMEMCHECK_ENABLED_BY_DEFAULT + bool "enabled" + depends on KMEMCHECK + +config KMEMCHECK_ONESHOT_BY_DEFAULT + bool "one-shot" + depends on KMEMCHECK + help + In one-shot mode, only the first error detected is reported before + kmemcheck is disabled. + +endchoice + +config KMEMCHECK_QUEUE_SIZE + int "kmemcheck: error queue size" + depends on KMEMCHECK + default 64 + help + Select the maximum number of errors to store in the queue. Since + errors can occur virtually anywhere and in any context, we need a + temporary storage area which is guarantueed not to generate any + other faults. The queue will be emptied as soon as a tasklet may + be scheduled. If the queue is full, new error reports will be + lost. + +config KMEMCHECK_SHADOW_COPY_SHIFT + int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)" + depends on KMEMCHECK + range 2 8 + default 5 + help + Select the number of shadow bytes to save along with each entry of + the queue. These bytes indicate what parts of an allocation are + initialized, uninitialized, etc. and will be displayed when an + error is detected to help the debugging of a particular problem. + +config KMEMCHECK_PARTIAL_OK + bool "kmemcheck: allow partially uninitialized memory" + depends on KMEMCHECK + default y + help + This option works around certain GCC optimizations that produce + 32-bit reads from 16-bit variables where the upper 16 bits are + thrown away afterwards. This may of course also hide some real + bugs. + +config KMEMCHECK_BITOPS_OK + bool "kmemcheck: allow bit-field manipulation" + depends on KMEMCHECK + default n + help + This option silences warnings that would be generated for bit-field + accesses where not all the bits are initialized at the same time. + This may also hide some real bugs. + diff --git a/lib/Makefile b/lib/Makefile index 33a40e40e3e..b6d1857bbf0 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ - string_helpers.o + string_helpers.o gcd.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG @@ -50,6 +50,7 @@ ifneq ($(CONFIG_HAVE_DEC_LOCK),y) endif obj-$(CONFIG_BITREVERSE) += bitrev.o +obj-$(CONFIG_RATIONAL) += rational.o obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o obj-$(CONFIG_CRC16) += crc16.o obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o @@ -92,6 +93,10 @@ obj-$(CONFIG_NLATTR) += nlattr.o obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o +obj-$(CONFIG_GENERIC_CSUM) += checksum.o + +obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o + hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/lib/atomic64.c b/lib/atomic64.c new file mode 100644 index 00000000000..c5e72556241 --- /dev/null +++ b/lib/atomic64.c @@ -0,0 +1,175 @@ +/* + * Generic implementation of 64-bit atomics using spinlocks, + * useful on processors that don't have 64-bit atomic instructions. + * + * Copyright © 2009 Paul Mackerras, IBM Corp. 
<paulus@au1.ibm.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include <linux/types.h> +#include <linux/cache.h> +#include <linux/spinlock.h> +#include <linux/init.h> +#include <asm/atomic.h> + +/* + * We use a hashed array of spinlocks to provide exclusive access + * to each atomic64_t variable. Since this is expected to used on + * systems with small numbers of CPUs (<= 4 or so), we use a + * relatively small array of 16 spinlocks to avoid wasting too much + * memory on the spinlock array. + */ +#define NR_LOCKS 16 + +/* + * Ensure each lock is in a separate cacheline. + */ +static union { + spinlock_t lock; + char pad[L1_CACHE_BYTES]; +} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp; + +static inline spinlock_t *lock_addr(const atomic64_t *v) +{ + unsigned long addr = (unsigned long) v; + + addr >>= L1_CACHE_SHIFT; + addr ^= (addr >> 8) ^ (addr >> 16); + return &atomic64_lock[addr & (NR_LOCKS - 1)].lock; +} + +long long atomic64_read(const atomic64_t *v) +{ + unsigned long flags; + spinlock_t *lock = lock_addr(v); + long long val; + + spin_lock_irqsave(lock, flags); + val = v->counter; + spin_unlock_irqrestore(lock, flags); + return val; +} + +void atomic64_set(atomic64_t *v, long long i) +{ + unsigned long flags; + spinlock_t *lock = lock_addr(v); + + spin_lock_irqsave(lock, flags); + v->counter = i; + spin_unlock_irqrestore(lock, flags); +} + +void atomic64_add(long long a, atomic64_t *v) +{ + unsigned long flags; + spinlock_t *lock = lock_addr(v); + + spin_lock_irqsave(lock, flags); + v->counter += a; + spin_unlock_irqrestore(lock, flags); +} + +long long atomic64_add_return(long long a, atomic64_t *v) +{ + unsigned long flags; + spinlock_t *lock = lock_addr(v); + long long val; + + spin_lock_irqsave(lock, flags); + val = v->counter += a; + spin_unlock_irqrestore(lock, flags); + return val; +} + +void atomic64_sub(long long a, atomic64_t *v) +{ + unsigned long flags; + spinlock_t *lock = lock_addr(v); + + spin_lock_irqsave(lock, flags); + v->counter -= a; + spin_unlock_irqrestore(lock, flags); +} + +long long atomic64_sub_return(long long a, atomic64_t *v) +{ + unsigned long flags; + spinlock_t *lock = lock_addr(v); + long long val; + + spin_lock_irqsave(lock, flags); + val = v->counter -= a; + spin_unlock_irqrestore(lock, flags); + return val; +} + +long long atomic64_dec_if_positive(atomic64_t *v) +{ + unsigned long flags; + spinlock_t *lock = lock_addr(v); + long long val; + + spin_lock_irqsave(lock, flags); + val = v->counter - 1; + if (val >= 0) + v->counter = val; + spin_unlock_irqrestore(lock, flags); + return val; +} + +long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) +{ + unsigned long flags; + spinlock_t *lock = lock_addr(v); + long long val; + + spin_lock_irqsave(lock, flags); + val = v->counter; + if (val == o) + v->counter = n; + spin_unlock_irqrestore(lock, flags); + return val; +} + +long long atomic64_xchg(atomic64_t *v, long long new) +{ + unsigned long flags; + spinlock_t *lock = lock_addr(v); + long long val; + + spin_lock_irqsave(lock, flags); + val = v->counter; + v->counter = new; + spin_unlock_irqrestore(lock, flags); + return val; +} + +int atomic64_add_unless(atomic64_t *v, long long a, long long u) +{ + unsigned long flags; + spinlock_t *lock = lock_addr(v); + int ret = 1; + + spin_lock_irqsave(lock, flags); + if 
(v->counter != u) { + v->counter += a; + ret = 0; + } + spin_unlock_irqrestore(lock, flags); + return ret; +} + +static int init_atomic64_lock(void) +{ + int i; + + for (i = 0; i < NR_LOCKS; ++i) + spin_lock_init(&atomic64_lock[i].lock); + return 0; +} + +pure_initcall(init_atomic64_lock); diff --git a/lib/checksum.c b/lib/checksum.c new file mode 100644 index 00000000000..12e5a1c91cd --- /dev/null +++ b/lib/checksum.c @@ -0,0 +1,193 @@ +/* + * + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * IP/TCP/UDP checksumming routines + * + * Authors: Jorge Cwik, <jorge@laser.satlink.net> + * Arnt Gulbrandsen, <agulbra@nvg.unit.no> + * Tom May, <ftom@netcom.com> + * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de> + * Lots of code moved from tcp.c and ip.c; see those files + * for more names. + * + * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek: + * Fixed some nasty bugs, causing some horrible crashes. + * A: At some points, the sum (%0) was used as + * length-counter instead of the length counter + * (%1). Thanks to Roman Hodek for pointing this out. + * B: GCC seems to mess up if one uses too many + * data-registers to hold input values and one tries to + * specify d0 and d1 as scratch registers. Letting gcc + * choose these registers itself solves the problem. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access + kills, so most of the assembly has to go. */ + +#include <linux/module.h> +#include <net/checksum.h> + +#include <asm/byteorder.h> + +static inline unsigned short from32to16(unsigned long x) +{ + /* add up 16-bit and 16-bit for 16+c bit */ + x = (x & 0xffff) + (x >> 16); + /* add up carry.. */ + x = (x & 0xffff) + (x >> 16); + return x; +} + +static unsigned int do_csum(const unsigned char *buff, int len) +{ + int odd, count; + unsigned long result = 0; + + if (len <= 0) + goto out; + odd = 1 & (unsigned long) buff; + if (odd) { + result = *buff; + len--; + buff++; + } + count = len >> 1; /* nr of 16-bit words.. */ + if (count) { + if (2 & (unsigned long) buff) { + result += *(unsigned short *) buff; + count--; + len -= 2; + buff += 2; + } + count >>= 1; /* nr of 32-bit words.. */ + if (count) { + unsigned long carry = 0; + do { + unsigned long w = *(unsigned long *) buff; + count--; + buff += 4; + result += carry; + result += w; + carry = (w > result); + } while (count); + result += carry; + result = (result & 0xffff) + (result >> 16); + } + if (len & 2) { + result += *(unsigned short *) buff; + buff += 2; + } + } + if (len & 1) + result += (*buff << 8); + result = from32to16(result); + if (odd) + result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); +out: + return result; +} + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. 
+ */ +__sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + return (__force __sum16)~do_csum(iph, ihl*4); +} +EXPORT_SYMBOL(ip_fast_csum); + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +__wsum csum_partial(const void *buff, int len, __wsum wsum) +{ + unsigned int sum = (__force unsigned int)wsum; + unsigned int result = do_csum(buff, len); + + /* add in old sum, and carry.. */ + result += sum; + if (sum > result) + result += 1; + return (__force __wsum)result; +} +EXPORT_SYMBOL(csum_partial); + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ +__sum16 ip_compute_csum(const void *buff, int len) +{ + return (__force __sum16)~do_csum(buff, len); +} +EXPORT_SYMBOL(ip_compute_csum); + +/* + * copy from fs while checksumming, otherwise like csum_partial + */ +__wsum +csum_partial_copy_from_user(const void __user *src, void *dst, int len, + __wsum sum, int *csum_err) +{ + int missing; + + missing = __copy_from_user(dst, src, len); + if (missing) { + memset(dst + len - missing, 0, missing); + *csum_err = -EFAULT; + } else + *csum_err = 0; + + return csum_partial(dst, len, sum); +} +EXPORT_SYMBOL(csum_partial_copy_from_user); + +/* + * copy from ds while checksumming, otherwise like csum_partial + */ +__wsum +csum_partial_copy(const void *src, void *dst, int len, __wsum sum) +{ + memcpy(dst, src, len); + return csum_partial(dst, len, sum); +} +EXPORT_SYMBOL(csum_partial_copy); + +#ifndef csum_tcpudp_nofold +__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + unsigned short len, + unsigned short proto, + __wsum sum) +{ + unsigned long long s = (__force u32)sum; + + s += (__force u32)saddr; + s += (__force u32)daddr; +#ifdef __BIG_ENDIAN + s += proto + len; +#else + s += (proto + len) << 8; +#endif + s += (s >> 32); + return (__force __wsum)s; +} +EXPORT_SYMBOL(csum_tcpudp_nofold); +#endif diff --git a/lib/cpumask.c b/lib/cpumask.c index 1f71b97de0f..7bb4142a502 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -92,15 +92,8 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) */ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) { - if (likely(slab_is_available())) - *mask = kmalloc_node(cpumask_size(), flags, node); - else { -#ifdef CONFIG_DEBUG_PER_CPU_MAPS - printk(KERN_ERR - "=> alloc_cpumask_var: kmalloc not available!\n"); -#endif - *mask = NULL; - } + *mask = kmalloc_node(cpumask_size(), flags, node); + #ifdef CONFIG_DEBUG_PER_CPU_MAPS if (!*mask) { printk(KERN_ERR "=> alloc_cpumask_var: failed!\n"); @@ -119,6 +112,12 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) } EXPORT_SYMBOL(alloc_cpumask_var_node); +bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) +{ + return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node); +} +EXPORT_SYMBOL(zalloc_cpumask_var_node); + /** * alloc_cpumask_var - allocate a struct cpumask * @mask: pointer to cpumask_var_t where the cpumask is returned @@ -135,6 +134,12 @@ bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) } EXPORT_SYMBOL(alloc_cpumask_var); +bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) +{ + return alloc_cpumask_var(mask, flags | __GFP_ZERO); +} 
+EXPORT_SYMBOL(zalloc_cpumask_var); + /** * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena. * @mask: pointer to cpumask_var_t where the cpumask is returned diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c index a65c3145554..e73822aa6e9 100644 --- a/lib/dec_and_lock.c +++ b/lib/dec_and_lock.c @@ -19,11 +19,10 @@ */ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) { -#ifdef CONFIG_SMP /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ if (atomic_add_unless(atomic, -1, 1)) return 0; -#endif + /* Otherwise do it the slow way */ spin_lock(lock); if (atomic_dec_and_test(atomic)) diff --git a/lib/extable.c b/lib/extable.c index 179c0874559..4cac81ec225 100644 --- a/lib/extable.c +++ b/lib/extable.c @@ -39,7 +39,26 @@ void sort_extable(struct exception_table_entry *start, sort(start, finish - start, sizeof(struct exception_table_entry), cmp_ex, NULL); } -#endif + +#ifdef CONFIG_MODULES +/* + * If the exception table is sorted, any referring to the module init + * will be at the beginning or the end. + */ +void trim_init_extable(struct module *m) +{ + /*trim the beginning*/ + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) { + m->extable++; + m->num_exentries--; + } + /*trim the end*/ + while (m->num_exentries && + within_module_init(m->extable[m->num_exentries-1].insn, m)) + m->num_exentries--; +} +#endif /* CONFIG_MODULES */ +#endif /* !ARCH_HAS_SORT_EXTABLE */ #ifndef ARCH_HAS_SEARCH_EXTABLE /* diff --git a/lib/gcd.c b/lib/gcd.c new file mode 100644 index 00000000000..f879033d982 --- /dev/null +++ b/lib/gcd.c @@ -0,0 +1,18 @@ +#include <linux/kernel.h> +#include <linux/gcd.h> +#include <linux/module.h> + +/* Greatest common divisor */ +unsigned long gcd(unsigned long a, unsigned long b) +{ + unsigned long r; + + if (a < b) + swap(a, b); + while ((r = a % b) != 0) { + a = b; + b = r; + } + return b; +} +EXPORT_SYMBOL_GPL(gcd); diff --git a/lib/genalloc.c b/lib/genalloc.c index f6d276db2d5..eed2bdb865e 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -85,7 +85,6 @@ void gen_pool_destroy(struct gen_pool *pool) int bit, end_bit; - write_lock(&pool->lock); list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); list_del(&chunk->next_chunk); diff --git a/lib/hexdump.c b/lib/hexdump.c index f07c0db81d2..39af2560f76 100644 --- a/lib/hexdump.c +++ b/lib/hexdump.c @@ -65,7 +65,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, for (j = 0; j < ngroups; j++) lx += scnprintf(linebuf + lx, linebuflen - lx, - "%16.16llx ", (unsigned long long)*(ptr8 + j)); + "%s%16.16llx", j ? " " : "", + (unsigned long long)*(ptr8 + j)); ascii_column = 17 * ngroups + 2; break; } @@ -76,7 +77,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, for (j = 0; j < ngroups; j++) lx += scnprintf(linebuf + lx, linebuflen - lx, - "%8.8x ", *(ptr4 + j)); + "%s%8.8x", j ? " " : "", *(ptr4 + j)); ascii_column = 9 * ngroups + 2; break; } @@ -87,19 +88,21 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, for (j = 0; j < ngroups; j++) lx += scnprintf(linebuf + lx, linebuflen - lx, - "%4.4x ", *(ptr2 + j)); + "%s%4.4x", j ? 
" " : "", *(ptr2 + j)); ascii_column = 5 * ngroups + 2; break; } default: - for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen; - j++) { + for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { ch = ptr[j]; linebuf[lx++] = hex_asc_hi(ch); linebuf[lx++] = hex_asc_lo(ch); linebuf[lx++] = ' '; } + if (j) + lx--; + ascii_column = 3 * rowsize + 2; break; } @@ -108,7 +111,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) linebuf[lx++] = ' '; - for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++) + for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] : '.'; nil: diff --git a/lib/kobject.c b/lib/kobject.c index bacf6fe4f7a..b512b746d2a 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -793,11 +793,16 @@ static struct kset *kset_create(const char *name, struct kobject *parent_kobj) { struct kset *kset; + int retval; kset = kzalloc(sizeof(*kset), GFP_KERNEL); if (!kset) return NULL; - kobject_set_name(&kset->kobj, name); + retval = kobject_set_name(&kset->kobj, name); + if (retval) { + kfree(kset); + return NULL; + } kset->uevent_ops = uevent_ops; kset->kobj.parent = parent_kobj; diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 4bb42a0344e..23abbd93cae 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -351,20 +351,12 @@ int radix_tree_insert(struct radix_tree_root *root, } EXPORT_SYMBOL(radix_tree_insert); -/** - * radix_tree_lookup_slot - lookup a slot in a radix tree - * @root: radix tree root - * @index: index key - * - * Returns: the slot corresponding to the position @index in the - * radix tree @root. This is useful for update-if-exists operations. - * - * This function can be called under rcu_read_lock iff the slot is not - * modified by radix_tree_replace_slot, otherwise it must be called - * exclusive from other writers. Any dereference of the slot must be done - * using radix_tree_deref_slot. +/* + * is_slot == 1 : search for the slot. + * is_slot == 0 : search for the node. */ -void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) +static void *radix_tree_lookup_element(struct radix_tree_root *root, + unsigned long index, int is_slot) { unsigned int height, shift; struct radix_tree_node *node, **slot; @@ -376,7 +368,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) if (!radix_tree_is_indirect_ptr(node)) { if (index > 0) return NULL; - return (void **)&root->rnode; + return is_slot ? (void *)&root->rnode : node; } node = radix_tree_indirect_to_ptr(node); @@ -397,7 +389,25 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) height--; } while (height > 0); - return (void **)slot; + return is_slot ? (void *)slot:node; +} + +/** + * radix_tree_lookup_slot - lookup a slot in a radix tree + * @root: radix tree root + * @index: index key + * + * Returns: the slot corresponding to the position @index in the + * radix tree @root. This is useful for update-if-exists operations. + * + * This function can be called under rcu_read_lock iff the slot is not + * modified by radix_tree_replace_slot, otherwise it must be called + * exclusive from other writers. Any dereference of the slot must be done + * using radix_tree_deref_slot. 
+ */ +void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) +{ + return (void **)radix_tree_lookup_element(root, index, 1); } EXPORT_SYMBOL(radix_tree_lookup_slot); @@ -415,38 +425,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot); */ void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) { - unsigned int height, shift; - struct radix_tree_node *node, **slot; - - node = rcu_dereference(root->rnode); - if (node == NULL) - return NULL; - - if (!radix_tree_is_indirect_ptr(node)) { - if (index > 0) - return NULL; - return node; - } - node = radix_tree_indirect_to_ptr(node); - - height = node->height; - if (index > radix_tree_maxindex(height)) - return NULL; - - shift = (height-1) * RADIX_TREE_MAP_SHIFT; - - do { - slot = (struct radix_tree_node **) - (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); - node = rcu_dereference(*slot); - if (node == NULL) - return NULL; - - shift -= RADIX_TREE_MAP_SHIFT; - height--; - } while (height > 0); - - return node; + return radix_tree_lookup_element(root, index, 0); } EXPORT_SYMBOL(radix_tree_lookup); @@ -666,6 +645,43 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root, } EXPORT_SYMBOL(radix_tree_next_hole); +/** + * radix_tree_prev_hole - find the prev hole (not-present entry) + * @root: tree root + * @index: index key + * @max_scan: maximum range to search + * + * Search backwards in the range [max(index-max_scan+1, 0), index] + * for the first hole. + * + * Returns: the index of the hole if found, otherwise returns an index + * outside of the set specified (in which case 'index - return >= max_scan' + * will be true). In rare cases of wrap-around, LONG_MAX will be returned. + * + * radix_tree_next_hole may be called under rcu_read_lock. However, like + * radix_tree_gang_lookup, this will not atomically search a snapshot of + * the tree at a single point in time. For example, if a hole is created + * at index 10, then subsequently a hole is created at index 5, + * radix_tree_prev_hole covering both indexes may return 5 if called under + * rcu_read_lock. + */ +unsigned long radix_tree_prev_hole(struct radix_tree_root *root, + unsigned long index, unsigned long max_scan) +{ + unsigned long i; + + for (i = 0; i < max_scan; i++) { + if (!radix_tree_lookup(root, index)) + break; + index--; + if (index == LONG_MAX) + break; + } + + return index; +} +EXPORT_SYMBOL(radix_tree_prev_hole); + static unsigned int __lookup(struct radix_tree_node *slot, void ***results, unsigned long index, unsigned int max_items, unsigned long *next_index) diff --git a/lib/rational.c b/lib/rational.c new file mode 100644 index 00000000000..b3c099b5478 --- /dev/null +++ b/lib/rational.c @@ -0,0 +1,62 @@ +/* + * rational fractions + * + * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <os@emlix.com> + * + * helper functions when coping with rational numbers + */ + +#include <linux/rational.h> + +/* + * calculate best rational approximation for a given fraction + * taking into account restricted register size, e.g. to find + * appropriate values for a pll with 5 bit denominator and + * 8 bit numerator register fields, trying to set up with a + * frequency ratio of 3.1415, one would say: + * + * rational_best_approximation(31415, 10000, + * (1 << 8) - 1, (1 << 5) - 1, &n, &d); + * + * you may look at given_numerator as a fixed point number, + * with the fractional part size described in given_denominator. 
+ * + * for theoretical background, see: + * http://en.wikipedia.org/wiki/Continued_fraction + */ + +void rational_best_approximation( + unsigned long given_numerator, unsigned long given_denominator, + unsigned long max_numerator, unsigned long max_denominator, + unsigned long *best_numerator, unsigned long *best_denominator) +{ + unsigned long n, d, n0, d0, n1, d1; + n = given_numerator; + d = given_denominator; + n0 = d1 = 0; + n1 = d0 = 1; + for (;;) { + unsigned long t, a; + if ((n1 > max_numerator) || (d1 > max_denominator)) { + n1 = n0; + d1 = d0; + break; + } + if (d == 0) + break; + t = d; + a = n / d; + d = n % d; + n = t; + t = n0 + a * n1; + n0 = n1; + n1 = t; + t = d0 + a * d1; + d0 = d1; + d1 = t; + } + *best_numerator = n1; + *best_denominator = d1; +} + +EXPORT_SYMBOL(rational_best_approximation); diff --git a/lib/rbtree.c b/lib/rbtree.c index f653659e0bc..e2aa3be2985 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -231,34 +231,34 @@ void rb_erase(struct rb_node *node, struct rb_root *root) node = node->rb_right; while ((left = node->rb_left) != NULL) node = left; + + if (rb_parent(old)) { + if (rb_parent(old)->rb_left == old) + rb_parent(old)->rb_left = node; + else + rb_parent(old)->rb_right = node; + } else + root->rb_node = node; + child = node->rb_right; parent = rb_parent(node); color = rb_color(node); - if (child) - rb_set_parent(child, parent); if (parent == old) { - parent->rb_right = child; parent = node; - } else + } else { + if (child) + rb_set_parent(child, parent); parent->rb_left = child; + node->rb_right = old->rb_right; + rb_set_parent(old->rb_right, node); + } + node->rb_parent_color = old->rb_parent_color; - node->rb_right = old->rb_right; node->rb_left = old->rb_left; - - if (rb_parent(old)) - { - if (rb_parent(old)->rb_left == old) - rb_parent(old)->rb_left = node; - else - rb_parent(old)->rb_right = node; - } else - root->rb_node = node; - rb_set_parent(old->rb_left, node); - if (old->rb_right) - rb_set_parent(old->rb_right, node); + goto color; } diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 7536acea135..756ccafa9ce 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -408,6 +408,8 @@ enum format_type { FORMAT_TYPE_LONG_LONG, FORMAT_TYPE_ULONG, FORMAT_TYPE_LONG, + FORMAT_TYPE_UBYTE, + FORMAT_TYPE_BYTE, FORMAT_TYPE_USHORT, FORMAT_TYPE_SHORT, FORMAT_TYPE_UINT, @@ -573,12 +575,15 @@ static char *string(char *buf, char *end, char *s, struct printf_spec spec) } static char *symbol_string(char *buf, char *end, void *ptr, - struct printf_spec spec) + struct printf_spec spec, char ext) { unsigned long value = (unsigned long) ptr; #ifdef CONFIG_KALLSYMS char sym[KSYM_SYMBOL_LEN]; - sprint_symbol(sym, value); + if (ext != 'f') + sprint_symbol(sym, value); + else + kallsyms_lookup(value, NULL, NULL, NULL, sym); return string(buf, end, sym, spec); #else spec.field_width = 2*sizeof(void *); @@ -690,7 +695,8 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr, * * Right now we handle: * - * - 'F' For symbolic function descriptor pointers + * - 'F' For symbolic function descriptor pointers with offset + * - 'f' For simple symbolic function names without offset * - 'S' For symbolic direct pointers * - 'R' For a struct resource pointer, it prints the range of * addresses (not the name nor the flags) @@ -713,10 +719,11 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, switch (*fmt) { case 'F': + case 'f': ptr = dereference_function_descriptor(ptr); /* Fallthrough */ case 'S': - return symbol_string(buf, end, ptr, spec); + return 
symbol_string(buf, end, ptr, spec, *fmt); case 'R': return resource_string(buf, end, ptr, spec); case 'm': @@ -853,11 +860,15 @@ qualifier: spec->qualifier = -1; if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt == 'Z' || *fmt == 'z' || *fmt == 't') { - spec->qualifier = *fmt; - ++fmt; - if (spec->qualifier == 'l' && *fmt == 'l') { - spec->qualifier = 'L'; - ++fmt; + spec->qualifier = *fmt++; + if (unlikely(spec->qualifier == *fmt)) { + if (spec->qualifier == 'l') { + spec->qualifier = 'L'; + ++fmt; + } else if (spec->qualifier == 'h') { + spec->qualifier = 'H'; + ++fmt; + } } } @@ -919,6 +930,11 @@ qualifier: spec->type = FORMAT_TYPE_SIZE_T; } else if (spec->qualifier == 't') { spec->type = FORMAT_TYPE_PTRDIFF; + } else if (spec->qualifier == 'H') { + if (spec->flags & SIGN) + spec->type = FORMAT_TYPE_BYTE; + else + spec->type = FORMAT_TYPE_UBYTE; } else if (spec->qualifier == 'h') { if (spec->flags & SIGN) spec->type = FORMAT_TYPE_SHORT; @@ -943,7 +959,8 @@ qualifier: * * This function follows C99 vsnprintf, but has some extensions: * %pS output the name of a text symbol - * %pF output the name of a function pointer + * %pF output the name of a function pointer with its offset + * %pf output the name of a function pointer without its offset * %pR output the address range in a struct resource * * The return value is the number of characters which would @@ -1087,6 +1104,12 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) case FORMAT_TYPE_PTRDIFF: num = va_arg(args, ptrdiff_t); break; + case FORMAT_TYPE_UBYTE: + num = (unsigned char) va_arg(args, int); + break; + case FORMAT_TYPE_BYTE: + num = (signed char) va_arg(args, int); + break; case FORMAT_TYPE_USHORT: num = (unsigned short) va_arg(args, int); break; @@ -1363,6 +1386,10 @@ do { \ case FORMAT_TYPE_PTRDIFF: save_arg(ptrdiff_t); break; + case FORMAT_TYPE_UBYTE: + case FORMAT_TYPE_BYTE: + save_arg(char); + break; case FORMAT_TYPE_USHORT: case FORMAT_TYPE_SHORT: save_arg(short); @@ -1391,7 +1418,8 @@ EXPORT_SYMBOL_GPL(vbin_printf); * * The format follows C99 vsnprintf, but has some extensions: * %pS output the name of a text symbol - * %pF output the name of a function pointer + * %pF output the name of a function pointer with its offset + * %pf output the name of a function pointer without its offset * %pR output the address range in a struct resource * %n is ignored * @@ -1538,6 +1566,12 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) case FORMAT_TYPE_PTRDIFF: num = get_arg(ptrdiff_t); break; + case FORMAT_TYPE_UBYTE: + num = get_arg(unsigned char); + break; + case FORMAT_TYPE_BYTE: + num = get_arg(signed char); + break; case FORMAT_TYPE_USHORT: num = get_arg(unsigned short); break; |
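
The new lib/atomic64.c above emulates 64-bit atomics on CPUs without 64-bit atomic instructions by hashing each atomic64_t's address into a small array of spinlocks. Below is a minimal user-space sketch of the same hashing idea, with POSIX mutexes standing in for kernel spinlocks; myatomic64_*, lock_for() and the 64-byte cache-line shift are illustrative assumptions, not the kernel API.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_LOCKS 16

typedef struct { long long counter; } myatomic64_t;

static pthread_mutex_t locks[NR_LOCKS];

/* Mirror of lock_addr(): drop the cache-line offset bits, mix in some
 * higher bits, and index a small lock array. */
static pthread_mutex_t *lock_for(const myatomic64_t *v)
{
	uintptr_t addr = (uintptr_t)v;

	addr >>= 6;				/* assume 64-byte cache lines */
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &locks[addr & (NR_LOCKS - 1)];
}

static long long myatomic64_add_return(long long a, myatomic64_t *v)
{
	pthread_mutex_t *lock = lock_for(v);
	long long val;

	pthread_mutex_lock(lock);
	val = (v->counter += a);		/* read-modify-write under the lock */
	pthread_mutex_unlock(lock);
	return val;
}

int main(void)
{
	myatomic64_t v = { 0 };
	int i;

	for (i = 0; i < NR_LOCKS; i++)		/* like init_atomic64_lock() */
		pthread_mutex_init(&locks[i], NULL);
	printf("%lld\n", myatomic64_add_return(5, &v));	/* prints 5 */
	return 0;
}

Sixteen locks keep the memory cost small on the small SMP systems this fallback targets, while the address hash keeps unrelated counters from always contending on one lock.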
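
lib/checksum.c adds a portable C fallback for the 16-bit ones'-complement Internet checksum. The stand-alone sketch below shows the same arithmetic (sum 16-bit words into a wider accumulator, fold the carries back in, invert); unlike do_csum() it reads the buffer explicitly as big-endian 16-bit words instead of relying on the host-order tricks the kernel version uses, and internet_checksum() is a local name, not the kernel API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t internet_checksum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;	/* pad the odd trailing byte */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries back in */
	return (uint16_t)~sum;
}

int main(void)
{
	/* Example IPv4 header with its checksum field (bytes 10-11) zeroed. */
	uint8_t hdr[] = {
		0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
		0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
		0xac, 0x10, 0x0a, 0x0c
	};

	printf("0x%04x\n", internet_checksum(hdr, sizeof(hdr)));
	return 0;
}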
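
trim_init_extable() in the lib/extable.c hunk relies on the table being sorted, so entries that refer to module init code can only sit at the two ends and trimming reduces to advancing the base pointer and shrinking the count. A toy sketch of that pattern, with a made-up "value is negative" predicate in place of within_module_init():

#include <stdio.h>

static int is_init(int v)
{
	return v < 0;
}

int main(void)
{
	int table[] = { -3, -1, 2, 5, 7, -4 };
	int *base = table;
	unsigned int num = 6, i;

	while (num && is_init(base[0])) {	/* trim the beginning */
		base++;
		num--;
	}
	while (num && is_init(base[num - 1]))	/* trim the end */
		num--;

	for (i = 0; i < num; i++)
		printf("%d ", base[i]);		/* prints "2 5 7" */
	printf("\n");
	return 0;
}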
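
The new lib/gcd.c is a plain Euclidean algorithm. A self-contained user-space copy with a quick check follows; the swap() helper is replaced by an explicit exchange, and the main() and its values are only for illustration. As in the kernel version, the second operand must not end up zero.

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	unsigned long r;

	if (a < b) {			/* ensure a >= b before dividing */
		r = a;
		a = b;
		b = r;
	}
	while ((r = a % b) != 0) {	/* Euclid: replace (a, b) with (b, a mod b) */
		a = b;
		b = r;
	}
	return b;
}

int main(void)
{
	printf("%lu\n", gcd(48, 18));	/* prints 6 */
	return 0;
}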
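
The lib/hexdump.c change replaces "%4.4x " (separator appended) with "%s%4.4x" where the "%s" is empty for the first group and a single space afterwards, so rows no longer end in a trailing blank. A tiny demonstration of that idiom; the data and buffer sizes are made up.

#include <stdio.h>

int main(void)
{
	unsigned short groups[] = { 0xdead, 0xbeef, 0x1234 };
	char line[64];
	int j, lx = 0;

	for (j = 0; j < 3; j++)
		lx += snprintf(line + lx, sizeof(line) - lx,
			       "%s%4.4x", j ? " " : "", (unsigned)groups[j]);
	printf("[%s]\n", line);		/* prints [dead beef 1234], no trailing space */
	return 0;
}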
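
lib/rational.c finds the best rational approximation under register-size limits by walking the continued-fraction convergents. The sketch below is a user-space copy of that walk, using the 3.1415 example from the file's own comment; best_approximation() is a local name, not the kernel symbol.

#include <stdio.h>

/* Keep the previous two convergents n0/d0 and n1/d1 and stop when the
 * next one would exceed the permitted numerator or denominator range. */
static void best_approximation(unsigned long n, unsigned long d,
			       unsigned long max_n, unsigned long max_d,
			       unsigned long *best_n, unsigned long *best_d)
{
	unsigned long n0 = 0, d1 = 0, n1 = 1, d0 = 1;

	for (;;) {
		unsigned long t, a;

		if (n1 > max_n || d1 > max_d) {
			n1 = n0;	/* latest convergent too big: back up one step */
			d1 = d0;
			break;
		}
		if (d == 0)
			break;		/* fraction terminated: result is exact */
		t = d;
		a = n / d;		/* next continued-fraction term */
		d = n % d;
		n = t;
		t = n0 + a * n1;	/* advance the numerator convergent */
		n0 = n1;
		n1 = t;
		t = d0 + a * d1;	/* advance the denominator convergent */
		d0 = d1;
		d1 = t;
	}
	*best_n = n1;
	*best_d = d1;
}

int main(void)
{
	unsigned long num, den;

	/* 3.1415 with an 8-bit numerator field and a 5-bit denominator field */
	best_approximation(31415, 10000, (1 << 8) - 1, (1 << 5) - 1, &num, &den);
	printf("%lu/%lu\n", num, den);	/* prints 22/7 */
	return 0;
}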
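
The lib/vsprintf.c hunk adds FORMAT_TYPE_BYTE/UBYTE so the kernel's printf understands the C99 "hh" length modifier: the vararg is fetched as int and then narrowed to signed or unsigned char. Hosted printf already implements "hh", so the narrowing can be observed directly in user space (gcc's -Wformat may warn about passing a plain int here, which is exactly the case the narrowing handles).

#include <stdio.h>

int main(void)
{
	int v = 300;	/* does not fit in 8 bits: 300 = 256 + 44 */

	printf("%hhu %hhd %d\n", v, v, v);	/* prints "44 44 300" */
	return 0;
}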