Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig.debug      |  41
-rw-r--r-- | lib/Kconfig.kgdb       |   3
-rw-r--r-- | lib/Makefile           |   2
-rw-r--r-- | lib/bcd.c              |  14
-rw-r--r-- | lib/cpumask.c          |   9
-rw-r--r-- | lib/debugobjects.c     |   4
-rw-r--r-- | lib/kobject.c          |  10
-rw-r--r-- | lib/kobject_uevent.c   |   3
-rw-r--r-- | lib/scatterlist.c      | 176
-rw-r--r-- | lib/smp_processor_id.c |   6
-rw-r--r-- | lib/textsearch.c       |   2
11 files changed, 209 insertions, 61 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index df27132a56f..e1d4764435e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -74,6 +74,9 @@ config DEBUG_FS
 	  debugging files into.  Enable this option to be able to read and
 	  write to these files.
 
+	  For detailed documentation on the debugfs API, see
+	  Documentation/DocBook/filesystems.
+
 	  If unsure, say N.
 
 config HEADERS_CHECK
@@ -147,7 +150,7 @@ config DETECT_SOFTLOCKUP
 	help
 	  Say Y here to enable the kernel to detect "soft lockups",
 	  which are bugs that cause the kernel to loop in kernel
-	  mode for more than 10 seconds, without giving other tasks a
+	  mode for more than 60 seconds, without giving other tasks a
 	  chance to run.
 
 	  When a soft-lockup is detected, the kernel will print the
@@ -159,6 +162,30 @@ config DETECT_SOFTLOCKUP
 	   can be detected via the NMI-watchdog, on platforms that
 	   support it.)
 
+config BOOTPARAM_SOFTLOCKUP_PANIC
+	bool "Panic (Reboot) On Soft Lockups"
+	depends on DETECT_SOFTLOCKUP
+	help
+	  Say Y here to enable the kernel to panic on "soft lockups",
+	  which are bugs that cause the kernel to loop in kernel
+	  mode for more than 60 seconds, without giving other tasks a
+	  chance to run.
+
+	  The panic can be used in combination with panic_timeout,
+	  to cause the system to reboot automatically after a
+	  lockup has been detected. This feature is useful for
+	  high-availability systems that have uptime guarantees and
+	  where a lockup must be resolved ASAP.
+
+	  Say N if unsure.
+
+config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
+	int
+	depends on DETECT_SOFTLOCKUP
+	range 0 1
+	default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
+	default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
+
 config SCHED_DEBUG
 	bool "Collect scheduler debugging info"
 	depends on DEBUG_KERNEL && PROC_FS
@@ -478,6 +505,18 @@ config DEBUG_WRITECOUNT
 
 	  If unsure, say N.
 
+config DEBUG_MEMORY_INIT
+	bool "Debug memory initialisation" if EMBEDDED
+	default !EMBEDDED
+	help
+	  Enable this for additional checks during memory initialisation.
+	  The sanity checks verify aspects of the VM such as the memory model
+	  and other information provided by the architecture. Verbose
+	  information will be printed at KERN_DEBUG loglevel depending
+	  on the mminit_loglevel= command-line option.
+
+	  If unsure, say Y
+
 config DEBUG_LIST
 	bool "Debug linked list manipulation"
 	depends on DEBUG_KERNEL
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index a5d4b1dac2a..2cfd2721f7e 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,7 +1,4 @@
 
-config HAVE_ARCH_KGDB_SHADOW_INFO
-	bool
-
 config HAVE_ARCH_KGDB
 	bool
 
diff --git a/lib/Makefile b/lib/Makefile
index 818c4d45551..9085ad6fa53 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -18,7 +18,7 @@ lib-$(CONFIG_SMP) += cpumask.o
 
 lib-y	+= kobject.o kref.o klist.o
 
-obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
+obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
diff --git a/lib/bcd.c b/lib/bcd.c
new file mode 100644
index 00000000000..d74257fd0fe
--- /dev/null
+++ b/lib/bcd.c
@@ -0,0 +1,14 @@
+#include <linux/bcd.h>
+#include <linux/module.h>
+
+unsigned bcd2bin(unsigned char val)
+{
+	return (val & 0x0f) + (val >> 4) * 10;
+}
+EXPORT_SYMBOL(bcd2bin);
+
+unsigned char bin2bcd(unsigned val)
+{
+	return ((val / 10) << 4) + val % 10;
+}
+EXPORT_SYMBOL(bin2bcd);
diff --git a/lib/cpumask.c b/lib/cpumask.c
index bb4f76d3c3e..5f97dc25ef9 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp)
 }
 EXPORT_SYMBOL(__next_cpu);
 
+#if NR_CPUS > 64
+int __next_cpu_nr(int n, const cpumask_t *srcp)
+{
+	return min_t(int, nr_cpu_ids,
+			find_next_bit(srcp->bits, nr_cpu_ids, n+1));
+}
+EXPORT_SYMBOL(__next_cpu_nr);
+#endif
+
 int __any_online_cpu(const cpumask_t *mask)
 {
 	int cpu;
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 85b18d79be8..f86196390cf 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -226,15 +226,13 @@ debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
 
 static void debug_object_is_on_stack(void *addr, int onstack)
 {
-	void *stack = current->stack;
 	int is_on_stack;
 	static int limit;
 
 	if (limit > 4)
 		return;
 
-	is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE));
-
+	is_on_stack = object_is_on_stack(addr);
 	if (is_on_stack == onstack)
 		return;
 
diff --git a/lib/kobject.c b/lib/kobject.c
index dcade0543bd..744401571ed 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -216,13 +216,19 @@ static int kobject_add_internal(struct kobject *kobj)
 static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
 				  va_list vargs)
 {
-	/* Free the old name, if necessary. */
-	kfree(kobj->name);
+	const char *old_name = kobj->name;
+	char *s;
 
 	kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
 	if (!kobj->name)
 		return -ENOMEM;
 
+	/* ewww... some of these buggers have '/' in the name ... */
+	s = strchr(kobj->name, '/');
+	if (s)
+		s[0] = '!';
+
+	kfree(old_name);
 	return 0;
 }
 
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2fa545a6316..9f8d599459d 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -245,7 +245,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 		if (retval)
 			goto exit;
 
-		call_usermodehelper(argv[0], argv, env->envp, UMH_WAIT_EXEC);
+		retval = call_usermodehelper(argv[0], argv,
+					     env->envp, UMH_WAIT_EXEC);
 	}
 
 exit:
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b80c21100d7..876ba6d5b67 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -295,6 +295,117 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
 EXPORT_SYMBOL(sg_alloc_table);
 
 /**
+ * sg_miter_start - start mapping iteration over a sg list
+ * @miter: sg mapping iter to be started
+ * @sgl: sg list to iterate over
+ * @nents: number of sg entries
+ *
+ * Description:
+ *   Starts mapping iterator @miter.
+ *
+ * Context:
+ *   Don't care.
+ */
+void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
+		    unsigned int nents, unsigned int flags)
+{
+	memset(miter, 0, sizeof(struct sg_mapping_iter));
+
+	miter->__sg = sgl;
+	miter->__nents = nents;
+	miter->__offset = 0;
+	miter->__flags = flags;
+}
+EXPORT_SYMBOL(sg_miter_start);
+
+/**
+ * sg_miter_next - proceed mapping iterator to the next mapping
+ * @miter: sg mapping iter to proceed
+ *
+ * Description:
+ *   Proceeds @miter@ to the next mapping.  @miter@ should have been
+ *   started using sg_miter_start().  On successful return,
+ *   @miter@->page, @miter@->addr and @miter@->length point to the
+ *   current mapping.
+ *
+ * Context:
+ *   IRQ disabled if SG_MITER_ATOMIC.  IRQ must stay disabled till
+ *   @miter@ is stopped.  May sleep if !SG_MITER_ATOMIC.
+ *
+ * Returns:
+ *   true if @miter contains the next mapping.  false if end of sg
+ *   list is reached.
+ */
+bool sg_miter_next(struct sg_mapping_iter *miter)
+{
+	unsigned int off, len;
+
+	/* check for end and drop resources from the last iteration */
+	if (!miter->__nents)
+		return false;
+
+	sg_miter_stop(miter);
+
+	/* get to the next sg if necessary.  __offset is adjusted by stop */
+	if (miter->__offset == miter->__sg->length && --miter->__nents) {
+		miter->__sg = sg_next(miter->__sg);
+		miter->__offset = 0;
+	}
+
+	/* map the next page */
+	off = miter->__sg->offset + miter->__offset;
+	len = miter->__sg->length - miter->__offset;
+
+	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
+	off &= ~PAGE_MASK;
+	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
+	miter->consumed = miter->length;
+
+	if (miter->__flags & SG_MITER_ATOMIC)
+		miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
+	else
+		miter->addr = kmap(miter->page) + off;
+
+	return true;
+}
+EXPORT_SYMBOL(sg_miter_next);
+
+/**
+ * sg_miter_stop - stop mapping iteration
+ * @miter: sg mapping iter to be stopped
+ *
+ * Description:
+ *   Stops mapping iterator @miter.  @miter should have been started
+ *   started using sg_miter_start().  A stopped iteration can be
+ *   resumed by calling sg_miter_next() on it.  This is useful when
+ *   resources (kmap) need to be released during iteration.
+ *
+ * Context:
+ *   IRQ disabled if the SG_MITER_ATOMIC is set.  Don't care otherwise.
+ */
+void sg_miter_stop(struct sg_mapping_iter *miter)
+{
+	WARN_ON(miter->consumed > miter->length);
+
+	/* drop resources from the last iteration */
+	if (miter->addr) {
+		miter->__offset += miter->consumed;
+
+		if (miter->__flags & SG_MITER_ATOMIC) {
+			WARN_ON(!irqs_disabled());
+			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
+		} else
+			kunmap(miter->addr);
+
+		miter->page = NULL;
+		miter->addr = NULL;
+		miter->length = 0;
+		miter->consumed = 0;
+	}
+}
+EXPORT_SYMBOL(sg_miter_stop);
+
+/**
  * sg_copy_buffer - Copy data between a linear buffer and an SG list
  * @sgl:		 The SG list
  * @nents:		 Number of SG entries
@@ -309,56 +420,29 @@ EXPORT_SYMBOL(sg_alloc_table);
 static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 			     void *buf, size_t buflen, int to_buffer)
 {
-	struct scatterlist *sg;
-	size_t buf_off = 0;
-	int i;
-
-	WARN_ON(!irqs_disabled());
-
-	for_each_sg(sgl, sg, nents, i) {
-		struct page *page;
-		int n = 0;
-		unsigned int sg_off = sg->offset;
-		unsigned int sg_copy = sg->length;
-
-		if (sg_copy > buflen)
-			sg_copy = buflen;
-		buflen -= sg_copy;
-
-		while (sg_copy > 0) {
-			unsigned int page_copy;
-			void *p;
-
-			page_copy = PAGE_SIZE - sg_off;
-			if (page_copy > sg_copy)
-				page_copy = sg_copy;
-
-			page = nth_page(sg_page(sg), n);
-			p = kmap_atomic(page, KM_BIO_SRC_IRQ);
-
-			if (to_buffer)
-				memcpy(buf + buf_off, p + sg_off, page_copy);
-			else {
-				memcpy(p + sg_off, buf + buf_off, page_copy);
-				flush_kernel_dcache_page(page);
-			}
-
-			kunmap_atomic(p, KM_BIO_SRC_IRQ);
-
-			buf_off += page_copy;
-			sg_off += page_copy;
-			if (sg_off == PAGE_SIZE) {
-				sg_off = 0;
-				n++;
-			}
-			sg_copy -= page_copy;
+	unsigned int offset = 0;
+	struct sg_mapping_iter miter;
+
+	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
+
+	while (sg_miter_next(&miter) && offset < buflen) {
+		unsigned int len;
+
+		len = min(miter.length, buflen - offset);
+
+		if (to_buffer)
+			memcpy(buf + offset, miter.addr, len);
+		else {
+			memcpy(miter.addr, buf + offset, len);
+			flush_kernel_dcache_page(miter.page);
 		}
 
-		if (!buflen)
-			break;
+		offset += len;
 	}
 
-	return buf_off;
+	sg_miter_stop(&miter);
+
+	return offset;
 }
 
 /**
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 3b4dc098181..c4381d9516f 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -11,7 +11,7 @@ notrace unsigned int debug_smp_processor_id(void)
 {
 	unsigned long preempt_count = preempt_count();
 	int this_cpu = raw_smp_processor_id();
-	cpumask_t this_mask;
+	cpumask_of_cpu_ptr_declare(this_mask);
 
 	if (likely(preempt_count))
 		goto out;
@@ -23,9 +23,9 @@ notrace unsigned int debug_smp_processor_id(void)
 	 * Kernel threads bound to a single CPU can safely use
 	 * smp_processor_id():
 	 */
-	this_mask = cpumask_of_cpu(this_cpu);
+	cpumask_of_cpu_ptr_next(this_mask, this_cpu);
 
-	if (cpus_equal(current->cpus_allowed, this_mask))
+	if (cpus_equal(current->cpus_allowed, *this_mask))
 		goto out;
 
 	/*
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 4b7c6075256..9fbcb44c554 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -267,7 +267,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
 		return ERR_PTR(-EINVAL);
 
 	ops = lookup_ts_algo(algo);
-#ifdef CONFIG_KMOD
+#ifdef CONFIG_MODULES
 	/*
 	 * Why not always autoload you may ask. Some users are
 	 * in a situation where requesting a module may deadlock,
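
The new lib/bcd.c above gives drivers shared bcd2bin()/bin2bcd() helpers; binary-coded decimal is how many RTC chips store their time fields. A minimal usage sketch, not part of the patch: the rtc_unpack_minutes()/rtc_pack_minutes() helpers and the 0x59 register value are purely illustrative.

#include <linux/bcd.h>
#include <linux/types.h>

/* Illustrative only: an RTC register holding "59 minutes" as BCD 0x59. */
static unsigned int rtc_unpack_minutes(u8 reg)
{
	return bcd2bin(reg);		/* 0x59 -> 59 */
}

static u8 rtc_pack_minutes(unsigned int min)
{
	return bin2bcd(min);		/* 59 -> 0x59 */
}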
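
The sg_mapping_iter API added to lib/scatterlist.c above is what the rewritten sg_copy_buffer() is built on: sg_miter_start() primes the iterator, each sg_miter_next() maps at most one page and exposes it through miter.addr/miter.length, and sg_miter_stop() drops the last mapping. Below is a minimal sketch of another caller, assuming only the interface shown in this diff (SG_MITER_ATOMIC is the sole flag at this point); sg_zero_fill() is hypothetical and not part of the patch.

#include <linux/irqflags.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Zero every byte described by an sg list, one page-sized mapping at a
 * time.  SG_MITER_ATOMIC maps pages with kmap_atomic(), and the kerneldoc
 * above requires IRQs to stay disabled while such a mapping is held. */
static size_t sg_zero_fill(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;
	unsigned long flags;
	size_t cleared = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);

	local_irq_save(flags);
	while (sg_miter_next(&miter)) {
		/* miter.addr/miter.length describe the current mapping,
		 * which never crosses a page boundary. */
		memset(miter.addr, 0, miter.length);
		cleared += miter.length;
	}
	sg_miter_stop(&miter);		/* releases the last mapping */
	local_irq_restore(flags);

	return cleared;
}

A caller that uses less than a full mapping can lower miter.consumed before the next sg_miter_next() or sg_miter_stop() call; the iterator then advances its internal offset by exactly that amount.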