Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug           |  38
-rw-r--r--  lib/Kconfig.kgdb            |   3
-rw-r--r--  lib/Makefile                |   6
-rw-r--r--  lib/bcd.c                   |  14
-rw-r--r--  lib/cmdline.c               |  16
-rw-r--r--  lib/cpumask.c               |   9
-rw-r--r--  lib/debugobjects.c          |  19
-rw-r--r--  lib/idr.c                   | 142
-rw-r--r--  lib/inflate.c               |  52
-rw-r--r--  lib/iomap.c                 |   3
-rw-r--r--  lib/kobject.c               |   9
-rw-r--r--  lib/kobject_uevent.c        |   6
-rw-r--r--  lib/list_debug.c            |  50
-rw-r--r--  lib/lzo/lzo1x_decompress.c  |   6
-rw-r--r--  lib/plist.c                 |  13
-rw-r--r--  lib/radix-tree.c            | 180
-rw-r--r--  lib/ratelimit.c             |  55
-rw-r--r--  lib/scatterlist.c           | 176
-rw-r--r--  lib/show_mem.c              |  63
-rw-r--r--  lib/smp_processor_id.c      |   6
-rw-r--r--  lib/swiotlb.c               |   4
-rw-r--r--  lib/syscall.c               |  75

22 files changed, 697 insertions(+), 248 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ba106db5a65..e1d4764435e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -150,7 +150,7 @@ config DETECT_SOFTLOCKUP
 	help
 	  Say Y here to enable the kernel to detect "soft lockups",
 	  which are bugs that cause the kernel to loop in kernel
-	  mode for more than 10 seconds, without giving other tasks a
+	  mode for more than 60 seconds, without giving other tasks a
 	  chance to run.

 	  When a soft-lockup is detected, the kernel will print the
@@ -162,6 +162,30 @@ config DETECT_SOFTLOCKUP
 	   can be detected via the NMI-watchdog, on platforms that
 	   support it.)

+config BOOTPARAM_SOFTLOCKUP_PANIC
+	bool "Panic (Reboot) On Soft Lockups"
+	depends on DETECT_SOFTLOCKUP
+	help
+	  Say Y here to enable the kernel to panic on "soft lockups",
+	  which are bugs that cause the kernel to loop in kernel
+	  mode for more than 60 seconds, without giving other tasks a
+	  chance to run.
+
+	  The panic can be used in combination with panic_timeout,
+	  to cause the system to reboot automatically after a
+	  lockup has been detected. This feature is useful for
+	  high-availability systems that have uptime guarantees and
+	  where a lockup must be resolved ASAP.
+
+	  Say N if unsure.
+
+config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
+	int
+	depends on DETECT_SOFTLOCKUP
+	range 0 1
+	default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
+	default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
+
 config SCHED_DEBUG
 	bool "Collect scheduler debugging info"
 	depends on DEBUG_KERNEL && PROC_FS
@@ -481,6 +505,18 @@ config DEBUG_WRITECOUNT

 	  If unsure, say N.

+config DEBUG_MEMORY_INIT
+	bool "Debug memory initialisation" if EMBEDDED
+	default !EMBEDDED
+	help
+	  Enable this for additional checks during memory initialisation.
+	  The sanity checks verify aspects of the VM such as the memory model
+	  and other information provided by the architecture. Verbose
+	  information will be printed at KERN_DEBUG loglevel depending
+	  on the mminit_loglevel= command-line option.
+
+	  If unsure, say Y
+
 config DEBUG_LIST
 	bool "Debug linked list manipulation"
 	depends on DEBUG_KERNEL
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index a5d4b1dac2a..2cfd2721f7e 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,7 +1,4 @@
-config HAVE_ARCH_KGDB_SHADOW_INFO
-	bool
-
 config HAVE_ARCH_KGDB
 	bool

diff --git a/lib/Makefile b/lib/Makefile
index 818c4d45551..3b1f94bbe9d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,14 +11,14 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o \
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
-	 proportions.o prio_heap.o ratelimit.o
+	 proportions.o prio_heap.o ratelimit.o show_mem.o

 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o

 lib-y	+= kobject.o kref.o klist.o

-obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
+obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o

 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
@@ -78,6 +78,8 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o

 obj-$(CONFIG_HAVE_LMB) += lmb.o

+obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h

diff --git a/lib/bcd.c b/lib/bcd.c
new file mode 100644
index 00000000000..d74257fd0fe
--- /dev/null
+++ b/lib/bcd.c
@@ -0,0 +1,14 @@
+#include <linux/bcd.h>
+#include <linux/module.h>
+
+unsigned bcd2bin(unsigned char val)
+{
+	return (val & 0x0f) + (val >> 4) * 10;
+}
+EXPORT_SYMBOL(bcd2bin);
+
+unsigned char bin2bcd(unsigned val)
+{
+	return ((val / 10) << 4) + val % 10;
+}
+EXPORT_SYMBOL(bin2bcd);
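lib/bcd.c gives the kernel one shared copy of the packed-BCD helpers that RTC and similar drivers used to duplicate. A userspace re-statement of the two one-liners with an example value, to show the semantics (illustrative only, not part of the patch):

	#include <stdio.h>

	/* Same arithmetic as the new lib/bcd.c helpers. */
	static unsigned bcd2bin(unsigned char val)
	{
		return (val & 0x0f) + (val >> 4) * 10;	/* low nibble + tens */
	}

	static unsigned char bin2bcd(unsigned val)
	{
		return ((val / 10) << 4) + val % 10;	/* tens in high nibble */
	}

	int main(void)
	{
		printf("%u\n", bcd2bin(0x59));	/* prints 59 */
		printf("%#x\n", bin2bcd(59));	/* prints 0x59 */
		return 0;
	}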
diff --git a/lib/cmdline.c b/lib/cmdline.c
index f596c08d213..5ba8a942a47 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -116,7 +116,7 @@ char *get_options(const char *str, int nints, int *ints)
 /**
  *	memparse - parse a string with mem suffixes into a number
  *	@ptr: Where parse begins
- *	@retptr: (output) Pointer to next char after parse completes
+ *	@retptr: (output) Optional pointer to next char after parse completes
  *
  *	Parses a string into a number.  The number stored at @ptr is
  *	potentially suffixed with %K (for kilobytes, or 1024 bytes),
@@ -126,11 +126,13 @@ char *get_options(const char *str, int nints, int *ints)
  *	megabyte, or one gigabyte, respectively.
  */
-unsigned long long memparse (char *ptr, char **retptr)
+unsigned long long memparse(char *ptr, char **retptr)
 {
-	unsigned long long ret = simple_strtoull (ptr, retptr, 0);
+	char *endptr;	/* local pointer to end of parsed string */

-	switch (**retptr) {
+	unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
+
+	switch (*endptr) {
 	case 'G':
 	case 'g':
 		ret <<= 10;
@@ -140,10 +142,14 @@ unsigned long long memparse (char *ptr, char **retptr)
 	case 'K':
 	case 'k':
 		ret <<= 10;
-		(*retptr)++;
+		endptr++;
 	default:
 		break;
 	}
+
+	if (retptr)
+		*retptr = endptr;
+
 	return ret;
 }
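The behavioural change here is that @retptr may now be NULL when the caller only wants the value. A short sketch of the parsing semantics (example strings invented):

	char *end;
	unsigned long long sz;

	sz = memparse("512K,ro", &end);	/* sz == 524288, end points at ",ro" */
	sz = memparse("1G", NULL);	/* now allowed: sz == 1073741824 */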
diff --git a/lib/cpumask.c b/lib/cpumask.c
index bb4f76d3c3e..5f97dc25ef9 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp)
 }
 EXPORT_SYMBOL(__next_cpu);

+#if NR_CPUS > 64
+int __next_cpu_nr(int n, const cpumask_t *srcp)
+{
+	return min_t(int, nr_cpu_ids,
+				find_next_bit(srcp->bits, nr_cpu_ids, n+1));
+}
+EXPORT_SYMBOL(__next_cpu_nr);
+#endif
+
 int __any_online_cpu(const cpumask_t *mask)
 {
 	int cpu;
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 85b18d79be8..45a6bde762d 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -205,9 +205,8 @@ static void debug_print_object(struct debug_obj *obj, char *msg)

 	if (limit < 5 && obj->descr != descr_test) {
 		limit++;
-		printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
+		WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
 		       obj_states[obj->state], obj->descr->name);
-		WARN_ON(1);
 	}
 	debug_objects_warnings++;
 }
@@ -226,15 +225,13 @@ debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),

 static void debug_object_is_on_stack(void *addr, int onstack)
 {
-	void *stack = current->stack;
 	int is_on_stack;
 	static int limit;

 	if (limit > 4)
 		return;

-	is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE));
-
+	is_on_stack = object_is_on_stack(addr);
 	if (is_on_stack == onstack)
 		return;

@@ -735,26 +732,22 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)

 	obj = lookup_object(addr, db);
 	if (!obj && state != ODEBUG_STATE_NONE) {
-		printk(KERN_ERR "ODEBUG: selftest object not found\n");
-		WARN_ON(1);
+		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
 		goto out;
 	}
 	if (obj && obj->state != state) {
-		printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
+		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
 		       obj->state, state);
-		WARN_ON(1);
 		goto out;
 	}
 	if (fixups != debug_objects_fixups) {
-		printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
+		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
 		       fixups, debug_objects_fixups);
-		WARN_ON(1);
 		goto out;
 	}
 	if (warnings != debug_objects_warnings) {
-		printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
+		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
 		       warnings, debug_objects_warnings);
-		WARN_ON(1);
 		goto out;
 	}
 	res = 0;
diff --git a/lib/idr.c b/lib/idr.c
index 7a02e173f02..e728c7fccc4 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,6 +6,8 @@
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
+ * Modified by Nadia Derbey to make it RCU safe.
+ *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
@@ -35,7 +37,7 @@

 static struct kmem_cache *idr_layer_cache;

-static struct idr_layer *alloc_layer(struct idr *idp)
+static struct idr_layer *get_from_free_list(struct idr *idp)
 {
 	struct idr_layer *p;
 	unsigned long flags;
@@ -50,15 +52,28 @@ static struct idr_layer *alloc_layer(struct idr *idp)
 	return(p);
 }

+static void idr_layer_rcu_free(struct rcu_head *head)
+{
+	struct idr_layer *layer;
+
+	layer = container_of(head, struct idr_layer, rcu_head);
+	kmem_cache_free(idr_layer_cache, layer);
+}
+
+static inline void free_layer(struct idr_layer *p)
+{
+	call_rcu(&p->rcu_head, idr_layer_rcu_free);
+}
+
 /* only called when idp->lock is held */
-static void __free_layer(struct idr *idp, struct idr_layer *p)
+static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
 {
 	p->ary[0] = idp->id_free;
 	idp->id_free = p;
 	idp->id_free_cnt++;
 }

-static void free_layer(struct idr *idp, struct idr_layer *p)
+static void move_to_free_list(struct idr *idp, struct idr_layer *p)
 {
 	unsigned long flags;

@@ -66,7 +81,7 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
	 * Depends on the return element being zeroed.
	 */
 	spin_lock_irqsave(&idp->lock, flags);
-	__free_layer(idp, p);
+	__move_to_free_list(idp, p);
 	spin_unlock_irqrestore(&idp->lock, flags);
 }

@@ -96,7 +111,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
- * following function.  It preallocates enough memory to satisfy
+ * idr_get_new* functions. It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
@@ -109,7 +124,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
 		if (new == NULL)
 			return (0);
-		free_layer(idp, new);
+		move_to_free_list(idp, new);
 	}
 	return 1;
 }
@@ -143,7 +158,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			/* if already at the top layer, we need to grow */
 			if (!(p = pa[l])) {
 				*starting_id = id;
-				return -2;
+				return IDR_NEED_TO_GROW;
 			}

 			/* If we need to go up one layer, continue the
@@ -160,16 +175,17 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			id = ((id >> sh) ^ n ^ m) << sh;
 		}
 		if ((id >= MAX_ID_BIT) || (id < 0))
-			return -3;
+			return IDR_NOMORE_SPACE;
 		if (l == 0)
 			break;
 		/*
		 * Create the layer below if it is missing.
		 */
 		if (!p->ary[m]) {
-			if (!(new = alloc_layer(idp)))
+			new = get_from_free_list(idp);
+			if (!new)
 				return -1;
-			p->ary[m] = new;
+			rcu_assign_pointer(p->ary[m], new);
 			p->count++;
 		}
 		pa[l--] = p;
@@ -192,7 +208,7 @@ build_up:
 	p = idp->top;
 	layers = idp->layers;
 	if (unlikely(!p)) {
-		if (!(p = alloc_layer(idp)))
+		if (!(p = get_from_free_list(idp)))
 			return -1;
 		layers = 1;
 	}
@@ -204,7 +220,7 @@ build_up:
 		layers++;
 		if (!p->count)
 			continue;
-		if (!(new = alloc_layer(idp))) {
+		if (!(new = get_from_free_list(idp))) {
 			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
@@ -214,7 +230,7 @@ build_up:
 				p = p->ary[0];
 				new->ary[0] = NULL;
 				new->bitmap = new->count = 0;
-				__free_layer(idp, new);
+				__move_to_free_list(idp, new);
 			}
 			spin_unlock_irqrestore(&idp->lock, flags);
 			return -1;
@@ -225,10 +241,10 @@ build_up:
 		__set_bit(0, &new->bitmap);
 		p = new;
 	}
-	idp->top = p;
+	rcu_assign_pointer(idp->top, p);
 	idp->layers = layers;
 	v = sub_alloc(idp, &id, pa);
-	if (v == -2)
+	if (v == IDR_NEED_TO_GROW)
 		goto build_up;
 	return(v);
 }
@@ -244,7 +260,8 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
-		pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
+		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
+				(struct idr_layer *)ptr);
 		pa[0]->count++;
 		idr_mark_full(pa, id);
 	}
@@ -277,12 +294,8 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
-	if (rv < 0) {
-		if (rv == -1)
-			return -EAGAIN;
-		else /* Will be -3 */
-			return -ENOSPC;
-	}
+	if (rv < 0)
+		return _idr_rc_to_errno(rv);
 	*id = rv;
 	return 0;
 }
@@ -312,12 +325,8 @@ int idr_get_new(struct idr *idp, void *ptr, int *id)
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
-	if (rv < 0) {
-		if (rv == -1)
-			return -EAGAIN;
-		else /* Will be -3 */
-			return -ENOSPC;
-	}
+	if (rv < 0)
+		return _idr_rc_to_errno(rv);
 	*id = rv;
 	return 0;
 }
@@ -325,7 +334,8 @@ EXPORT_SYMBOL(idr_get_new);

 static void idr_remove_warning(int id)
 {
-	printk("idr_remove called for id=%d which is not allocated.\n", id);
+	printk(KERN_WARNING
+		"idr_remove called for id=%d which is not allocated.\n", id);
 	dump_stack();
 }

@@ -334,6 +344,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
 	struct idr_layer *p = idp->top;
 	struct idr_layer **pa[MAX_LEVEL];
 	struct idr_layer ***paa = &pa[0];
+	struct idr_layer *to_free;
 	int n;

 	*paa = NULL;
@@ -349,13 +360,18 @@ static void sub_remove(struct idr *idp, int shift, int id)
 	n = id & IDR_MASK;
 	if (likely(p != NULL && test_bit(n, &p->bitmap))){
 		__clear_bit(n, &p->bitmap);
-		p->ary[n] = NULL;
+		rcu_assign_pointer(p->ary[n], NULL);
+		to_free = NULL;
 		while(*paa && ! --((**paa)->count)){
-			free_layer(idp, **paa);
+			if (to_free)
+				free_layer(to_free);
+			to_free = **paa;
 			**paa-- = NULL;
 		}
 		if (!*paa)
 			idp->layers = 0;
+		if (to_free)
+			free_layer(to_free);
 	} else
 		idr_remove_warning(id);
 }
@@ -368,22 +384,34 @@ static void sub_remove(struct idr *idp, int shift, int id)
 void idr_remove(struct idr *idp, int id)
 {
 	struct idr_layer *p;
+	struct idr_layer *to_free;

 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;

 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
 	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
-	    idp->top->ary[0]) {  // We can drop a layer
-
+	    idp->top->ary[0]) {
+		/*
+		 * Single child at leftmost slot: we can shrink the tree.
+		 * This level is not needed anymore since when layers are
+		 * inserted, they are inserted at the top of the existing
+		 * tree.
+		 */
+		to_free = idp->top;
 		p = idp->top->ary[0];
-		idp->top->bitmap = idp->top->count = 0;
-		free_layer(idp, idp->top);
-		idp->top = p;
+		rcu_assign_pointer(idp->top, p);
 		--idp->layers;
+		to_free->bitmap = to_free->count = 0;
+		free_layer(to_free);
 	}
 	while (idp->id_free_cnt >= IDR_FREE_MAX) {
-		p = alloc_layer(idp);
+		p = get_from_free_list(idp);
+		/*
+		 * Note: we don't call the rcu callback here, since the only
+		 * layers that fall into the freelist are those that have been
+		 * preallocated.
+		 */
 		kmem_cache_free(idr_layer_cache, p);
 	}
 	return;
@@ -424,15 +452,13 @@ void idr_remove_all(struct idr *idp)

 		id += 1 << n;
 		while (n < fls(id)) {
-			if (p) {
-				memset(p, 0, sizeof *p);
-				free_layer(idp, p);
-			}
+			if (p)
+				free_layer(p);
 			n += IDR_BITS;
 			p = *--paa;
 		}
 	}
-	idp->top = NULL;
+	rcu_assign_pointer(idp->top, NULL);
 	idp->layers = 0;
 }
 EXPORT_SYMBOL(idr_remove_all);
@@ -444,7 +470,7 @@ EXPORT_SYMBOL(idr_remove_all);
 void idr_destroy(struct idr *idp)
 {
 	while (idp->id_free_cnt) {
-		struct idr_layer *p = alloc_layer(idp);
+		struct idr_layer *p = get_from_free_list(idp);
 		kmem_cache_free(idr_layer_cache, p);
 	}
 }
@@ -459,7 +485,8 @@ EXPORT_SYMBOL(idr_destroy);
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
- * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
+ * This function can be called under rcu_read_lock(), given that the leaf
+ * pointers lifetimes are correctly managed.
 */
 void *idr_find(struct idr *idp, int id)
 {
@@ -467,7 +494,7 @@ void *idr_find(struct idr *idp, int id)
 	struct idr_layer *p;

 	n = idp->layers * IDR_BITS;
-	p = idp->top;
+	p = rcu_dereference(idp->top);

 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
@@ -477,7 +504,7 @@ void *idr_find(struct idr *idp, int id)

 	while (n > 0 && p) {
 		n -= IDR_BITS;
-		p = p->ary[(id >> n) & IDR_MASK];
+		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 	}
 	return((void *)p);
 }
@@ -510,7 +537,7 @@ int idr_for_each(struct idr *idp,
 	struct idr_layer **paa = &pa[0];

 	n = idp->layers * IDR_BITS;
-	p = idp->top;
+	p = rcu_dereference(idp->top);
 	max = 1 << n;

 	id = 0;
@@ -518,7 +545,7 @@ int idr_for_each(struct idr *idp,
 		while (n > 0 && p) {
 			n -= IDR_BITS;
 			*paa++ = p;
-			p = p->ary[(id >> n) & IDR_MASK];
+			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 		}

 		if (p) {
@@ -548,7 +575,7 @@ EXPORT_SYMBOL(idr_for_each);
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
- * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
+ * The caller must serialize with writers.
 */
 void *idr_replace(struct idr *idp, void *ptr, int id)
 {
@@ -574,13 +601,13 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 		return ERR_PTR(-ENOENT);

 	old_p = p->ary[n];
-	p->ary[n] = ptr;
+	rcu_assign_pointer(p->ary[n], ptr);

 	return old_p;
 }
 EXPORT_SYMBOL(idr_replace);

-static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
+static void idr_cache_ctor(void *idr_layer)
 {
 	memset(idr_layer, 0, sizeof(struct idr_layer));
 }
@@ -694,12 +721,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 restart:
 	/* get vacant slot */
 	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
-	if (t < 0) {
-		if (t == -1)
-			return -EAGAIN;
-		else /* will be -3 */
-			return -ENOSPC;
-	}
+	if (t < 0)
+		return _idr_rc_to_errno(t);

 	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
 		return -ENOSPC;
@@ -720,7 +743,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 			return -EAGAIN;

 		memset(bitmap, 0, sizeof(struct ida_bitmap));
-		pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
+		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
+				(void *)bitmap);
 		pa[0]->count++;
 	}

@@ -749,7 +773,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
	 * allocation.
	 */
 	if (ida->idr.id_free_cnt || ida->free_bitmap) {
-		struct idr_layer *p = alloc_layer(&ida->idr);
+		struct idr_layer *p = get_from_free_list(&ida->idr);
 		if (p)
 			kmem_cache_free(idr_layer_cache, p);
 	}
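Taken together, the idr changes publish every pointer with rcu_assign_pointer() and defer layer freeing through call_rcu(), so lookups no longer need the writer's lock. A sketch of the usage this enables; my_idr, my_lock and obj are hypothetical, and the stored objects themselves must have RCU-managed lifetimes, as the new idr_find() comment requires:

	/* Writer: unchanged idr_pre_get() + lock pattern. */
	int err, id;

	do {
		if (!idr_pre_get(&my_idr, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(&my_lock);
		err = idr_get_new(&my_idr, obj, &id);
		spin_unlock(&my_lock);
	} while (err == -EAGAIN);

	/* Reader: an RCU read section now suffices instead of my_lock. */
	rcu_read_lock();
	obj = idr_find(&my_idr, id);
	rcu_read_unlock();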
diff --git a/lib/inflate.c b/lib/inflate.c
index 9762294be06..1a8e8a97812 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -230,6 +230,45 @@ STATIC const ush mask_bits[] = {
 #define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
 #define DUMPBITS(n) {b>>=(n);k-=(n);}

+#ifndef NO_INFLATE_MALLOC
+/* A trivial malloc implementation, adapted from
+ *  malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ */
+
+static unsigned long malloc_ptr;
+static int malloc_count;
+
+static void *malloc(int size)
+{
+	void *p;
+
+	if (size < 0)
+		error("Malloc error");
+	if (!malloc_ptr)
+		malloc_ptr = free_mem_ptr;
+
+	malloc_ptr = (malloc_ptr + 3) & ~3;	/* Align */
+
+	p = (void *)malloc_ptr;
+	malloc_ptr += size;
+
+	if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
+		error("Out of memory");
+
+	malloc_count++;
+	return p;
+}
+
+static void free(void *where)
+{
+	malloc_count--;
+	if (!malloc_count)
+		malloc_ptr = free_mem_ptr;
+}
+#else
+#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define free(a) kfree(a)
+#endif

 /*
    Huffman code decoding is performed using a multi-level table lookup.
@@ -1045,7 +1084,6 @@ STATIC int INIT inflate(void)
   int e;                /* last block flag */
   int r;                /* result code */
   unsigned h;           /* maximum struct huft's malloc'ed */
-  void *ptr;

   /* initialize window, bit buffer */
   wp = 0;
@@ -1057,12 +1095,12 @@ STATIC int INIT inflate(void)
   h = 0;
   do {
     hufts = 0;
-    gzip_mark(&ptr);
-    if ((r = inflate_block(&e)) != 0) {
-      gzip_release(&ptr);
-      return r;
-    }
-    gzip_release(&ptr);
+#ifdef ARCH_HAS_DECOMP_WDOG
+    arch_decomp_wdog();
+#endif
+    r = inflate_block(&e);
+    if (r)
+	return r;
     if (hufts > h)
       h = hufts;
   } while (!e);
diff --git a/lib/iomap.c b/lib/iomap.c
index 37a3ea4cac9..d3222938515 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -40,8 +40,7 @@ static void bad_io_access(unsigned long port, const char *access)
 	static int count = 10;
 	if (count) {
 		count--;
-		printk(KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
-		WARN_ON(1);
+		WARN(1, KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
 	}
 }
diff --git a/lib/kobject.c b/lib/kobject.c
index 744401571ed..bd732ffebc8 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -164,9 +164,8 @@ static int kobject_add_internal(struct kobject *kobj)
 		return -ENOENT;

 	if (!kobj->name || !kobj->name[0]) {
-		pr_debug("kobject: (%p): attempted to be registered with empty "
+		WARN(1, "kobject: (%p): attempted to be registered with empty "
			 "name!\n", kobj);
-		WARN_ON(1);
 		return -EINVAL;
 	}

@@ -583,12 +582,10 @@ static void kobject_release(struct kref *kref)
 void kobject_put(struct kobject *kobj)
 {
 	if (kobj) {
-		if (!kobj->state_initialized) {
-			printk(KERN_WARNING "kobject: '%s' (%p): is not "
+		if (!kobj->state_initialized)
+			WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
			       "initialized, yet kobject_put() is being "
			       "called.\n", kobject_name(kobj), kobj);
-			WARN_ON(1);
-		}
 		kref_put(&kobj->kref, kobject_release);
 	}
 }
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 9f8d599459d..3f914725bda 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -285,8 +285,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
 	int len;

 	if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
-		printk(KERN_ERR "add_uevent_var: too many keys\n");
-		WARN_ON(1);
+		WARN(1, KERN_ERR "add_uevent_var: too many keys\n");
 		return -ENOMEM;
 	}

@@ -297,8 +296,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
 	va_end(args);

 	if (len >= (sizeof(env->buf) - env->buflen)) {
-		printk(KERN_ERR "add_uevent_var: buffer size too small\n");
-		WARN_ON(1);
+		WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n");
 		return -ENOMEM;
 	}
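The conversions in debugobjects.c, iomap.c, kobject.c and kobject_uevent.c all follow one pattern: the then-new WARN(condition, fmt, ...) macro prints the message as part of the backtrace report, replacing a separate printk() plus WARN_ON(1). Schematically, with an invented message and condition:

	/* before: two calls, message printed apart from the trace */
	printk(KERN_ERR "frob: bad state %d\n", state);
	WARN_ON(1);

	/* after: one call, format string folded into the WARN output */
	WARN(1, KERN_ERR "frob: bad state %d\n", state);

	/* the conditional form also subsumes if () + WARN_ON(1) */
	WARN(count > limit, "frob: count %d over limit %d\n", count, limit);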
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 4350ba9655b..1a39f4e3ae1 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -20,18 +20,14 @@
 void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
 {
-	if (unlikely(next->prev != prev)) {
-		printk(KERN_ERR "list_add corruption. next->prev should be "
-			"prev (%p), but was %p. (next=%p).\n",
-			prev, next->prev, next);
-		BUG();
-	}
-	if (unlikely(prev->next != next)) {
-		printk(KERN_ERR "list_add corruption. prev->next should be "
-			"next (%p), but was %p. (prev=%p).\n",
-			next, prev->next, prev);
-		BUG();
-	}
+	WARN(next->prev != prev,
+		"list_add corruption. next->prev should be "
+		"prev (%p), but was %p. (next=%p).\n",
+		prev, next->prev, next);
+	WARN(prev->next != next,
+		"list_add corruption. prev->next should be "
+		"next (%p), but was %p. (prev=%p).\n",
+		next, prev->next, prev);
 	next->prev = new;
 	new->next = next;
 	new->prev = prev;
@@ -40,20 +36,6 @@ void __list_add(struct list_head *new,
 EXPORT_SYMBOL(__list_add);

 /**
- * list_add - add a new entry
- * @new: new entry to be added
- * @head: list head to add it after
- *
- * Insert a new entry after the specified head.
- * This is good for implementing stacks.
- */
-void list_add(struct list_head *new, struct list_head *head)
-{
-	__list_add(new, head, head->next);
-}
-EXPORT_SYMBOL(list_add);
-
-/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty on entry does not return true after this, the entry is
@@ -61,16 +43,12 @@ EXPORT_SYMBOL(list_add);
 */
 void list_del(struct list_head *entry)
 {
-	if (unlikely(entry->prev->next != entry)) {
-		printk(KERN_ERR "list_del corruption. prev->next should be %p, "
-				"but was %p\n", entry, entry->prev->next);
-		BUG();
-	}
-	if (unlikely(entry->next->prev != entry)) {
-		printk(KERN_ERR "list_del corruption. next->prev should be %p, "
-				"but was %p\n", entry, entry->next->prev);
-		BUG();
-	}
+	WARN(entry->prev->next != entry,
+		"list_del corruption. prev->next should be %p, "
+		"but was %p\n", entry, entry->prev->next);
+	WARN(entry->next->prev != entry,
+		"list_del corruption. next->prev should be %p, "
+		"but was %p\n", entry, entry->next->prev);
 	__list_del(entry->prev, entry->next);
 	entry->next = LIST_POISON1;
 	entry->prev = LIST_POISON2;
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c
index 77f0f9b775a..5dc6b29c157 100644
--- a/lib/lzo/lzo1x_decompress.c
+++ b/lib/lzo/lzo1x_decompress.c
@@ -138,8 +138,7 @@ match:
					t += 31 + *ip++;
			}
			m_pos = op - 1;
-			m_pos -= le16_to_cpu(get_unaligned(
-					(const unsigned short *)ip)) >> 2;
+			m_pos -= get_unaligned_le16(ip) >> 2;
			ip += 2;
		} else if (t >= 16) {
			m_pos = op;
@@ -157,8 +156,7 @@ match:
				}
				t += 7 + *ip++;
			}
-			m_pos -= le16_to_cpu(get_unaligned(
-					(const unsigned short *)ip)) >> 2;
+			m_pos -= get_unaligned_le16(ip) >> 2;
			ip += 2;
			if (m_pos == op)
				goto eof_found;
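get_unaligned_le16() folds the old two-step load-then-convert into a single helper. Its effect, restated in portable userspace C purely for illustration:

	#include <stdint.h>

	/* Read two bytes at any alignment, least-significant byte first. */
	static uint16_t load_le16(const void *p)
	{
		const uint8_t *b = p;
		return (uint16_t)b[0] | ((uint16_t)b[1] << 8);
	}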
diff --git a/lib/plist.c b/lib/plist.c
index 3074a02272f..d6c64a824e1 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -31,12 +31,13 @@
 static void plist_check_prev_next(struct list_head *t, struct list_head *p,
				  struct list_head *n)
 {
-	if (n->prev != p || p->next != n) {
-		printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev);
-		printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev);
-		printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev);
-		WARN_ON(1);
-	}
+	WARN(n->prev != p || p->next != n,
+			"top: %p, n: %p, p: %p\n"
+			"prev: %p, n: %p, p: %p\n"
+			"next: %p, n: %p, p: %p\n",
+			 t, t->next, t->prev,
+			 p, p->next, p->prev,
+			 n, n->next, n->prev);
 }

 static void plist_check_list(struct list_head *top)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 56ec21a7f73..be86b32bc87 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -359,18 +359,17 @@ EXPORT_SYMBOL(radix_tree_insert);
 *	Returns:  the slot corresponding to the position @index in the
 *	radix tree @root. This is useful for update-if-exists operations.
 *
- *	This function cannot be called under rcu_read_lock, it must be
- *	excluded from writers, as must the returned slot for subsequent
- *	use by radix_tree_deref_slot() and radix_tree_replace slot.
- *	Caller must hold tree write locked across slot lookup and
- *	replace.
+ *	This function can be called under rcu_read_lock iff the slot is not
+ *	modified by radix_tree_replace_slot, otherwise it must be called
+ *	exclusive from other writers. Any dereference of the slot must be done
+ *	using radix_tree_deref_slot.
 */
 void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
 {
 	unsigned int height, shift;
 	struct radix_tree_node *node, **slot;

-	node = root->rnode;
+	node = rcu_dereference(root->rnode);
 	if (node == NULL)
 		return NULL;

@@ -390,7 +389,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
 	do {
 		slot = (struct radix_tree_node **)
			(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
-		node = *slot;
+		node = rcu_dereference(*slot);
 		if (node == NULL)
 			return NULL;

@@ -667,7 +666,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
 EXPORT_SYMBOL(radix_tree_next_hole);

 static unsigned int
-__lookup(struct radix_tree_node *slot, void **results, unsigned long index,
+__lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
	unsigned int max_items, unsigned long *next_index)
 {
 	unsigned int nr_found = 0;
@@ -701,11 +700,9 @@ __lookup(struct radix_tree_node *slot, void **results, unsigned long index,

	/* Bottom level: grab some items */
	for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
-		struct radix_tree_node *node;
		index++;
-		node = slot->slots[i];
-		if (node) {
-			results[nr_found++] = rcu_dereference(node);
+		if (slot->slots[i]) {
+			results[nr_found++] = &(slot->slots[i]);
			if (nr_found == max_items)
				goto out;
		}
@@ -759,13 +756,22 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,

 	ret = 0;
 	while (ret < max_items) {
-		unsigned int nr_found;
+		unsigned int nr_found, slots_found, i;
 		unsigned long next_index;	/* Index of next search */

 		if (cur_index > max_index)
 			break;
-		nr_found = __lookup(node, results + ret, cur_index,
+		slots_found = __lookup(node, (void ***)results + ret, cur_index,
					max_items - ret, &next_index);
+		nr_found = 0;
+		for (i = 0; i < slots_found; i++) {
+			struct radix_tree_node *slot;
+			slot = *(((void ***)results)[ret + i]);
+			if (!slot)
+				continue;
+			results[ret + nr_found] = rcu_dereference(slot);
+			nr_found++;
+		}
 		ret += nr_found;
 		if (next_index == 0)
 			break;
@@ -776,12 +782,71 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 }
 EXPORT_SYMBOL(radix_tree_gang_lookup);

+/**
+ *	radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
+ *	@root:		radix tree root
+ *	@results:	where the results of the lookup are placed
+ *	@first_index:	start the lookup from this key
+ *	@max_items:	place up to this many items at *results
+ *
+ *	Performs an index-ascending scan of the tree for present items.  Places
+ *	their slots at *@results and returns the number of items which were
+ *	placed at *@results.
+ *
+ *	The implementation is naive.
+ *
+ *	Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
+ *	be dereferenced with radix_tree_deref_slot, and if using only RCU
+ *	protection, radix_tree_deref_slot may fail requiring a retry.
+ */
+unsigned int
+radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
+			unsigned long first_index, unsigned int max_items)
+{
+	unsigned long max_index;
+	struct radix_tree_node *node;
+	unsigned long cur_index = first_index;
+	unsigned int ret;
+
+	node = rcu_dereference(root->rnode);
+	if (!node)
+		return 0;
+
+	if (!radix_tree_is_indirect_ptr(node)) {
+		if (first_index > 0)
+			return 0;
+		results[0] = (void **)&root->rnode;
+		return 1;
+	}
+	node = radix_tree_indirect_to_ptr(node);
+
+	max_index = radix_tree_maxindex(node->height);
+
+	ret = 0;
+	while (ret < max_items) {
+		unsigned int slots_found;
+		unsigned long next_index;	/* Index of next search */
+
+		if (cur_index > max_index)
+			break;
+		slots_found = __lookup(node, results + ret, cur_index,
+					max_items - ret, &next_index);
+		ret += slots_found;
+		if (next_index == 0)
+			break;
+		cur_index = next_index;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
+
 /*
 * FIXME: the two tag_get()s here should use find_next_bit() instead of
 * open-coding the search.
 */
 static unsigned int
-__lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
+__lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index,
	unsigned int max_items, unsigned long *next_index, unsigned int tag)
 {
 	unsigned int nr_found = 0;
@@ -811,11 +876,9 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
		unsigned long j = index & RADIX_TREE_MAP_MASK;

		for ( ; j < RADIX_TREE_MAP_SIZE; j++) {
-			struct radix_tree_node *node;
			index++;
			if (!tag_get(slot, tag, j))
				continue;
-			node = slot->slots[j];
			/*
			 * Even though the tag was found set, we need to
			 * recheck that we have a non-NULL node, because
@@ -826,9 +889,8 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
			 * lookup ->slots[x] without a lock (ie. can't
			 * rely on its value remaining the same).
			 */
-			if (node) {
-				node = rcu_dereference(node);
-				results[nr_found++] = node;
+			if (slot->slots[j]) {
+				results[nr_found++] = &(slot->slots[j]);
				if (nr_found == max_items)
					goto out;
			}
@@ -887,13 +949,22 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,

 	ret = 0;
 	while (ret < max_items) {
-		unsigned int nr_found;
+		unsigned int nr_found, slots_found, i;
 		unsigned long next_index;	/* Index of next search */

 		if (cur_index > max_index)
 			break;
-		nr_found = __lookup_tag(node, results + ret, cur_index,
-					max_items - ret, &next_index, tag);
+		slots_found = __lookup_tag(node, (void ***)results + ret,
+				cur_index, max_items - ret, &next_index, tag);
+		nr_found = 0;
+		for (i = 0; i < slots_found; i++) {
+			struct radix_tree_node *slot;
+			slot = *(((void ***)results)[ret + i]);
+			if (!slot)
+				continue;
+			results[ret + nr_found] = rcu_dereference(slot);
+			nr_found++;
+		}
 		ret += nr_found;
 		if (next_index == 0)
 			break;
@@ -905,6 +976,67 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
 EXPORT_SYMBOL(radix_tree_gang_lookup_tag);

 /**
+ *	radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
+ *					  radix tree based on a tag
+ *	@root:		radix tree root
+ *	@results:	where the results of the lookup are placed
+ *	@first_index:	start the lookup from this key
+ *	@max_items:	place up to this many items at *results
+ *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
+ *
+ *	Performs an index-ascending scan of the tree for present items which
+ *	have the tag indexed by @tag set.
+ *	Places the slots at *@results and
+ *	returns the number of slots which were placed at *@results.
+ */
+unsigned int
+radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
+		unsigned long first_index, unsigned int max_items,
+		unsigned int tag)
+{
+	struct radix_tree_node *node;
+	unsigned long max_index;
+	unsigned long cur_index = first_index;
+	unsigned int ret;
+
+	/* check the root's tag bit */
+	if (!root_tag_get(root, tag))
+		return 0;
+
+	node = rcu_dereference(root->rnode);
+	if (!node)
+		return 0;
+
+	if (!radix_tree_is_indirect_ptr(node)) {
+		if (first_index > 0)
+			return 0;
+		results[0] = (void **)&root->rnode;
+		return 1;
+	}
+	node = radix_tree_indirect_to_ptr(node);
+
+	max_index = radix_tree_maxindex(node->height);
+
+	ret = 0;
+	while (ret < max_items) {
+		unsigned int slots_found;
+		unsigned long next_index;	/* Index of next search */
+
+		if (cur_index > max_index)
+			break;
+		slots_found = __lookup_tag(node, results + ret,
+				cur_index, max_items - ret, &next_index, tag);
+		ret += slots_found;
+		if (next_index == 0)
+			break;
+		cur_index = next_index;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
+
+
+/**
 *	radix_tree_shrink    -    shrink height of a radix tree to minimal
 *	@root		radix tree root
 */
@@ -1051,7 +1183,7 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
 EXPORT_SYMBOL(radix_tree_tagged);

 static void
-radix_tree_node_ctor(struct kmem_cache *cachep, void *node)
+radix_tree_node_ctor(void *node)
 {
	memset(node, 0, sizeof(struct radix_tree_node));
 }
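A sketch of how a reader might consume the new slot-returning gang lookup (my_tree, struct my_item and use_item() are hypothetical). Per the kernel-doc above, under RCU-only protection each slot must be re-read with radix_tree_deref_slot(), and a NULL result means the entry vanished concurrently:

	void **slots[16];
	unsigned int i, n;

	rcu_read_lock();
	n = radix_tree_gang_lookup_slot(&my_tree, slots, 0, 16);
	for (i = 0; i < n; i++) {
		struct my_item *item = radix_tree_deref_slot(slots[i]);

		if (!item)
			continue;	/* raced with removal; caller may retry */
		use_item(item);
	}
	rcu_read_unlock();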
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 485e3040dcd..35136671b21 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -3,6 +3,9 @@
 *
 * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
 *
+ * 2008-05-01 rewrite the function and use a ratelimit_state data struct as
+ * parameter. Now every user can use their own standalone ratelimit_state.
+ *
 * This file is released under the GPLv2.
 *
 */

@@ -11,41 +14,43 @@
 #include <linux/jiffies.h>
 #include <linux/module.h>

+static DEFINE_SPINLOCK(ratelimit_lock);
+static unsigned long flags;
+
 /*
 * __ratelimit - rate limiting
- * @ratelimit_jiffies: minimum time in jiffies between two callbacks
- * @ratelimit_burst: number of callbacks we do before ratelimiting
+ * @rs: ratelimit_state data
 *
- * This enforces a rate limit: not more than @ratelimit_burst callbacks
- * in every ratelimit_jiffies
+ * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks
+ * in every @rs->ratelimit_jiffies
 */
-int __ratelimit(int ratelimit_jiffies, int ratelimit_burst)
+int __ratelimit(struct ratelimit_state *rs)
 {
-	static DEFINE_SPINLOCK(ratelimit_lock);
-	static unsigned toks = 10 * 5 * HZ;
-	static unsigned long last_msg;
-	static int missed;
-	unsigned long flags;
-	unsigned long now = jiffies;
+	if (!rs->interval)
+		return 1;

 	spin_lock_irqsave(&ratelimit_lock, flags);
-	toks += now - last_msg;
-	last_msg = now;
-	if (toks > (ratelimit_burst * ratelimit_jiffies))
-		toks = ratelimit_burst * ratelimit_jiffies;
-	if (toks >= ratelimit_jiffies) {
-		int lost = missed;
+	if (!rs->begin)
+		rs->begin = jiffies;

-		missed = 0;
-		toks -= ratelimit_jiffies;
-		spin_unlock_irqrestore(&ratelimit_lock, flags);
-		if (lost)
-			printk(KERN_WARNING "%s: %d messages suppressed\n",
-				__func__, lost);
-		return 1;
+	if (time_is_before_jiffies(rs->begin + rs->interval)) {
+		if (rs->missed)
+			printk(KERN_WARNING "%s: %d callbacks suppressed\n",
+				__func__, rs->missed);
+		rs->begin = 0;
+		rs->printed = 0;
+		rs->missed = 0;
 	}
-	missed++;
+	if (rs->burst && rs->burst > rs->printed)
+		goto print;
+
+	rs->missed++;
 	spin_unlock_irqrestore(&ratelimit_lock, flags);
 	return 0;
+
+print:
+	rs->printed++;
+	spin_unlock_irqrestore(&ratelimit_lock, flags);
+	return 1;
 }
 EXPORT_SYMBOL(__ratelimit);
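With the state moved into a struct, each caller can carry an independent limiter instead of sharing printk's global one. A sketch of the intended usage, assuming the DEFINE_RATELIMIT_STATE() initializer from the matching header; the message is invented:

	/* at most 10 callbacks per 5 seconds */
	static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);

	if (__ratelimit(&my_rs))
		printk(KERN_WARNING "device wedged, resetting\n");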
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b80c21100d7..876ba6d5b67 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -295,6 +295,117 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
 EXPORT_SYMBOL(sg_alloc_table);

 /**
+ * sg_miter_start - start mapping iteration over a sg list
+ * @miter: sg mapping iter to be started
+ * @sgl: sg list to iterate over
+ * @nents: number of sg entries
+ *
+ * Description:
+ *   Starts mapping iterator @miter.
+ *
+ * Context:
+ *   Don't care.
+ */
+void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
+		    unsigned int nents, unsigned int flags)
+{
+	memset(miter, 0, sizeof(struct sg_mapping_iter));
+
+	miter->__sg = sgl;
+	miter->__nents = nents;
+	miter->__offset = 0;
+	miter->__flags = flags;
+}
+EXPORT_SYMBOL(sg_miter_start);
+
+/**
+ * sg_miter_next - proceed mapping iterator to the next mapping
+ * @miter: sg mapping iter to proceed
+ *
+ * Description:
+ *   Proceeds @miter@ to the next mapping.  @miter@ should have been
+ *   started using sg_miter_start().  On successful return,
+ *   @miter@->page, @miter@->addr and @miter@->length point to the
+ *   current mapping.
+ *
+ * Context:
+ *   IRQ disabled if SG_MITER_ATOMIC.  IRQ must stay disabled till
+ *   @miter@ is stopped.  May sleep if !SG_MITER_ATOMIC.
+ *
+ * Returns:
+ *   true if @miter contains the next mapping.  false if end of sg
+ *   list is reached.
+ */
+bool sg_miter_next(struct sg_mapping_iter *miter)
+{
+	unsigned int off, len;
+
+	/* check for end and drop resources from the last iteration */
+	if (!miter->__nents)
+		return false;
+
+	sg_miter_stop(miter);
+
+	/* get to the next sg if necessary.  __offset is adjusted by stop */
+	if (miter->__offset == miter->__sg->length && --miter->__nents) {
+		miter->__sg = sg_next(miter->__sg);
+		miter->__offset = 0;
+	}
+
+	/* map the next page */
+	off = miter->__sg->offset + miter->__offset;
+	len = miter->__sg->length - miter->__offset;
+
+	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
+	off &= ~PAGE_MASK;
+	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
+	miter->consumed = miter->length;
+
+	if (miter->__flags & SG_MITER_ATOMIC)
+		miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
+	else
+		miter->addr = kmap(miter->page) + off;
+
+	return true;
+}
+EXPORT_SYMBOL(sg_miter_next);
+
+/**
+ * sg_miter_stop - stop mapping iteration
+ * @miter: sg mapping iter to be stopped
+ *
+ * Description:
+ *   Stops mapping iterator @miter.  @miter should have been started
+ *   started using sg_miter_start().  A stopped iteration can be
+ *   resumed by calling sg_miter_next() on it.  This is useful when
+ *   resources (kmap) need to be released during iteration.
+ *
+ * Context:
+ *   IRQ disabled if the SG_MITER_ATOMIC is set.  Don't care otherwise.
+ */
+void sg_miter_stop(struct sg_mapping_iter *miter)
+{
+	WARN_ON(miter->consumed > miter->length);
+
+	/* drop resources from the last iteration */
+	if (miter->addr) {
+		miter->__offset += miter->consumed;
+
+		if (miter->__flags & SG_MITER_ATOMIC) {
+			WARN_ON(!irqs_disabled());
+			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
+		} else
+			kunmap(miter->addr);
+
+		miter->page = NULL;
+		miter->addr = NULL;
+		miter->length = 0;
+		miter->consumed = 0;
+	}
+}
+EXPORT_SYMBOL(sg_miter_stop);
+
+/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
@@ -309,56 +420,29 @@ EXPORT_SYMBOL(sg_alloc_table);
 static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, int to_buffer)
 {
-	struct scatterlist *sg;
-	size_t buf_off = 0;
-	int i;
-
-	WARN_ON(!irqs_disabled());
-
-	for_each_sg(sgl, sg, nents, i) {
-		struct page *page;
-		int n = 0;
-		unsigned int sg_off = sg->offset;
-		unsigned int sg_copy = sg->length;
-
-		if (sg_copy > buflen)
-			sg_copy = buflen;
-		buflen -= sg_copy;
-
-		while (sg_copy > 0) {
-			unsigned int page_copy;
-			void *p;
-
-			page_copy = PAGE_SIZE - sg_off;
-			if (page_copy > sg_copy)
-				page_copy = sg_copy;
-
-			page = nth_page(sg_page(sg), n);
-			p = kmap_atomic(page, KM_BIO_SRC_IRQ);
-
-			if (to_buffer)
-				memcpy(buf + buf_off, p + sg_off, page_copy);
-			else {
-				memcpy(p + sg_off, buf + buf_off, page_copy);
-				flush_kernel_dcache_page(page);
-			}
-
-			kunmap_atomic(p, KM_BIO_SRC_IRQ);
-
-			buf_off += page_copy;
-			sg_off += page_copy;
-			if (sg_off == PAGE_SIZE) {
-				sg_off = 0;
-				n++;
-			}
-			sg_copy -= page_copy;
+	unsigned int offset = 0;
+	struct sg_mapping_iter miter;
+
+	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
+
+	while (sg_miter_next(&miter) && offset < buflen) {
+		unsigned int len;
+
+		len = min(miter.length, buflen - offset);
+
+		if (to_buffer)
+			memcpy(buf + offset, miter.addr, len);
+		else {
+			memcpy(miter.addr, buf + offset, len);
+			flush_kernel_dcache_page(miter.page);
 		}
-		if (!buflen)
-			break;
+
+		offset += len;
 	}

-	return buf_off;
+	sg_miter_stop(&miter);
+
+	return offset;
 }

 /**
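The rewritten sg_copy_buffer() above is itself the first user of the iterator; the general shape, shown here zeroing an sg list (a sketch — with SG_MITER_ATOMIC the caller must keep IRQs disabled across the iteration, per the kernel-doc):

	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
	while (sg_miter_next(&miter))
		memset(miter.addr, 0, miter.length);	/* one mapped chunk */
	sg_miter_stop(&miter);				/* drops the last kmap */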
diff --git a/lib/show_mem.c b/lib/show_mem.c
new file mode 100644
index 00000000000..238e72a18ce
--- /dev/null
+++ b/lib/show_mem.c
@@ -0,0 +1,63 @@
+/*
+ * Generic show_mem() implementation
+ *
+ * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
+ * All code subject to the GPL version 2.
+ */
+
+#include <linux/mm.h>
+#include <linux/nmi.h>
+#include <linux/quicklist.h>
+
+void show_mem(void)
+{
+	pg_data_t *pgdat;
+	unsigned long total = 0, reserved = 0, shared = 0,
+		nonshared = 0, highmem = 0;
+
+	printk(KERN_INFO "Mem-Info:\n");
+	show_free_areas();
+
+	for_each_online_pgdat(pgdat) {
+		unsigned long i, flags;
+
+		pgdat_resize_lock(pgdat, &flags);
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			struct page *page;
+			unsigned long pfn = pgdat->node_start_pfn + i;
+
+			if (unlikely(!(i % MAX_ORDER_NR_PAGES)))
+				touch_nmi_watchdog();
+
+			if (!pfn_valid(pfn))
+				continue;
+
+			page = pfn_to_page(pfn);
+
+			if (PageHighMem(page))
+				highmem++;
+
+			if (PageReserved(page))
+				reserved++;
+			else if (page_count(page) == 1)
+				nonshared++;
+			else if (page_count(page) > 1)
+				shared += page_count(page) - 1;
+
+			total++;
+		}
+		pgdat_resize_unlock(pgdat, &flags);
+	}
+
+	printk(KERN_INFO "%lu pages RAM\n", total);
+#ifdef CONFIG_HIGHMEM
+	printk(KERN_INFO "%lu pages HighMem\n", highmem);
+#endif
+	printk(KERN_INFO "%lu pages reserved\n", reserved);
+	printk(KERN_INFO "%lu pages shared\n", shared);
+	printk(KERN_INFO "%lu pages non-shared\n", nonshared);
+#ifdef CONFIG_QUICKLIST
+	printk(KERN_INFO "%lu pages in pagetable cache\n",
+		quicklist_total_size());
+#endif
+}
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 3b4dc098181..c4381d9516f 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -11,7 +11,7 @@ notrace unsigned int debug_smp_processor_id(void)
 {
 	unsigned long preempt_count = preempt_count();
 	int this_cpu = raw_smp_processor_id();
-	cpumask_t this_mask;
+	cpumask_of_cpu_ptr_declare(this_mask);

 	if (likely(preempt_count))
 		goto out;
@@ -23,9 +23,9 @@ notrace unsigned int debug_smp_processor_id(void)
	 * Kernel threads bound to a single CPU can safely use
	 * smp_processor_id():
	 */
-	this_mask = cpumask_of_cpu(this_cpu);
+	cpumask_of_cpu_ptr_next(this_mask, this_cpu);

-	if (cpus_equal(current->cpus_allowed, this_mask))
+	if (cpus_equal(current->cpus_allowed, *this_mask))
 		goto out;

	/*
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index d568894df8c..977edbdbc1d 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -492,7 +492,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		 */
		dma_addr_t handle;
		handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
-		if (swiotlb_dma_mapping_error(handle))
+		if (swiotlb_dma_mapping_error(hwdev, handle))
			return NULL;

		ret = bus_to_virt(handle);
@@ -824,7 +824,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 }

 int
-swiotlb_dma_mapping_error(dma_addr_t dma_addr)
+swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
 }
diff --git a/lib/syscall.c b/lib/syscall.c
new file mode 100644
index 00000000000..a4f7067f72f
--- /dev/null
+++ b/lib/syscall.c
@@ -0,0 +1,75 @@
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <asm/syscall.h>
+
+static int collect_syscall(struct task_struct *target, long *callno,
+			   unsigned long args[6], unsigned int maxargs,
+			   unsigned long *sp, unsigned long *pc)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+	if (unlikely(!regs))
+		return -EAGAIN;
+
+	*sp = user_stack_pointer(regs);
+	*pc = instruction_pointer(regs);
+
+	*callno = syscall_get_nr(target, regs);
+	if (*callno != -1L && maxargs > 0)
+		syscall_get_arguments(target, regs, 0, maxargs, args);
+
+	return 0;
+}
+
+/**
+ * task_current_syscall - Discover what a blocked task is doing.
+ * @target:		thread to examine
+ * @callno:		filled with system call number or -1
+ * @args:		filled with @maxargs system call arguments
+ * @maxargs:		number of elements in @args to fill
+ * @sp:			filled with user stack pointer
+ * @pc:			filled with user PC
+ *
+ * If @target is blocked in a system call, returns zero with *@callno
+ * set to the the call's number and @args filled in with its arguments.
+ * Registers not used for system call arguments may not be available and
+ * it is not kosher to use &struct user_regset calls while the system
+ * call is still in progress.  Note we may get this result if @target
+ * has finished its system call but not yet returned to user mode, such
+ * as when it's stopped for signal handling or syscall exit tracing.
+ *
+ * If @target is blocked in the kernel during a fault or exception,
+ * returns zero with *@callno set to -1 and does not fill in @args.
+ * If so, it's now safe to examine @target using &struct user_regset
+ * get() calls as long as we're sure @target won't return to user mode.
+ *
+ * Returns -%EAGAIN if @target does not remain blocked.
+ *
+ * Returns -%EINVAL if @maxargs is too large (maximum is six).
+ */
+int task_current_syscall(struct task_struct *target, long *callno,
+			 unsigned long args[6], unsigned int maxargs,
+			 unsigned long *sp, unsigned long *pc)
+{
+	long state;
+	unsigned long ncsw;
+
+	if (unlikely(maxargs > 6))
+		return -EINVAL;
+
+	if (target == current)
+		return collect_syscall(target, callno, args, maxargs, sp, pc);
+
+	state = target->state;
+	if (unlikely(!state))
+		return -EAGAIN;
+
+	ncsw = wait_task_inactive(target, state);
+	if (unlikely(!ncsw) ||
+	    unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
+	    unlikely(wait_task_inactive(target, state) != ncsw))
+		return -EAGAIN;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(task_current_syscall);
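A sketch of a consumer of the new helper (the reporting code is invented; @target must be kept blocked, for example a ptrace-stopped child, for the wait_task_inactive() checks to succeed):

	unsigned long args[6], sp, pc;
	long nr;

	if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
		return;			/* -EAGAIN: task didn't stay blocked */

	if (nr == -1L)
		printk("pid %d blocked in fault/exception, pc=%lx\n",
			task_pid_nr(task), pc);
	else
		printk("pid %d in syscall %ld, arg0=%lx, sp=%lx\n",
			task_pid_nr(task), nr, args[0], sp);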