Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                |   6
-rw-r--r--  lib/Kconfig.debug          |  14
-rw-r--r--  lib/Makefile               |   2
-rw-r--r--  lib/hexdump.c              | 149
-rw-r--r--  lib/idr.c                  | 332
-rw-r--r--  lib/kobject.c              |  29
-rw-r--r--  lib/lzo/Makefile           |   5
-rw-r--r--  lib/lzo/lzo1x_compress.c   | 226
-rw-r--r--  lib/lzo/lzo1x_decompress.c | 254
-rw-r--r--  lib/lzo/lzodefs.h          |  43
-rw-r--r--  lib/radix-tree.c           |   1
11 files changed, 994 insertions(+), 67 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig index 2e7ae6b9215..3eb29d5dc4f 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -64,6 +64,12 @@ config ZLIB_INFLATE config ZLIB_DEFLATE tristate +config LZO_COMPRESS + tristate + +config LZO_DECOMPRESS + tristate + # # Generic allocator support is selected if needed # diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1ba77ca7d16..fab32a28637 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -105,6 +105,15 @@ config DETECT_SOFTLOCKUP can be detected via the NMI-watchdog, on platforms that support it.) +config SCHED_DEBUG + bool "Collect scheduler debugging info" + depends on DEBUG_KERNEL && PROC_FS + default y + help + If you say Y here, the /proc/sched_debug file will be provided + that can help debug the scheduler. The runtime overhead of this + option is minimal. + config SCHEDSTATS bool "Collect scheduler statistics" depends on DEBUG_KERNEL && PROC_FS @@ -126,7 +135,10 @@ config TIMER_STATS reprogrammed. The statistics can be read from /proc/timer_stats. The statistics collection is started by writing 1 to /proc/timer_stats, writing 0 stops it. This feature is useful to collect information - about timer usage patterns in kernel and userspace. + about timer usage patterns in kernel and userspace. This feature + is lightweight if enabled in the kernel config but not activated + (it defaults to deactivated on bootup and will only be activated + if some application like powertop activates it explicitly). config DEBUG_SLAB bool "Debug slab memory allocations" diff --git a/lib/Makefile b/lib/Makefile index c8c8e20784c..d1b366bdf86 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -49,6 +49,8 @@ obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/ obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ +obj-$(CONFIG_LZO_COMPRESS) += lzo/ +obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ obj-$(CONFIG_TEXTSEARCH) += textsearch.o obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o diff --git a/lib/hexdump.c b/lib/hexdump.c index e6da5b7fc29..473f5aed6ca 100644 --- a/lib/hexdump.c +++ b/lib/hexdump.c @@ -16,42 +16,98 @@ * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory * @buf: data blob to dump * @len: number of bytes in the @buf + * @rowsize: number of bytes to print per line; must be 16 or 32 + * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1) * @linebuf: where to put the converted data * @linebuflen: total size of @linebuf, including space for terminating NUL + * @ascii: include ASCII after the hex output * * hex_dump_to_buffer() works on one "line" of output at a time, i.e., - * 16 bytes of input data converted to hex + ASCII output. + * 16 or 32 bytes of input data converted to hex + ASCII output. * * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data * to a hex + ASCII dump at the supplied memory location. * The converted output is always NUL-terminated. 
* * E.g.: - * hex_dump_to_buffer(frame->data, frame->len, linebuf, sizeof(linebuf)); + * hex_dump_to_buffer(frame->data, frame->len, 16, 1, + * linebuf, sizeof(linebuf), 1); * * example output buffer: - * 40414243 44454647 48494a4b 4c4d4e4f @ABCDEFGHIJKLMNO + * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO */ -void hex_dump_to_buffer(const void *buf, size_t len, char *linebuf, - size_t linebuflen) +void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, char *linebuf, size_t linebuflen, + bool ascii) { const u8 *ptr = buf; u8 ch; int j, lx = 0; + int ascii_column; - for (j = 0; (j < 16) && (j < len) && (lx + 3) < linebuflen; j++) { - if (j && !(j % 4)) + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + if (!len) + goto nil; + if (len > rowsize) /* limit to one line at a time */ + len = rowsize; + if ((len % groupsize) != 0) /* no mixed size output */ + groupsize = 1; + + switch (groupsize) { + case 8: { + const u64 *ptr8 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf(linebuf + lx, linebuflen - lx, + "%16.16llx ", (unsigned long long)*(ptr8 + j)); + ascii_column = 17 * ngroups + 2; + break; + } + + case 4: { + const u32 *ptr4 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf(linebuf + lx, linebuflen - lx, + "%8.8x ", *(ptr4 + j)); + ascii_column = 9 * ngroups + 2; + break; + } + + case 2: { + const u16 *ptr2 = buf; + int ngroups = len / groupsize; + + for (j = 0; j < ngroups; j++) + lx += scnprintf(linebuf + lx, linebuflen - lx, + "%4.4x ", *(ptr2 + j)); + ascii_column = 5 * ngroups + 2; + break; + } + + default: + for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen; + j++) { + ch = ptr[j]; + linebuf[lx++] = hex_asc(ch >> 4); + linebuf[lx++] = hex_asc(ch & 0x0f); linebuf[lx++] = ' '; - ch = ptr[j]; - linebuf[lx++] = hex_asc(ch >> 4); - linebuf[lx++] = hex_asc(ch & 0x0f); + } + ascii_column = 3 * rowsize + 2; + break; } - if ((lx + 2) < linebuflen) { - linebuf[lx++] = ' '; + if (!ascii) + goto nil; + + while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) linebuf[lx++] = ' '; - } - for (j = 0; (j < 16) && (j < len) && (lx + 2) < linebuflen; j++) + for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++) linebuf[lx++] = isprint(ptr[j]) ? ptr[j] : '.'; +nil: linebuf[lx++] = '\0'; } EXPORT_SYMBOL(hex_dump_to_buffer); @@ -59,46 +115,83 @@ EXPORT_SYMBOL(hex_dump_to_buffer); /** * print_hex_dump - print a text hex dump to syslog for a binary blob of data * @level: kernel log level (e.g. KERN_DEBUG) + * @prefix_str: string to prefix each line with; + * caller supplies trailing spaces for alignment if desired * @prefix_type: controls whether prefix of an offset, address, or none * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) + * @rowsize: number of bytes to print per line; must be 16 or 32 + * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1) * @buf: data blob to dump * @len: number of bytes in the @buf + * @ascii: include ASCII after the hex output * * Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump * to the kernel log at the specified kernel log level, with an optional * leading prefix. * + * print_hex_dump() works on one "line" of output at a time, i.e., + * 16 or 32 bytes of input data converted to hex + ASCII output. + * print_hex_dump() iterates over the entire input @buf, breaking it into + * "line size" chunks to format and print. 
+ * * E.g.: - * print_hex_dump(KERN_DEBUG, DUMP_PREFIX_ADDRESS, frame->data, frame->len); + * print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS, + * 16, 1, frame->data, frame->len, 1); * - * Example output using %DUMP_PREFIX_OFFSET: - * 0009ab42: 40414243 44454647 48494a4b 4c4d4e4f @ABCDEFGHIJKLMNO - * Example output using %DUMP_PREFIX_ADDRESS: - * ffffffff88089af0: 70717273 74757677 78797a7b 7c7d7e7f pqrstuvwxyz{|}~. + * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode: + * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO + * Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode: + * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~. */ -void print_hex_dump(const char *level, int prefix_type, void *buf, size_t len) +void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, + int rowsize, int groupsize, + void *buf, size_t len, bool ascii) { u8 *ptr = buf; int i, linelen, remaining = len; - unsigned char linebuf[100]; + unsigned char linebuf[200]; - for (i = 0; i < len; i += 16) { - linelen = min(remaining, 16); - remaining -= 16; - hex_dump_to_buffer(ptr + i, linelen, linebuf, sizeof(linebuf)); + if (rowsize != 16 && rowsize != 32) + rowsize = 16; + + for (i = 0; i < len; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, + linebuf, sizeof(linebuf), ascii); switch (prefix_type) { case DUMP_PREFIX_ADDRESS: - printk("%s%*p: %s\n", level, + printk("%s%s%*p: %s\n", level, prefix_str, (int)(2 * sizeof(void *)), ptr + i, linebuf); break; case DUMP_PREFIX_OFFSET: - printk("%s%.8x: %s\n", level, i, linebuf); + printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); break; default: - printk("%s%s\n", level, linebuf); + printk("%s%s%s\n", level, prefix_str, linebuf); break; } } } EXPORT_SYMBOL(print_hex_dump); + +/** + * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params + * @prefix_str: string to prefix each line with; + * caller supplies trailing spaces for alignment if desired + * @prefix_type: controls whether prefix of an offset, address, or none + * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) + * @buf: data blob to dump + * @len: number of bytes in the @buf + * + * Calls print_hex_dump(), with log level of KERN_DEBUG, + * rowsize of 16, groupsize of 1, and ASCII output included. + */ +void print_hex_dump_bytes(const char *prefix_str, int prefix_type, + void *buf, size_t len) +{ + print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1, + buf, len, 1); +} +EXPORT_SYMBOL(print_hex_dump_bytes); diff --git a/lib/idr.c b/lib/idr.c index 305117ca2d4..b98f01a2eb9 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -70,6 +70,26 @@ static void free_layer(struct idr *idp, struct idr_layer *p) spin_unlock_irqrestore(&idp->lock, flags); } +static void idr_mark_full(struct idr_layer **pa, int id) +{ + struct idr_layer *p = pa[0]; + int l = 0; + + __set_bit(id & IDR_MASK, &p->bitmap); + /* + * If this layer is full mark the bit in the layer above to + * show that this part of the radix tree is full. This may + * complete the layer above and require walking up the radix + * tree. 
+ */ + while (p->bitmap == IDR_FULL) { + if (!(p = pa[++l])) + break; + id = id >> IDR_BITS; + __set_bit((id & IDR_MASK), &p->bitmap); + } +} + /** * idr_pre_get - reserver resources for idr allocation * @idp: idr handle @@ -95,15 +115,15 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask) } EXPORT_SYMBOL(idr_pre_get); -static int sub_alloc(struct idr *idp, void *ptr, int *starting_id) +static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) { int n, m, sh; struct idr_layer *p, *new; - struct idr_layer *pa[MAX_LEVEL]; - int l, id; + int l, id, oid; long bm; id = *starting_id; + restart: p = idp->top; l = idp->layers; pa[l--] = NULL; @@ -117,12 +137,23 @@ static int sub_alloc(struct idr *idp, void *ptr, int *starting_id) if (m == IDR_SIZE) { /* no space available go back to previous layer. */ l++; + oid = id; id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; + + /* if already at the top layer, we need to grow */ if (!(p = pa[l])) { *starting_id = id; return -2; } - continue; + + /* If we need to go up one layer, continue the + * loop; otherwise, restart from the top. + */ + sh = IDR_BITS * (l + 1); + if (oid >> sh == id >> sh) + continue; + else + goto restart; } if (m != n) { sh = IDR_BITS*l; @@ -144,30 +175,13 @@ static int sub_alloc(struct idr *idp, void *ptr, int *starting_id) pa[l--] = p; p = p->ary[m]; } - /* - * We have reached the leaf node, plant the - * users pointer and return the raw id. - */ - p->ary[m] = (struct idr_layer *)ptr; - __set_bit(m, &p->bitmap); - p->count++; - /* - * If this layer is full mark the bit in the layer above - * to show that this part of the radix tree is full. - * This may complete the layer above and require walking - * up the radix tree. - */ - n = id; - while (p->bitmap == IDR_FULL) { - if (!(p = pa[++l])) - break; - n = n >> IDR_BITS; - __set_bit((n & IDR_MASK), &p->bitmap); - } - return(id); + + pa[l] = p; + return id; } -static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) +static int idr_get_empty_slot(struct idr *idp, int starting_id, + struct idr_layer **pa) { struct idr_layer *p, *new; int layers, v, id; @@ -213,12 +227,31 @@ build_up: } idp->top = p; idp->layers = layers; - v = sub_alloc(idp, ptr, &id); + v = sub_alloc(idp, &id, pa); if (v == -2) goto build_up; return(v); } +static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) +{ + struct idr_layer *pa[MAX_LEVEL]; + int id; + + id = idr_get_empty_slot(idp, starting_id, pa); + if (id >= 0) { + /* + * Successfully found an empty slot. Install the user + * pointer and mark the slot full. + */ + pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr; + pa[0]->count++; + idr_mark_full(pa, id); + } + + return id; +} + /** * idr_get_new_above - allocate new idr entry above or equal to a start id * @idp: idr handle @@ -473,3 +506,248 @@ void idr_init(struct idr *idp) spin_lock_init(&idp->lock); } EXPORT_SYMBOL(idr_init); + + +/* + * IDA - IDR based ID allocator + * + * this is id allocator without id -> pointer translation. Memory + * usage is much lower than full blown idr because each id only + * occupies a bit. ida uses a custom leaf node which contains + * IDA_BITMAP_BITS slots. 
+ * + * 2007-04-25 written by Tejun Heo <htejun@gmail.com> + */ + +static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap) +{ + unsigned long flags; + + if (!ida->free_bitmap) { + spin_lock_irqsave(&ida->idr.lock, flags); + if (!ida->free_bitmap) { + ida->free_bitmap = bitmap; + bitmap = NULL; + } + spin_unlock_irqrestore(&ida->idr.lock, flags); + } + + kfree(bitmap); +} + +/** + * ida_pre_get - reserve resources for ida allocation + * @ida: ida handle + * @gfp_mask: memory allocation flag + * + * This function should be called prior to locking and calling the + * following function. It preallocates enough memory to satisfy the + * worst possible allocation. + * + * If the system is REALLY out of memory this function returns 0, + * otherwise 1. + */ +int ida_pre_get(struct ida *ida, gfp_t gfp_mask) +{ + /* allocate idr_layers */ + if (!idr_pre_get(&ida->idr, gfp_mask)) + return 0; + + /* allocate free_bitmap */ + if (!ida->free_bitmap) { + struct ida_bitmap *bitmap; + + bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask); + if (!bitmap) + return 0; + + free_bitmap(ida, bitmap); + } + + return 1; +} +EXPORT_SYMBOL(ida_pre_get); + +/** + * ida_get_new_above - allocate new ID above or equal to a start id + * @ida: ida handle + * @staring_id: id to start search at + * @p_id: pointer to the allocated handle + * + * Allocate new ID above or equal to @ida. It should be called with + * any required locks. + * + * If memory is required, it will return -EAGAIN, you should unlock + * and go back to the ida_pre_get() call. If the ida is full, it will + * return -ENOSPC. + * + * @p_id returns a value in the range 0 ... 0x7fffffff. + */ +int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) +{ + struct idr_layer *pa[MAX_LEVEL]; + struct ida_bitmap *bitmap; + unsigned long flags; + int idr_id = starting_id / IDA_BITMAP_BITS; + int offset = starting_id % IDA_BITMAP_BITS; + int t, id; + + restart: + /* get vacant slot */ + t = idr_get_empty_slot(&ida->idr, idr_id, pa); + if (t < 0) { + if (t == -1) + return -EAGAIN; + else /* will be -3 */ + return -ENOSPC; + } + + if (t * IDA_BITMAP_BITS >= MAX_ID_BIT) + return -ENOSPC; + + if (t != idr_id) + offset = 0; + idr_id = t; + + /* if bitmap isn't there, create a new one */ + bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK]; + if (!bitmap) { + spin_lock_irqsave(&ida->idr.lock, flags); + bitmap = ida->free_bitmap; + ida->free_bitmap = NULL; + spin_unlock_irqrestore(&ida->idr.lock, flags); + + if (!bitmap) + return -EAGAIN; + + memset(bitmap, 0, sizeof(struct ida_bitmap)); + pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap; + pa[0]->count++; + } + + /* lookup for empty slot */ + t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset); + if (t == IDA_BITMAP_BITS) { + /* no empty slot after offset, continue to the next chunk */ + idr_id++; + offset = 0; + goto restart; + } + + id = idr_id * IDA_BITMAP_BITS + t; + if (id >= MAX_ID_BIT) + return -ENOSPC; + + __set_bit(t, bitmap->bitmap); + if (++bitmap->nr_busy == IDA_BITMAP_BITS) + idr_mark_full(pa, idr_id); + + *p_id = id; + + /* Each leaf node can handle nearly a thousand slots and the + * whole idea of ida is to have small memory foot print. + * Throw away extra resources one by one after each successful + * allocation. 
+ */ + if (ida->idr.id_free_cnt || ida->free_bitmap) { + struct idr_layer *p = alloc_layer(&ida->idr); + if (p) + kmem_cache_free(idr_layer_cache, p); + } + + return 0; +} +EXPORT_SYMBOL(ida_get_new_above); + +/** + * ida_get_new - allocate new ID + * @ida: idr handle + * @p_id: pointer to the allocated handle + * + * Allocate new ID. It should be called with any required locks. + * + * If memory is required, it will return -EAGAIN, you should unlock + * and go back to the idr_pre_get() call. If the idr is full, it will + * return -ENOSPC. + * + * @id returns a value in the range 0 ... 0x7fffffff. + */ +int ida_get_new(struct ida *ida, int *p_id) +{ + return ida_get_new_above(ida, 0, p_id); +} +EXPORT_SYMBOL(ida_get_new); + +/** + * ida_remove - remove the given ID + * @ida: ida handle + * @id: ID to free + */ +void ida_remove(struct ida *ida, int id) +{ + struct idr_layer *p = ida->idr.top; + int shift = (ida->idr.layers - 1) * IDR_BITS; + int idr_id = id / IDA_BITMAP_BITS; + int offset = id % IDA_BITMAP_BITS; + int n; + struct ida_bitmap *bitmap; + + /* clear full bits while looking up the leaf idr_layer */ + while ((shift > 0) && p) { + n = (idr_id >> shift) & IDR_MASK; + __clear_bit(n, &p->bitmap); + p = p->ary[n]; + shift -= IDR_BITS; + } + + if (p == NULL) + goto err; + + n = idr_id & IDR_MASK; + __clear_bit(n, &p->bitmap); + + bitmap = (void *)p->ary[n]; + if (!test_bit(offset, bitmap->bitmap)) + goto err; + + /* update bitmap and remove it if empty */ + __clear_bit(offset, bitmap->bitmap); + if (--bitmap->nr_busy == 0) { + __set_bit(n, &p->bitmap); /* to please idr_remove() */ + idr_remove(&ida->idr, idr_id); + free_bitmap(ida, bitmap); + } + + return; + + err: + printk(KERN_WARNING + "ida_remove called for id=%d which is not allocated.\n", id); +} +EXPORT_SYMBOL(ida_remove); + +/** + * ida_destroy - release all cached layers within an ida tree + * ida: ida handle + */ +void ida_destroy(struct ida *ida) +{ + idr_destroy(&ida->idr); + kfree(ida->free_bitmap); +} +EXPORT_SYMBOL(ida_destroy); + +/** + * ida_init - initialize ida handle + * @ida: ida handle + * + * This function is use to set up the handle (@ida) that you will pass + * to the rest of the functions. + */ +void ida_init(struct ida *ida) +{ + memset(ida, 0, sizeof(struct ida)); + idr_init(&ida->idr); + +} +EXPORT_SYMBOL(ida_init); diff --git a/lib/kobject.c b/lib/kobject.c index fc5f3f6e732..4b08e0ff95c 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -44,7 +44,7 @@ static int populate_dir(struct kobject * kobj) return error; } -static int create_dir(struct kobject * kobj, struct dentry *shadow_parent) +static int create_dir(struct kobject *kobj, struct sysfs_dirent *shadow_parent) { int error = 0; if (kobject_name(kobj)) { @@ -162,7 +162,7 @@ static void unlink(struct kobject * kobj) * @shadow_parent: sysfs directory to add to. 
*/ -int kobject_shadow_add(struct kobject * kobj, struct dentry *shadow_parent) +int kobject_shadow_add(struct kobject *kobj, struct sysfs_dirent *shadow_parent) { int error = 0; struct kobject * parent; @@ -202,14 +202,14 @@ int kobject_shadow_add(struct kobject * kobj, struct dentry *shadow_parent) /* be noisy on error issues */ if (error == -EEXIST) - printk("kobject_add failed for %s with -EEXIST, " - "don't try to register things with the " - "same name in the same directory.\n", + printk(KERN_ERR "kobject_add failed for %s with " + "-EEXIST, don't try to register things with " + "the same name in the same directory.\n", kobject_name(kobj)); else - printk("kobject_add failed for %s (%d)\n", + printk(KERN_ERR "kobject_add failed for %s (%d)\n", kobject_name(kobj), error); - dump_stack(); + dump_stack(); } return error; @@ -338,7 +338,7 @@ int kobject_rename(struct kobject * kobj, const char *new_name) /* Note : if we want to send the new name alone, not the full path, * we could probably use kobject_name(kobj); */ - error = sysfs_rename_dir(kobj, kobj->parent->dentry, new_name); + error = sysfs_rename_dir(kobj, kobj->parent->sd, new_name); /* This function is mostly/only used for network interface. * Some hotplug package track interfaces by their name and @@ -361,8 +361,8 @@ out: * @new_name: object's new name */ -int kobject_shadow_rename(struct kobject * kobj, struct dentry *new_parent, - const char *new_name) +int kobject_shadow_rename(struct kobject *kobj, + struct sysfs_dirent *new_parent, const char *new_name) { int error = 0; @@ -597,10 +597,17 @@ int kset_add(struct kset * k) int kset_register(struct kset * k) { + int err; + if (!k) return -EINVAL; + kset_init(k); - return kset_add(k); + err = kset_add(k); + if (err) + return err; + kobject_uevent(&k->kobj, KOBJ_ADD); + return 0; } diff --git a/lib/lzo/Makefile b/lib/lzo/Makefile new file mode 100644 index 00000000000..e764116ea12 --- /dev/null +++ b/lib/lzo/Makefile @@ -0,0 +1,5 @@ +lzo_compress-objs := lzo1x_compress.o +lzo_decompress-objs := lzo1x_decompress.o + +obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o +obj-$(CONFIG_LZO_DECOMPRESS) += lzo_decompress.o diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c new file mode 100644 index 00000000000..c935f00073e --- /dev/null +++ b/lib/lzo/lzo1x_compress.c @@ -0,0 +1,226 @@ +/* + * LZO1X Compressor from MiniLZO + * + * Copyright (C) 1996-2005 Markus F.X.J. 
Oberhumer <markus@oberhumer.com> + * + * The full LZO package can be found at: + * http://www.oberhumer.com/opensource/lzo/ + * + * Changed for kernel use by: + * Nitin Gupta <nitingupta910@gmail.com> + * Richard Purdie <rpurdie@openedhand.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/lzo.h> +#include <asm/unaligned.h> +#include "lzodefs.h" + +static noinline size_t +_lzo1x_1_do_compress(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, void *wrkmem) +{ + const unsigned char * const in_end = in + in_len; + const unsigned char * const ip_end = in + in_len - M2_MAX_LEN - 5; + const unsigned char ** const dict = wrkmem; + const unsigned char *ip = in, *ii = ip; + const unsigned char *end, *m, *m_pos; + size_t m_off, m_len, dindex; + unsigned char *op = out; + + ip += 4; + + for (;;) { + dindex = ((0x21 * DX3(ip, 5, 5, 6)) >> 5) & D_MASK; + m_pos = dict[dindex]; + + if (m_pos < in) + goto literal; + + if (ip == m_pos || (ip - m_pos) > M4_MAX_OFFSET) + goto literal; + + m_off = ip - m_pos; + if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3]) + goto try_match; + + dindex = (dindex & (D_MASK & 0x7ff)) ^ (D_HIGH | 0x1f); + m_pos = dict[dindex]; + + if (m_pos < in) + goto literal; + + if (ip == m_pos || (ip - m_pos) > M4_MAX_OFFSET) + goto literal; + + m_off = ip - m_pos; + if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3]) + goto try_match; + + goto literal; + +try_match: + if (get_unaligned((const unsigned short *)m_pos) + == get_unaligned((const unsigned short *)ip)) { + if (likely(m_pos[2] == ip[2])) + goto match; + } + +literal: + dict[dindex] = ip; + ++ip; + if (unlikely(ip >= ip_end)) + break; + continue; + +match: + dict[dindex] = ip; + if (ip != ii) { + size_t t = ip - ii; + + if (t <= 3) { + op[-2] |= t; + } else if (t <= 18) { + *op++ = (t - 3); + } else { + size_t tt = t - 18; + + *op++ = 0; + while (tt > 255) { + tt -= 255; + *op++ = 0; + } + *op++ = tt; + } + do { + *op++ = *ii++; + } while (--t > 0); + } + + ip += 3; + if (m_pos[3] != *ip++ || m_pos[4] != *ip++ + || m_pos[5] != *ip++ || m_pos[6] != *ip++ + || m_pos[7] != *ip++ || m_pos[8] != *ip++) { + --ip; + m_len = ip - ii; + + if (m_off <= M2_MAX_OFFSET) { + m_off -= 1; + *op++ = (((m_len - 1) << 5) + | ((m_off & 7) << 2)); + *op++ = (m_off >> 3); + } else if (m_off <= M3_MAX_OFFSET) { + m_off -= 1; + *op++ = (M3_MARKER | (m_len - 2)); + goto m3_m4_offset; + } else { + m_off -= 0x4000; + + *op++ = (M4_MARKER | ((m_off & 0x4000) >> 11) + | (m_len - 2)); + goto m3_m4_offset; + } + } else { + end = in_end; + m = m_pos + M2_MAX_LEN + 1; + + while (ip < end && *m == *ip) { + m++; + ip++; + } + m_len = ip - ii; + + if (m_off <= M3_MAX_OFFSET) { + m_off -= 1; + if (m_len <= 33) { + *op++ = (M3_MARKER | (m_len - 2)); + } else { + m_len -= 33; + *op++ = M3_MARKER | 0; + goto m3_m4_len; + } + } else { + m_off -= 0x4000; + if (m_len <= M4_MAX_LEN) { + *op++ = (M4_MARKER + | ((m_off & 0x4000) >> 11) + | (m_len - 2)); + } else { + m_len -= M4_MAX_LEN; + *op++ = (M4_MARKER + | ((m_off & 0x4000) >> 11)); +m3_m4_len: + while (m_len > 255) { + m_len -= 255; + *op++ = 0; + } + + *op++ = (m_len); + } + } +m3_m4_offset: + *op++ = ((m_off & 63) << 2); + *op++ = (m_off >> 6); + } + + ii = ip; + if (unlikely(ip >= ip_end)) + break; + } + + *out_len = op - out; + return in_end - ii; +} + +int lzo1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out, + size_t *out_len, void *wrkmem) +{ + const unsigned char *ii; + unsigned char *op = out; + size_t t; + + if (unlikely(in_len 
<= M2_MAX_LEN + 5)) { + t = in_len; + } else { + t = _lzo1x_1_do_compress(in, in_len, op, out_len, wrkmem); + op += *out_len; + } + + if (t > 0) { + ii = in + in_len - t; + + if (op == out && t <= 238) { + *op++ = (17 + t); + } else if (t <= 3) { + op[-2] |= t; + } else if (t <= 18) { + *op++ = (t - 3); + } else { + size_t tt = t - 18; + + *op++ = 0; + while (tt > 255) { + tt -= 255; + *op++ = 0; + } + + *op++ = tt; + } + do { + *op++ = *ii++; + } while (--t > 0); + } + + *op++ = M4_MARKER | 1; + *op++ = 0; + *op++ = 0; + + *out_len = op - out; + return LZO_E_OK; +} +EXPORT_SYMBOL_GPL(lzo1x_1_compress); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("LZO1X-1 Compressor"); + diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c new file mode 100644 index 00000000000..9dc7056e552 --- /dev/null +++ b/lib/lzo/lzo1x_decompress.c @@ -0,0 +1,254 @@ +/* + * LZO1X Decompressor from MiniLZO + * + * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com> + * + * The full LZO package can be found at: + * http://www.oberhumer.com/opensource/lzo/ + * + * Changed for kernel use by: + * Nitin Gupta <nitingupta910@gmail.com> + * Richard Purdie <rpurdie@openedhand.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/lzo.h> +#include <asm/byteorder.h> +#include <asm/unaligned.h> +#include "lzodefs.h" + +#define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x)) +#define HAVE_OP(x, op_end, op) ((size_t)(op_end - op) < (x)) +#define HAVE_LB(m_pos, out, op) (m_pos < out || m_pos >= op) + +#define COPY4(dst, src) \ + put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst)) + +int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len) +{ + const unsigned char * const ip_end = in + in_len; + unsigned char * const op_end = out + *out_len; + const unsigned char *ip = in, *m_pos; + unsigned char *op = out; + size_t t; + + *out_len = 0; + + if (*ip > 17) { + t = *ip++ - 17; + if (t < 4) + goto match_next; + if (HAVE_OP(t, op_end, op)) + goto output_overrun; + if (HAVE_IP(t + 1, ip_end, ip)) + goto input_overrun; + do { + *op++ = *ip++; + } while (--t > 0); + goto first_literal_run; + } + + while ((ip < ip_end)) { + t = *ip++; + if (t >= 16) + goto match; + if (t == 0) { + if (HAVE_IP(1, ip_end, ip)) + goto input_overrun; + while (*ip == 0) { + t += 255; + ip++; + if (HAVE_IP(1, ip_end, ip)) + goto input_overrun; + } + t += 15 + *ip++; + } + if (HAVE_OP(t + 3, op_end, op)) + goto output_overrun; + if (HAVE_IP(t + 4, ip_end, ip)) + goto input_overrun; + + COPY4(op, ip); + op += 4; + ip += 4; + if (--t > 0) { + if (t >= 4) { + do { + COPY4(op, ip); + op += 4; + ip += 4; + t -= 4; + } while (t >= 4); + if (t > 0) { + do { + *op++ = *ip++; + } while (--t > 0); + } + } else { + do { + *op++ = *ip++; + } while (--t > 0); + } + } + +first_literal_run: + t = *ip++; + if (t >= 16) + goto match; + m_pos = op - (1 + M2_MAX_OFFSET); + m_pos -= t >> 2; + m_pos -= *ip++ << 2; + + if (HAVE_LB(m_pos, out, op)) + goto lookbehind_overrun; + + if (HAVE_OP(3, op_end, op)) + goto output_overrun; + *op++ = *m_pos++; + *op++ = *m_pos++; + *op++ = *m_pos; + + goto match_done; + + do { +match: + if (t >= 64) { + m_pos = op - 1; + m_pos -= (t >> 2) & 7; + m_pos -= *ip++ << 3; + t = (t >> 5) - 1; + if (HAVE_LB(m_pos, out, op)) + goto lookbehind_overrun; + if (HAVE_OP(t + 3 - 1, op_end, op)) + goto output_overrun; + goto copy_match; + } else if (t >= 32) { + t &= 31; + if (t == 0) { + if (HAVE_IP(1, ip_end, ip)) + goto input_overrun; + 
while (*ip == 0) { + t += 255; + ip++; + if (HAVE_IP(1, ip_end, ip)) + goto input_overrun; + } + t += 31 + *ip++; + } + m_pos = op - 1; + m_pos -= le16_to_cpu(get_unaligned( + (const unsigned short *)ip)) >> 2; + ip += 2; + } else if (t >= 16) { + m_pos = op; + m_pos -= (t & 8) << 11; + + t &= 7; + if (t == 0) { + if (HAVE_IP(1, ip_end, ip)) + goto input_overrun; + while (*ip == 0) { + t += 255; + ip++; + if (HAVE_IP(1, ip_end, ip)) + goto input_overrun; + } + t += 7 + *ip++; + } + m_pos -= le16_to_cpu(get_unaligned( + (const unsigned short *)ip) >> 2); + ip += 2; + if (m_pos == op) + goto eof_found; + m_pos -= 0x4000; + } else { + m_pos = op - 1; + m_pos -= t >> 2; + m_pos -= *ip++ << 2; + + if (HAVE_LB(m_pos, out, op)) + goto lookbehind_overrun; + if (HAVE_OP(2, op_end, op)) + goto output_overrun; + + *op++ = *m_pos++; + *op++ = *m_pos; + goto match_done; + } + + if (HAVE_LB(m_pos, out, op)) + goto lookbehind_overrun; + if (HAVE_OP(t + 3 - 1, op_end, op)) + goto output_overrun; + + if (t >= 2 * 4 - (3 - 1) && (op - m_pos) >= 4) { + COPY4(op, m_pos); + op += 4; + m_pos += 4; + t -= 4 - (3 - 1); + do { + COPY4(op, m_pos); + op += 4; + m_pos += 4; + t -= 4; + } while (t >= 4); + if (t > 0) + do { + *op++ = *m_pos++; + } while (--t > 0); + } else { +copy_match: + *op++ = *m_pos++; + *op++ = *m_pos++; + do { + *op++ = *m_pos++; + } while (--t > 0); + } +match_done: + t = ip[-2] & 3; + if (t == 0) + break; +match_next: + if (HAVE_OP(t, op_end, op)) + goto output_overrun; + if (HAVE_IP(t + 1, ip_end, ip)) + goto input_overrun; + + *op++ = *ip++; + if (t > 1) { + *op++ = *ip++; + if (t > 2) + *op++ = *ip++; + } + + t = *ip++; + } while (ip < ip_end); + } + + *out_len = op - out; + return LZO_E_EOF_NOT_FOUND; + +eof_found: + *out_len = op - out; + return (ip == ip_end ? LZO_E_OK : + (ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN)); +input_overrun: + *out_len = op - out; + return LZO_E_INPUT_OVERRUN; + +output_overrun: + *out_len = op - out; + return LZO_E_OUTPUT_OVERRUN; + +lookbehind_overrun: + *out_len = op - out; + return LZO_E_LOOKBEHIND_OVERRUN; +} + +EXPORT_SYMBOL_GPL(lzo1x_decompress_safe); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("LZO1X Decompressor"); + diff --git a/lib/lzo/lzodefs.h b/lib/lzo/lzodefs.h new file mode 100644 index 00000000000..b6d482c492e --- /dev/null +++ b/lib/lzo/lzodefs.h @@ -0,0 +1,43 @@ +/* + * lzodefs.h -- architecture, OS and compiler specific defines + * + * Copyright (C) 1996-2005 Markus F.X.J. 
Oberhumer <markus@oberhumer.com> + * + * The full LZO package can be found at: + * http://www.oberhumer.com/opensource/lzo/ + * + * Changed for kernel use by: + * Nitin Gupta <nitingupta910@gmail.com> + * Richard Purdie <rpurdie@openedhand.com> + */ + +#define LZO_VERSION 0x2020 +#define LZO_VERSION_STRING "2.02" +#define LZO_VERSION_DATE "Oct 17 2005" + +#define M1_MAX_OFFSET 0x0400 +#define M2_MAX_OFFSET 0x0800 +#define M3_MAX_OFFSET 0x4000 +#define M4_MAX_OFFSET 0xbfff + +#define M1_MIN_LEN 2 +#define M1_MAX_LEN 2 +#define M2_MIN_LEN 3 +#define M2_MAX_LEN 8 +#define M3_MIN_LEN 3 +#define M3_MAX_LEN 33 +#define M4_MIN_LEN 3 +#define M4_MAX_LEN 9 + +#define M1_MARKER 0 +#define M2_MARKER 64 +#define M3_MARKER 32 +#define M4_MARKER 16 + +#define D_BITS 14 +#define D_MASK ((1u << D_BITS) - 1) +#define D_HIGH ((D_MASK >> 1) + 1) + +#define DX2(p, s1, s2) (((((size_t)((p)[2]) << (s2)) ^ (p)[1]) \ + << (s1)) ^ (p)[0]) +#define DX3(p, s1, s2, s3) ((DX2((p)+1, s2, s3) << (s1)) ^ (p)[0]) diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 402eb4eb6b2..9927cca14cb 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -151,6 +151,7 @@ int radix_tree_preload(gfp_t gfp_mask) out: return ret; } +EXPORT_SYMBOL(radix_tree_preload); static inline void tag_set(struct radix_tree_node *node, unsigned int tag, int offset) |
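
Usage sketches

The LZO plumbing above is select-driven: LZO_COMPRESS and LZO_DECOMPRESS are bare tristates with no prompt, so a consumer pulls the library in from its own Kconfig entry rather than the user enabling it directly. A minimal sketch of such an entry (MYDRIVER is a hypothetical symbol, not part of this commit):

config MYDRIVER
	tristate "Example driver using the LZO library"
	select LZO_COMPRESS
	select LZO_DECOMPRESS
	help
	  Hypothetical consumer of the in-kernel LZO1X compressor
	  and decompressor.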
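
The reworked hexdump API threads a prefix string, row/group sizes and an ASCII flag through both functions. A minimal sketch of a caller, using the signatures exactly as added above (mydev_dump, buf and len are hypothetical); note the trailing space the caller supplies in the prefix for alignment, and that groupsize > 1 prints native-endian words, which is why the 4-byte example output in the comments shows byte-swapped groups on little-endian:

#include <linux/kernel.h>

static void mydev_dump(const void *buf, size_t len)
{
	/* 16 bytes per row, 1-byte groups, ASCII column included */
	print_hex_dump(KERN_DEBUG, "mydev rx: ", DUMP_PREFIX_OFFSET,
		       16, 1, (void *)buf, len, 1);

	/* shorthand with exactly those defaults */
	print_hex_dump_bytes("mydev rx: ", DUMP_PREFIX_OFFSET,
			     (void *)buf, len);
}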
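
idr_get_empty_slot() and idr_mark_full() are internal refactorings; module code keeps using the two-step pre-get/allocate pattern, retrying when the preallocation has been consumed. A sketch of that pattern (my_idr, my_lock and my_obj_register are hypothetical; the idr_get_new() signature is the existing one from <linux/idr.h>, which this hunk does not show):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

int my_obj_register(void *obj)
{
	int id, ret;
retry:
	if (!idr_pre_get(&my_idr, GFP_KERNEL))
		return -ENOMEM;
	spin_lock(&my_lock);
	ret = idr_get_new(&my_idr, obj, &id);
	spin_unlock(&my_lock);
	if (ret == -EAGAIN)
		goto retry;	/* preallocation was used up; refill it */
	return ret ? ret : id;
}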
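
The new ida follows the same calling convention, as its comments spell out: ida_pre_get() without locks held, ida_get_new()/ida_get_new_above() under whatever lock serializes the allocator, retry on -EAGAIN. A sketch with hypothetical my_ida and my_ida_lock (ida_init(&my_ida) must have run once beforehand):

#include <linux/idr.h>
#include <linux/spinlock.h>

static struct ida my_ida;	/* set up with ida_init(&my_ida) at init time */
static DEFINE_SPINLOCK(my_ida_lock);

int my_id_get(void)
{
	int id, ret;
again:
	if (!ida_pre_get(&my_ida, GFP_KERNEL))
		return -ENOMEM;
	spin_lock(&my_ida_lock);
	ret = ida_get_new(&my_ida, &id);
	spin_unlock(&my_ida_lock);
	if (ret == -EAGAIN)
		goto again;
	return ret ? ret : id;	/* 0 ... 0x7fffffff on success */
}

void my_id_put(int id)
{
	spin_lock(&my_ida_lock);
	ida_remove(&my_ida, id);
	spin_unlock(&my_ida_lock);
}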
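
On the kobject side, kset_register() now runs kset_init() itself and emits the KOBJ_ADD uevent once the add succeeds. A fragment sketch of the resulting caller (my_kset is hypothetical); code that previously paired kset_register() with an explicit uevent should drop the latter to avoid sending the event twice:

	err = kset_register(&my_kset);
	if (err)
		return err;
	/* kset_init() and the KOBJ_ADD uevent already happened inside */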
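
The two LZO entry points pair up as below. lzo1x_1_compress() needs scratch memory and a destination large enough for incompressible input; LZO1X_1_MEM_COMPRESS and the lzo1x_worst_compress() bound are assumed to come from the <linux/lzo.h> header accompanying this series. A round-trip sketch:

#include <linux/errno.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static int lzo_roundtrip(const unsigned char *src, size_t src_len)
{
	size_t clen = lzo1x_worst_compress(src_len);	/* worst-case bound */
	size_t dlen = src_len;	/* in: buffer size, out: bytes produced */
	unsigned char *cbuf = vmalloc(clen);
	unsigned char *dbuf = vmalloc(src_len);
	void *wrkmem = vmalloc(LZO1X_1_MEM_COMPRESS);
	int ret = -ENOMEM;

	if (!cbuf || !dbuf || !wrkmem)
		goto out;

	ret = -EINVAL;
	if (lzo1x_1_compress(src, src_len, cbuf, &clen, wrkmem) != LZO_E_OK)
		goto out;
	if (lzo1x_decompress_safe(cbuf, clen, dbuf, &dlen) != LZO_E_OK ||
	    dlen != src_len || memcmp(dbuf, src, src_len))
		goto out;
	ret = 0;
out:
	vfree(wrkmem);		/* vfree(NULL) is a no-op */
	vfree(dbuf);
	vfree(cbuf);
	return ret;
}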
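
Finally, exporting radix_tree_preload() lets modules use the preload-then-insert idiom that core code already relies on: preallocate nodes outside the lock so radix_tree_insert() under the lock cannot fail for lack of memory. A sketch with hypothetical my_tree and my_tree_lock:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(my_tree_lock);

int my_store(unsigned long index, void *item)
{
	int err = radix_tree_preload(GFP_KERNEL);

	if (err)
		return err;
	spin_lock(&my_tree_lock);
	err = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_tree_lock);
	radix_tree_preload_end();	/* drops the preemption disable taken by preload */
	return err;
}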