Diffstat (limited to 'lib')
-rw-r--r--  lib/bug.c            | 19
-rw-r--r--  lib/cpumask.c        | 79
-rw-r--r--  lib/dynamic_printk.c |  6
-rw-r--r--  lib/idr.c            | 22
-rw-r--r--  lib/percpu_counter.c |  7
-rw-r--r--  lib/scatterlist.c    |  2
-rw-r--r--  lib/swiotlb.c        | 10
7 files changed, 128 insertions, 17 deletions
diff --git a/lib/bug.c b/lib/bug.c
index bfeafd60ee9..300e41afbf9 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -5,6 +5,8 @@
CONFIG_BUG - emit BUG traps. Nothing happens without this.
CONFIG_GENERIC_BUG - enable this code.
+ CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
+ the containing struct bug_entry for bug_addr and file.
CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG
CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
@@ -43,6 +45,15 @@
extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
+static inline unsigned long bug_addr(const struct bug_entry *bug)
+{
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ return bug->bug_addr;
+#else
+ return (unsigned long)bug + bug->bug_addr_disp;
+#endif
+}
+
#ifdef CONFIG_MODULES
static LIST_HEAD(module_bug_list);
@@ -55,7 +66,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
unsigned i;
for (i = 0; i < mod->num_bugs; ++i, ++bug)
- if (bugaddr == bug->bug_addr)
+ if (bugaddr == bug_addr(bug))
return bug;
}
return NULL;
@@ -108,7 +119,7 @@ const struct bug_entry *find_bug(unsigned long bugaddr)
const struct bug_entry *bug;
for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
- if (bugaddr == bug->bug_addr)
+ if (bugaddr == bug_addr(bug))
return bug;
return module_find_bug(bugaddr);
@@ -133,7 +144,11 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
if (bug) {
#ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
file = bug->file;
+#else
+ file = (const char *)bug + bug->file_disp;
+#endif
line = bug->line;
#endif
warning = (bug->flags & BUGFLAG_WARNING) != 0;
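For context, a hedged sketch of the two struct bug_entry layouts implied by the accessors above (a reconstruction from the helper's field names, not verbatim asm-generic/bug.h):

struct bug_entry {
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
	unsigned long	bug_addr;	/* absolute address of the trap */
#else
	signed int	bug_addr_disp;	/* 32-bit offset from this entry */
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
	const char	*file;
#else
	signed int	file_disp;	/* 32-bit offset from this entry */
#endif
	unsigned short	line;
#endif
	unsigned short	flags;
};

On 64-bit targets the relative form halves the table's pointer footprint and keeps the entries position-independent, which is why bug_addr() and the file lookup add the stored displacement to the entry's own address.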
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 5f97dc25ef9..8d03f22c6ce 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -2,6 +2,7 @@
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/module.h>
+#include <linux/bootmem.h>
int __first_cpu(const cpumask_t *srcp)
{
@@ -35,3 +36,81 @@ int __any_online_cpu(const cpumask_t *mask)
return cpu;
}
EXPORT_SYMBOL(__any_online_cpu);
+
+/**
+ * cpumask_next_and - get the next cpu in *src1p & *src2p
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set in both.
+ */
+int cpumask_next_and(int n, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
+ if (cpumask_test_cpu(n, src2p))
+ break;
+ return n;
+}
+EXPORT_SYMBOL(cpumask_next_and);
+
+/**
+ * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
+ * @mask: the cpumask to search
+ * @cpu: the cpu to ignore.
+ *
+ * Often used to find any cpu but smp_processor_id() in a mask.
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
+{
+ unsigned int i;
+
+ cpumask_check(cpu);
+ for_each_cpu(i, mask)
+ if (i != cpu)
+ break;
+ return i;
+}
+
+/* These are not inline because of header tangles. */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ if (likely(slab_is_available()))
+ *mask = kmalloc(cpumask_size(), flags);
+ else {
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+ printk(KERN_ERR
+ "=> alloc_cpumask_var: kmalloc not available!\n");
+ dump_stack();
+#endif
+ *mask = NULL;
+ }
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+ if (!*mask) {
+ printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
+ dump_stack();
+ }
+#endif
+ return *mask != NULL;
+}
+EXPORT_SYMBOL(alloc_cpumask_var);
+
+void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+{
+ *mask = alloc_bootmem(cpumask_size());
+}
+
+void free_cpumask_var(cpumask_var_t mask)
+{
+ kfree(mask);
+}
+EXPORT_SYMBOL(free_cpumask_var);
+
+void __init free_bootmem_cpumask_var(cpumask_var_t mask)
+{
+ free_bootmem((unsigned long)mask, cpumask_size());
+}
+#endif
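A hedged usage sketch of the new helpers (count_common_cpus is illustrative, and it assumes the companion cpumask_and() from the same new-style cpumask API):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative only: materialize a & b in an off-stack mask, walk it. */
static int count_common_cpus(const struct cpumask *a, const struct cpumask *b)
{
	cpumask_var_t both;
	int cpu, count = 0;

	if (!alloc_cpumask_var(&both, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(both, a, b);
	for_each_cpu(cpu, both)
		count++;

	free_cpumask_var(both);
	return count;
}

cpumask_next_and() lets a caller skip the temporary entirely: starting from -1, each call returns the next cpu set in both masks until it reaches nr_cpu_ids. With CONFIG_CPUMASK_OFFSTACK disabled, cpumask_var_t degenerates to a one-element array, so the same code compiles down to a stack mask with no allocation.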
diff --git a/lib/dynamic_printk.c b/lib/dynamic_printk.c
index d83660fd6fd..8e30295e856 100644
--- a/lib/dynamic_printk.c
+++ b/lib/dynamic_printk.c
@@ -135,7 +135,7 @@ int unregister_dynamic_debug_module(char *mod_name)
nr_entries--;
out:
up(&debug_list_mutex);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(unregister_dynamic_debug_module);
@@ -289,7 +289,7 @@ static ssize_t pr_debug_write(struct file *file, const char __user *buf,
dynamic_enabled = DYNAMIC_ENABLED_SOME;
err = 0;
printk(KERN_DEBUG
- "debugging enabled for module %s",
+ "debugging enabled for module %s\n",
elem->name);
} else if (!value && (elem->enable == 1)) {
elem->enable = 0;
@@ -309,7 +309,7 @@ static ssize_t pr_debug_write(struct file *file, const char __user *buf,
err = 0;
printk(KERN_DEBUG
"debugging disabled for module "
- "%s", elem->name);
+ "%s\n", elem->name);
}
}
}
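Both hunks are small correctness fixes: unregister_dynamic_debug_module() used to report success unconditionally, and the two printks lacked trailing newlines. A hedged sketch of the return-status pattern the first fix restores (generic list code, not the file's actual data structures):

#include <linux/errno.h>
#include <linux/string.h>

struct entry {
	const char *name;
	struct entry *next;
};

/* Track a status and return it, rather than a hardcoded 0, so the
 * caller learns whether the name was actually found and removed. */
static int unregister_entry(struct entry **head, const char *name)
{
	struct entry **pp;
	int ret = -EINVAL;

	for (pp = head; *pp; pp = &(*pp)->next) {
		if (strcmp((*pp)->name, name) == 0) {
			*pp = (*pp)->next;
			ret = 0;
			break;
		}
	}
	return ret;
}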
diff --git a/lib/idr.c b/lib/idr.c
index e728c7fccc4..1c4f9281f41 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -185,6 +185,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
new = get_from_free_list(idp);
if (!new)
return -1;
+ new->layer = l-1;
rcu_assign_pointer(p->ary[m], new);
p->count++;
}
@@ -210,6 +211,7 @@ build_up:
if (unlikely(!p)) {
if (!(p = get_from_free_list(idp)))
return -1;
+ p->layer = 0;
layers = 1;
}
/*
@@ -218,8 +220,14 @@ build_up:
*/
while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
layers++;
- if (!p->count)
+ if (!p->count) {
+ /* special case: if the tree is currently empty,
+ * then we grow the tree by moving the top node
+ * upwards.
+ */
+ p->layer++;
continue;
+ }
if (!(new = get_from_free_list(idp))) {
/*
* The allocation failed. If we built part of
@@ -237,6 +245,7 @@ build_up:
}
new->ary[0] = p;
new->count = 1;
+ new->layer = layers-1;
if (p->bitmap == IDR_FULL)
__set_bit(0, &new->bitmap);
p = new;
@@ -493,17 +502,21 @@ void *idr_find(struct idr *idp, int id)
int n;
struct idr_layer *p;
- n = idp->layers * IDR_BITS;
p = rcu_dereference(idp->top);
+ if (!p)
+ return NULL;
+ n = (p->layer+1) * IDR_BITS;
/* Mask off upper bits we don't use for the search. */
id &= MAX_ID_MASK;
if (id >= (1 << n))
return NULL;
+ BUG_ON(n == 0);
while (n > 0 && p) {
n -= IDR_BITS;
+ BUG_ON(n != p->layer*IDR_BITS);
p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
}
return((void *)p);
@@ -582,8 +595,11 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
int n;
struct idr_layer *p, *old_p;
- n = idp->layers * IDR_BITS;
p = idp->top;
+ if (!p)
+ return ERR_PTR(-EINVAL);
+
+ n = (p->layer+1) * IDR_BITS;
id &= MAX_ID_MASK;
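The common thread of these hunks is that every idr_layer now records its own depth in p->layer, so lookups derive the shift count from the top node itself rather than from idp->layers, which is not updated atomically with respect to RCU readers. A hedged sketch of the era's allocation pattern that exercises these paths (store_object is illustrative):

#include <linux/idr.h>

/* Illustrative caller of the ~2.6.28 idr API; the first successful
 * idr_get_new() hits exactly the p->layer = 0 initialization above. */
static int store_object(struct idr *idp, void *obj, int *out_id)
{
	int err;

	do {
		if (!idr_pre_get(idp, GFP_KERNEL))
			return -ENOMEM;
		err = idr_get_new(idp, obj, out_id);
	} while (err == -EAGAIN);

	return err;	/* 0 on success; *out_id holds the new id */
}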
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index a8663890a88..b255b939bc1 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -62,10 +62,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
- *pcount = 0;
}
- fbc->count = ret;
-
spin_unlock(&fbc->lock);
return ret;
}
@@ -104,13 +101,13 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
if (!fbc->counters)
return;
- free_percpu(fbc->counters);
- fbc->counters = NULL;
#ifdef CONFIG_HOTPLUG_CPU
mutex_lock(&percpu_counters_lock);
list_del(&fbc->list);
mutex_unlock(&percpu_counters_lock);
#endif
+ free_percpu(fbc->counters);
+ fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
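Two independent fixes: __percpu_counter_sum() no longer zeroes the per-cpu deltas or rewrites fbc->count, making it a pure read, and percpu_counter_destroy() now unhooks the counter from the hotplug list before freeing it, closing a window in which a CPU-hotplug callback could touch freed memory. A hedged sketch contrasting the two read flavours (report() is illustrative):

#include <linux/kernel.h>
#include <linux/percpu_counter.h>

/* Illustrative only: the cheap approximate read versus the exact sum
 * that the now-side-effect-free __percpu_counter_sum() backs. */
static s64 report(struct percpu_counter *fbc)
{
	s64 fast  = percpu_counter_read(fbc);	/* fbc->count only; may lag */
	s64 exact = percpu_counter_sum(fbc);	/* locks, folds per-cpu deltas */

	pr_info("approx=%lld exact=%lld\n",
		(long long)fast, (long long)exact);
	return exact;
}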
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 8d2688ff135..b7b449dafbe 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -395,7 +395,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
WARN_ON(!irqs_disabled());
kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
} else
- kunmap(miter->addr);
+ kunmap(miter->page);
miter->page = NULL;
miter->addr = NULL;
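The one-liner restores the kmap pairing rule: kmap() and kunmap() both take the struct page, while kunmap_atomic() takes the mapped address. A hedged sketch (touch_page is illustrative):

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: the non-atomic API is page-based at both ends. */
static void touch_page(struct page *page)
{
	void *addr = kmap(page);

	memset(addr, 0, PAGE_SIZE);
	kunmap(page);		/* pass the page, not 'addr' */
}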
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 78330c37a61..5f6c629a924 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -467,9 +467,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t dev_addr;
void *ret;
int order = get_order(size);
+ u64 dma_mask = DMA_32BIT_MASK;
+
+ if (hwdev && hwdev->coherent_dma_mask)
+ dma_mask = hwdev->coherent_dma_mask;
ret = (void *)__get_free_pages(flags, order);
- if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
+ if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
/*
* The allocated memory isn't reachable by the device.
* Fall back on swiotlb_map_single().
@@ -493,9 +497,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dev_addr = virt_to_bus(ret);
/* Confirm address can be DMA'd by device */
- if (address_needs_mapping(hwdev, dev_addr, size)) {
+ if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
- (unsigned long long)*hwdev->dma_mask,
+ (unsigned long long)dma_mask,
(unsigned long long)dev_addr);
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
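The allocation path now checks reachability against the device's coherent_dma_mask, falling back to a 32-bit mask when the device supplies none, instead of against the streaming dma_mask. For reference, a hedged reconstruction of the helper being called (based on the era's include/linux/dma-mapping.h definition, not verbatim):

#include <linux/types.h>

/* True if [addr, addr + size) fits under the device's DMA mask. */
static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
{
	return addr + size <= mask;
}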