Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig.debug1
-rw-r--r--lib/bitmap.c81
-rw-r--r--lib/genalloc.c33
-rw-r--r--lib/iommu-helper.c59
-rw-r--r--lib/lmb.c7
-rw-r--r--lib/swiotlb.c4
-rw-r--r--lib/vsprintf.c13
7 files changed, 123 insertions, 75 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8cf9938dd14..25c3ed594c5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -360,6 +360,7 @@ config DEBUG_KMEMLEAK
select DEBUG_FS if SYSFS
select STACKTRACE if STACKTRACE_SUPPORT
select KALLSYMS
+ select CRC32
help
Say Y here if you want to enable the memory leak
detector. The memory allocation/freeing is traced in a way
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 702565821c9..11bf4975058 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -271,6 +271,87 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
}
EXPORT_SYMBOL(__bitmap_weight);
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
+
+void bitmap_set(unsigned long *map, int start, int nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const int size = start + nr;
+ int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+ while (nr - bits_to_set >= 0) {
+ *p |= mask_to_set;
+ nr -= bits_to_set;
+ bits_to_set = BITS_PER_LONG;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (nr) {
+ mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+ *p |= mask_to_set;
+ }
+}
+EXPORT_SYMBOL(bitmap_set);
+
+void bitmap_clear(unsigned long *map, int start, int nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const int size = start + nr;
+ int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+ while (nr - bits_to_clear >= 0) {
+ *p &= ~mask_to_clear;
+ nr -= bits_to_clear;
+ bits_to_clear = BITS_PER_LONG;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (nr) {
+ mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+ *p &= ~mask_to_clear;
+ }
+}
+EXPORT_SYMBOL(bitmap_clear);
+
+/*
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @align_mask: Alignment mask for zero area
+ *
+ * The @align_mask should be one less than a power of 2; the effect is that
+ * the bit offset of all zero areas this function finds is a multiple of that
+ * power of 2. An @align_mask of 0 means no alignment is required.
+ */
+unsigned long bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask)
+{
+ unsigned long index, end, i;
+again:
+ index = find_next_zero_bit(map, size, start);
+
+ /* Align allocation */
+ index = __ALIGN_MASK(index, align_mask);
+
+ end = index + nr;
+ if (end > size)
+ return end;
+ i = find_next_bit(map, end, index);
+ if (i < end) {
+ start = i + 1;
+ goto again;
+ }
+ return index;
+}
+EXPORT_SYMBOL(bitmap_find_next_zero_area);
+
/*
* Bitmap printing & parsing functions: first version by Bill Irwin,
* second version by Paul Jackson, third by Joe Korty.
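
A hedged usage sketch of the three helpers added above (bitmap_find_next_zero_area(), bitmap_set(), bitmap_clear()): the pool name, sizes and the lock-free context are illustrative, not part of this patch. Passing align - 1 as the mask requests an allocation aligned to a power-of-two boundary, matching the kernel-doc above.

#include <linux/bitmap.h>
#include <linux/errno.h>

#define POOL_BITS 256

static DECLARE_BITMAP(pool_map, POOL_BITS);

/* Reserve @nr contiguous bits aligned to @align (a power of 2).
 * Returns the first bit index, or -ENOSPC if no suitable run is free.
 * Real callers would serialize access; locking is omitted here. */
static int pool_reserve(unsigned int nr, unsigned int align)
{
	unsigned long start;

	start = bitmap_find_next_zero_area(pool_map, POOL_BITS, 0,
					   nr, align - 1);
	if (start >= POOL_BITS)		/* failure: the returned end exceeds the size */
		return -ENOSPC;

	bitmap_set(pool_map, start, nr);
	return start;
}

static void pool_release(unsigned int start, unsigned int nr)
{
	bitmap_clear(pool_map, start, nr);
}
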
diff --git a/lib/genalloc.c b/lib/genalloc.c
index eed2bdb865e..e67f97495dd 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -11,6 +11,7 @@
*/
#include <linux/module.h>
+#include <linux/bitmap.h>
#include <linux/genalloc.h>
@@ -114,7 +115,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
struct gen_pool_chunk *chunk;
unsigned long addr, flags;
int order = pool->min_alloc_order;
- int nbits, bit, start_bit, end_bit;
+ int nbits, start_bit, end_bit;
if (size == 0)
return 0;
@@ -129,29 +130,19 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
end_bit -= nbits + 1;
spin_lock_irqsave(&chunk->lock, flags);
- bit = -1;
- while (bit + 1 < end_bit) {
- bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
- if (bit >= end_bit)
- break;
-
- start_bit = bit;
- if (nbits > 1) {
- bit = find_next_bit(chunk->bits, bit + nbits,
- bit + 1);
- if (bit - start_bit < nbits)
- continue;
- }
-
- addr = chunk->start_addr +
- ((unsigned long)start_bit << order);
- while (nbits--)
- __set_bit(start_bit++, chunk->bits);
+ start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
+ nbits, 0);
+ if (start_bit >= end_bit) {
spin_unlock_irqrestore(&chunk->lock, flags);
- read_unlock(&pool->lock);
- return addr;
+ continue;
}
+
+ addr = chunk->start_addr + ((unsigned long)start_bit << order);
+
+ bitmap_set(chunk->bits, start_bit, nbits);
spin_unlock_irqrestore(&chunk->lock, flags);
+ read_unlock(&pool->lock);
+ return addr;
}
read_unlock(&pool->lock);
return 0;
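
For context, a minimal sketch of the gen_pool API whose allocation path is rewritten above; the SRAM region, order and sizes are hypothetical and error handling is trimmed.

#include <linux/genalloc.h>
#include <linux/errno.h>

static struct gen_pool *sram_pool;

/* Hypothetical: manage a 64 KiB SRAM window in 32-byte granules. */
static int sram_pool_init(unsigned long sram_virt_addr)
{
	sram_pool = gen_pool_create(5, -1);	/* min_alloc_order = 5 -> 32 bytes */
	if (!sram_pool)
		return -ENOMEM;

	return gen_pool_add(sram_pool, sram_virt_addr, 64 * 1024, -1);
}

static unsigned long sram_alloc_buffer(void)
{
	/* rounds up to whole 32-byte chunks, then uses
	 * bitmap_find_next_zero_area()/bitmap_set() as shown above */
	return gen_pool_alloc(sram_pool, 1024);	/* returns 0 on failure */
}

static void sram_free_buffer(unsigned long addr)
{
	gen_pool_free(sram_pool, addr, 1024);
}
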
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index 75dbda03f4f..c0251f4ad08 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -3,41 +3,7 @@
*/
#include <linux/module.h>
-#include <linux/bitops.h>
-
-static unsigned long find_next_zero_area(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask)
-{
- unsigned long index, end, i;
-again:
- index = find_next_zero_bit(map, size, start);
-
- /* Align allocation */
- index = (index + align_mask) & ~align_mask;
-
- end = index + nr;
- if (end >= size)
- return -1;
- for (i = index; i < end; i++) {
- if (test_bit(i, map)) {
- start = i+1;
- goto again;
- }
- }
- return index;
-}
-
-void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
-{
- unsigned long end = i + len;
- while (i < end) {
- __set_bit(i, map);
- i++;
- }
-}
+#include <linux/bitmap.h>
int iommu_is_span_boundary(unsigned int index, unsigned int nr,
unsigned long shift,
@@ -55,31 +21,24 @@ unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long align_mask)
{
unsigned long index;
+
+ /* The very last bit of the map is never handed out; trim the limit */
+ size -= 1;
again:
- index = find_next_zero_area(map, size, start, nr, align_mask);
- if (index != -1) {
+ index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+ if (index < size) {
if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
/* we could do more effectively */
start = index + 1;
goto again;
}
- iommu_area_reserve(map, index, nr);
+ bitmap_set(map, index, nr);
+ return index;
}
- return index;
+ return -1;
}
EXPORT_SYMBOL(iommu_area_alloc);
-void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
-{
- unsigned long end = start + nr;
-
- while (start < end) {
- __clear_bit(start, map);
- start++;
- }
-}
-EXPORT_SYMBOL(iommu_area_free);
-
unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
unsigned long io_page_size)
{
diff --git a/lib/lmb.c b/lib/lmb.c
index 0343c05609f..9cee17142b2 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -263,7 +263,7 @@ long __init lmb_reserve(u64 base, u64 size)
return lmb_add_region(_rgn, base, size);
}
-long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
+long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
{
unsigned long i;
@@ -493,6 +493,11 @@ int __init lmb_is_reserved(u64 addr)
return 0;
}
+int lmb_is_region_reserved(u64 base, u64 size)
+{
+ return lmb_overlaps_region(&lmb.reserved, base, size);
+}
+
/*
* Given a <base, len>, find which memory regions belong to this range.
* Adjust the request and return a contiguous chunk.
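
A hedged sketch of how the newly exported lmb_is_region_reserved() might be used by early platform code. The firmware range is made up, and the return convention (index of an overlapping reserved region, negative when nothing overlaps) is assumed from lmb_overlaps_region(), whose body is not shown here.

#include <linux/init.h>
#include <linux/lmb.h>

#define FW_HOLE_BASE	0x0f000000ULL	/* hypothetical firmware carve-out */
#define FW_HOLE_SIZE	0x00100000ULL

static void __init reserve_fw_hole(void)
{
	/* a non-negative result means a reserved region already overlaps */
	if (lmb_is_region_reserved(FW_HOLE_BASE, FW_HOLE_SIZE) >= 0)
		return;

	lmb_reserve(FW_HOLE_BASE, FW_HOLE_SIZE);
}
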
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 5bc01803f8f..437eedb5a53 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -549,7 +549,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_mask = hwdev->coherent_dma_mask;
ret = (void *)__get_free_pages(flags, order);
- if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
+ if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
/*
* The allocated memory isn't reachable by the device.
*/
@@ -571,7 +571,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dev_addr = swiotlb_virt_to_bus(hwdev, ret);
/* Confirm address can be DMA'd by device */
- if (dev_addr + size > dma_mask) {
+ if (dev_addr + size - 1 > dma_mask) {
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
(unsigned long long)dma_mask,
(unsigned long long)dev_addr);
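
The two swiotlb hunks fix an off-by-one in the DMA-mask comparison: what must fit under the mask is the address of the buffer's last byte, not the address one past it. A small standalone sketch with made-up values illustrates the difference:

#include <stdio.h>

int main(void)
{
	unsigned long long dma_mask = 0xffffffffULL;	/* 32-bit coherent mask */
	unsigned long long dev_addr = 0xfffff000ULL;	/* bus address of buffer */
	unsigned long long size     = 0x1000ULL;	/* 4 KiB, ends at 4 GiB */

	/* old test: end is one past the last byte, so a buffer ending
	 * exactly at the 4 GiB line is rejected although it is reachable */
	printf("old check rejects: %d\n", dev_addr + size > dma_mask);

	/* new test: compare the address of the last byte itself */
	printf("new check rejects: %d\n", dev_addr + size - 1 > dma_mask);
	return 0;
}
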
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 735343fc857..d4996cf46eb 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1179,7 +1179,18 @@ qualifier:
* %ps output the name of a text symbol without offset
* %pF output the name of a function pointer with its offset
* %pf output the name of a function pointer without its offset
- * %pR output the address range in a struct resource
+ * %pR output the address range in a struct resource with decoded flags
+ * %pr output the address range in a struct resource with raw flags
+ * %pM output a 6-byte MAC address with colons
+ * %pm output a 6-byte MAC address without colons
+ * %pI4 print an IPv4 address without leading zeros
+ * %pi4 print an IPv4 address with leading zeros
+ * %pI6 print an IPv6 address with colons
+ * %pi6 print an IPv6 address without colons
+ * %pI6c print an IPv6 address as specified by
+ * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt
+ * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper
+ * case.
* %n is ignored
*
* The return value is the number of characters which would
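
A few illustrative printk() calls exercising the pointer extensions documented above; the function and its variables are hypothetical.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/types.h>

static void demo_pointer_formats(const struct resource *res, const u8 *mac,
				 const __be32 *ip4, const u8 *uuid)
{
	printk(KERN_INFO "resource: %pR (raw flags: %pr)\n", res, res);
	printk(KERN_INFO "station MAC: %pM\n", mac);	/* e.g. 00:16:3e:12:34:56 */
	printk(KERN_INFO "peer address: %pI4\n", ip4);	/* e.g. 10.0.0.1 */
	printk(KERN_INFO "volume id: %pUb\n", uuid);	/* 16-byte UUID, big endian */
}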