Diffstat (limited to 'lib')
-rw-r--r--  lib/swiotlb.c |  56 ++++++++++++++++++++++++++++++++------------------------
1 file changed, 32 insertions(+), 24 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index bc684d1fd42..067eed5b275 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -1,7 +1,7 @@
/*
* Dynamic DMA mapping support.
*
- * This implementation is for IA-64 and EM64T platforms that do not support
+ * This implementation is a fallback for platforms that do not support
* I/O TLBs (aka DMA address translation hardware).
* Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
* Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
@@ -129,23 +129,25 @@ __setup("swiotlb=", setup_io_tlb_npages);
* Statically reserve bounce buffer space and initialize bounce buffer data
* structures for the software IO TLB used to implement the DMA API.
*/
-void
-swiotlb_init_with_default_size (size_t default_size)
+void __init
+swiotlb_init_with_default_size(size_t default_size)
{
- unsigned long i;
+ unsigned long i, bytes;
if (!io_tlb_nslabs) {
io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
+ bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+
/*
* Get IO TLB memory from the low pages
*/
- io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+ io_tlb_start = alloc_bootmem_low_pages(bytes);
if (!io_tlb_start)
panic("Cannot allocate SWIOTLB buffer");
- io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+ io_tlb_end = io_tlb_start + bytes;
/*
* Allocate and initialize the free list array. This array is used
@@ -162,12 +164,15 @@ swiotlb_init_with_default_size (size_t default_size)
* Get the overflow emergency buffer
*/
io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+ if (!io_tlb_overflow_buffer)
+ panic("Cannot allocate SWIOTLB overflow buffer!\n");
+
printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
}
-void
-swiotlb_init (void)
+void __init
+swiotlb_init(void)
{
swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
}
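The hunks above hoist the repeated size expression into a single bytes variable, replace the multiply with the equivalent shift, and add a panic when the overflow buffer cannot be allocated. A minimal sketch, not part of the patch, checking that the two size spellings agree (assuming this file's IO_TLB_SHIFT of 11, i.e. 2 KiB slabs):

#include <assert.h>

#define IO_TLB_SHIFT 11                 /* 2 KiB slabs, as in swiotlb.c */

int main(void)
{
	unsigned long nslabs = (64UL << 20) >> IO_TLB_SHIFT;  /* 64 MB default */
	unsigned long old = nslabs * (1 << IO_TLB_SHIFT);     /* old spelling */
	unsigned long bytes = nslabs << IO_TLB_SHIFT;         /* new spelling */

	assert(old == bytes && bytes == 64UL << 20);
	return 0;
}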
@@ -178,9 +183,9 @@ swiotlb_init (void)
* This should be just like above, but with some error catching.
*/
int
-swiotlb_late_init_with_default_size (size_t default_size)
+swiotlb_late_init_with_default_size(size_t default_size)
{
- unsigned long i, req_nslabs = io_tlb_nslabs;
+ unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
unsigned int order;
if (!io_tlb_nslabs) {
@@ -191,8 +196,9 @@ swiotlb_late_init_with_default_size (size_t default_size)
/*
* Get IO TLB memory from the low pages
*/
- order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+ order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
io_tlb_nslabs = SLABS_PER_PAGE << order;
+ bytes = io_tlb_nslabs << IO_TLB_SHIFT;
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
@@ -205,13 +211,14 @@ swiotlb_late_init_with_default_size (size_t default_size)
if (!io_tlb_start)
goto cleanup1;
- if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
+ if (order != get_order(bytes)) {
printk(KERN_WARNING "Warning: only able to allocate %ld MB "
"for software IO TLB\n", (PAGE_SIZE << order) >> 20);
io_tlb_nslabs = SLABS_PER_PAGE << order;
+ bytes = io_tlb_nslabs << IO_TLB_SHIFT;
}
- io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
- memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+ io_tlb_end = io_tlb_start + bytes;
+ memset(io_tlb_start, 0, bytes);
/*
* Allocate and initialize the free list array. This array is used
@@ -242,8 +249,8 @@ swiotlb_late_init_with_default_size (size_t default_size)
if (!io_tlb_overflow_buffer)
goto cleanup4;
- printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
- "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
+ printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
+ "0x%lx\n", bytes >> 20,
virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
return 0;
@@ -256,8 +263,8 @@ cleanup3:
free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
sizeof(int)));
io_tlb_list = NULL;
- io_tlb_end = NULL;
cleanup2:
+ io_tlb_end = NULL;
free_pages((unsigned long)io_tlb_start, order);
io_tlb_start = NULL;
cleanup1:
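This hunk moves the io_tlb_end reset below the cleanup2: label, so the goto cleanup2 taken when a later allocation fails also clears io_tlb_end; previously only the cleanup3 path cleared it, leaving a stale pointer after io_tlb_start was freed. A condensed, hypothetical model of that unwind ordering:

#include <stdlib.h>

static char *start, *end;

static int setup(size_t bytes)
{
	int *list;

	start = malloc(bytes);
	if (!start)
		goto cleanup1;
	end = start + bytes;		/* state that now needs undoing */

	list = malloc(128 * sizeof(*list));
	if (!list)
		goto cleanup2;		/* must reset 'end' as well */

	free(list);
	return 0;

cleanup2:
	end = NULL;			/* was wrongly placed above the label */
	free(start);
	start = NULL;
cleanup1:
	return -1;
}

int main(void)
{
	return setup(1UL << 20) ? 1 : 0;
}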
@@ -433,7 +440,7 @@ void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
- unsigned long dev_addr;
+ dma_addr_t dev_addr;
void *ret;
int order = get_order(size);
@@ -473,8 +480,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
/* Confirm address can be DMA'd by device */
if (address_needs_mapping(hwdev, dev_addr)) {
- printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n",
- (unsigned long long)*hwdev->dma_mask, dev_addr);
+ printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+ (unsigned long long)*hwdev->dma_mask,
+ (unsigned long long)dev_addr);
panic("swiotlb_alloc_coherent: allocated memory is out of "
"range for device");
}
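These two hunks switch dev_addr from unsigned long to dma_addr_t and fix the printk to match: dma_addr_t may be 32 or 64 bits wide depending on the platform, so an unsigned long could truncate it, and printk needs an explicit widening cast. A hypothetical helper, not in the patch, showing the same idiom:

#include <linux/kernel.h>
#include <linux/types.h>

/* Widen dma_addr_t explicitly; its size varies across platforms. */
static inline void print_dma_addr(const char *what, dma_addr_t addr)
{
	printk(KERN_DEBUG "%s = 0x%016Lx\n", what,
	       (unsigned long long)addr);
}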
@@ -504,7 +512,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
* When the mapping is small enough return a static buffer to limit
* the damage, or panic when the transfer is too big.
*/
- printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at "
+ printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
"device %s\n", size, dev ? dev->bus_id : "?");
if (size > io_tlb_overflow && do_panic) {
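The specifier change matters because size is a size_t, whose width differs between 32- and 64-bit builds; C99's %zu matches size_t everywhere, while the old %lu was only correct where size_t happens to be unsigned long. A two-line fragment, assuming a kernel context:

size_t size = 4096;
printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes\n", size);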
@@ -525,7 +533,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
- unsigned long dev_addr = virt_to_bus(ptr);
+ dma_addr_t dev_addr = virt_to_bus(ptr);
void *map;
BUG_ON(dir == DMA_NONE);
@@ -669,7 +677,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
int dir)
{
void *addr;
- unsigned long dev_addr;
+ dma_addr_t dev_addr;
int i;
BUG_ON(dir == DMA_NONE);
@@ -765,7 +773,7 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
* this function.
*/
int
-swiotlb_dma_supported (struct device *hwdev, u64 mask)
+swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
return virt_to_bus(io_tlb_end - 1) <= mask;
}
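The final hunk only drops a stray space from the definition, but the body is worth noting: swiotlb_dma_supported() reports whether a device's DMA mask covers the bounce pool, and io_tlb_end - 1 is the pool's last valid byte, which makes the comparison inclusive. A hedged caller sketch, where dev is a hypothetical struct device:

if (!swiotlb_dma_supported(dev, DMA_32BIT_MASK))
	dev_warn(dev, "SWIOTLB pool lies above the 32-bit DMA mask\n");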