author		Glauber Costa <gcosta@redhat.com>	2008-04-08 13:20:58 -0300
committer	Ingo Molnar <mingo@elte.hu>	2008-04-19 19:19:58 +0200
commit		d09d815c1b1d437a3ea89ecd92c91179266d1243 (patch)
tree		860aac1786676a01909d8ad1325bd3ffbe029039 /arch
parent		8e8edc6401205da3000cc3dfa76f3fd28a21d73c (diff)
x86: isolate coherent mapping functions
i386 implements the declare coherent memory API, and x86_64 does not.
This is reflected in pieces of dma_alloc_coherent and dma_free_coherent.
Those pieces are isolated in separate functions, which are declared
as empty macros in x86_64. This way we can make the code the same.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/pci-dma_32.c	51
-rw-r--r--	arch/x86/kernel/pci-dma_64.c	11
2 files changed, 45 insertions, 17 deletions
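Before the raw diff, a minimal sketch of the pattern the commit message describes: the configuration that has a per-device coherent pool compiles real helpers, the configuration that lacks it compiles the same names as "(0)" macros, and the shared allocation and free paths stay identical. The sketch is illustrative only; coherent_pool_alloc(), coherent_pool_free() and the HAVE_COHERENT_POOL switch are invented stand-ins, not kernel interfaces.

/*
 * Illustrative sketch only (not kernel code): the helper is a real function
 * where the feature exists and a "(0)" macro where it does not, so the
 * common paths never need an #ifdef.
 */
#include <stdlib.h>
#include <string.h>

#ifdef HAVE_COHERENT_POOL
/* Hypothetical pool hooks, assumed to be provided elsewhere. */
void *coherent_pool_alloc(size_t size);
int coherent_pool_free(void *vaddr);

static int alloc_from_coherent_mem(size_t size, void **ret)
{
	*ret = coherent_pool_alloc(size);	/* NULL if the pool is exhausted */
	return *ret != NULL;			/* nonzero: request was handled */
}

static int release_coherent(void *vaddr)
{
	return coherent_pool_free(vaddr);	/* nonzero: pool owned vaddr */
}
#else
/* No pool in this configuration: helpers compile away to "not handled". */
#define alloc_from_coherent_mem(size, ret)	(0)
#define release_coherent(vaddr)			(0)
#endif

void *alloc_coherent(size_t size)
{
	void *ret = NULL;

	if (alloc_from_coherent_mem(size, &ret))
		return ret;			/* pool satisfied the request */

	ret = malloc(size);			/* generic fallback path */
	if (ret)
		memset(ret, 0, size);
	return ret;
}

void free_coherent(void *vaddr)
{
	if (release_coherent(vaddr))
		return;				/* pool reclaimed it */

	free(vaddr);
}

The "did the helper handle it?" return convention is what lets the dma_alloc_coherent()/dma_free_coherent() bodies in both files read the same after the patch.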
diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
index 818d95efc3c..78c7640252a 100644
--- a/arch/x86/kernel/pci-dma_32.c
+++ b/arch/x86/kernel/pci-dma_32.c
@@ -18,27 +18,50 @@ dma_addr_t bad_dma_address __read_mostly = 0x0;
 EXPORT_SYMBOL(bad_dma_address);
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t gfp)
+static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
+				       dma_addr_t *dma_handle, void **ret)
 {
-	void *ret;
 	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
 	int order = get_order(size);
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 
 	if (mem) {
 		int page = bitmap_find_free_region(mem->bitmap, mem->size,
 						     order);
 		if (page >= 0) {
 			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
-			ret = mem->virt_base + (page << PAGE_SHIFT);
-			memset(ret, 0, size);
-			return ret;
+			*ret = mem->virt_base + (page << PAGE_SHIFT);
+			memset(*ret, 0, size);
 		}
 		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
-			return NULL;
+			*ret = NULL;
+	}
+	return (mem != NULL);
+}
+
+static int dma_release_coherent(struct device *dev, int order, void *vaddr)
+{
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+	if (mem && vaddr >= mem->virt_base && vaddr <
+		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+		bitmap_release_region(mem->bitmap, page, order);
+		return 1;
 	}
+	return 0;
+}
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, gfp_t gfp)
+{
+	void *ret = NULL;
+	int order = get_order(size);
+	/* ignore region specifiers */
+	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &ret))
+		return ret;
 
 	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
 		gfp |= GFP_DMA;
@@ -56,15 +79,11 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_coherent(struct device *dev, size_t size,
 			 void *vaddr, dma_addr_t dma_handle)
 {
-	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
 	int order = get_order(size);
 
 	WARN_ON(irqs_disabled());	/* for portability */
-	if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-
-		bitmap_release_region(mem->bitmap, page, order);
-	} else
-		free_pages((unsigned long)vaddr, order);
+	if (dma_release_coherent(dev, order, vaddr))
+		return;
+	free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL(dma_free_coherent);
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
index e7d45cf8225..6eacd58e451 100644
--- a/arch/x86/kernel/pci-dma_64.c
+++ b/arch/x86/kernel/pci-dma_64.c
@@ -39,6 +39,8 @@ dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
 	return page ? page_address(page) : NULL;
 }
 
+#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
+#define dma_release_coherent(dev, order, vaddr) (0)
 /*
  * Allocate memory for a coherent mapping.
  */
@@ -50,6 +52,10 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	unsigned long dma_mask = 0;
 	u64 bus;
+
+	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
+		return memory;
+
 	if (!dev)
 		dev = &fallback_dev;
 	dma_mask = dev->coherent_dma_mask;
@@ -141,9 +147,12 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_coherent(struct device *dev, size_t size,
 			 void *vaddr, dma_addr_t bus)
 {
+	int order = get_order(size);
 	WARN_ON(irqs_disabled());	/* for portability */
+	if (dma_release_coherent(dev, order, vaddr))
+		return;
 	if (dma_ops->unmap_single)
 		dma_ops->unmap_single(dev, bus, size, 0);
-	free_pages((unsigned long)vaddr, get_order(size));
+	free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL(dma_free_coherent);