Diffstat (limited to 'arch/avr32/mm/dma-coherent.c')
-rw-r--r--  arch/avr32/mm/dma-coherent.c  166
1 file changed, 166 insertions, 0 deletions
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c
new file mode 100644
index 00000000000..44ab8a7bdae
--- /dev/null
+++ b/arch/avr32/mm/dma-coherent.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
+#include <asm/addrspace.h>
+#include <asm/cacheflush.h>
+
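+/*
+ * Synchronize the data cache for a streaming buffer: write back and/or
+ * invalidate the affected cache lines depending on the DMA direction.
+ */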
+void dma_cache_sync(void *vaddr, size_t size, int direction)
+{
+ /*
+ * No need to sync an uncached area
+ */
+ if (PXSEG(vaddr) == P2SEG)
+ return;
+
+ switch (direction) {
+ case DMA_FROM_DEVICE: /* invalidate only */
+ dma_cache_inv(vaddr, size);
+ break;
+ case DMA_TO_DEVICE: /* writeback only */
+ dma_cache_wback(vaddr, size);
+ break;
+ case DMA_BIDIRECTIONAL: /* writeback and invalidate */
+ dma_cache_wback_inv(vaddr, size);
+ break;
+ default:
+ BUG();
+ }
+}
+EXPORT_SYMBOL(dma_cache_sync);
+
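+/*
+ * Allocate pages to back @size bytes, trim off the unused tail of the
+ * allocation and return the bus address through @handle.
+ */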
+static struct page *__dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp)
+{
+ struct page *page, *free, *end;
+ int order;
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+
+ page = alloc_pages(gfp, order);
+ if (!page)
+ return NULL;
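+ /*
+ * split_page() turns the high-order allocation into independent
+ * single pages, so the unused tail can be freed one by one below.
+ */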
+ split_page(page, order);
+
+ /*
+ * When accessing physical memory with valid cache data, we
+ * get a cache hit even if the virtual memory region is marked
+ * as uncached.
+ *
+ * Since the memory is newly allocated, there is no point in
+ * doing a writeback. If the previous owner cares, he should
+ * have flushed the cache before releasing the memory.
+ */
+ invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);
+
+ *handle = page_to_bus(page);
+ free = page + (size >> PAGE_SHIFT);
+ end = page + (1 << order);
+
+ /*
+ * Free any unused pages
+ */
+ while (free < end) {
+ __free_page(free);
+ free++;
+ }
+
+ return page;
+}
+
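+/*
+ * Release every page backing the buffer. __dma_alloc() split the
+ * allocation, so the pages are freed individually.
+ */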
+static void __dma_free(struct device *dev, size_t size,
+ struct page *page, dma_addr_t handle)
+{
+ struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);
+
+ while (page < end)
+ __free_page(page++);
+}
+
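+/*
+ * Hand the buffer back through the uncached P2 segment so that CPU
+ * accesses bypass the cache and stay coherent with the device.
+ */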
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp)
+{
+ struct page *page;
+ void *ret = NULL;
+
+ page = __dma_alloc(dev, size, handle, gfp);
+ if (page)
+ ret = phys_to_uncached(page_to_phys(page));
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
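+/*
+ * Translate the uncached P2 address back to its cached P1 alias to
+ * recover the underlying struct page before freeing.
+ */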
+void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle)
+{
+ void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
+ struct page *page;
+
+ pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
+ cpu_addr, (unsigned long)handle, (unsigned)size);
+ BUG_ON(!virt_addr_valid(addr));
+ page = virt_to_page(addr);
+ __dma_free(dev, size, page, handle);
+}
+EXPORT_SYMBOL(dma_free_coherent);
+
+#if 0
+void *dma_alloc_writecombine(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp)
+{
+ struct page *page;
+
+ page = __dma_alloc(dev, size, handle, gfp);
+ if (!page)
+ return NULL;
+
+ /* Now, map the page into P3 with write-combining turned on */
+ return __ioremap(page_to_phys(page), size, _PAGE_BUFFER);
+}
+EXPORT_SYMBOL(dma_alloc_writecombine);
+
+void dma_free_writecombine(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle)
+{
+ struct page *page;
+
+ iounmap(cpu_addr);
+
+ page = bus_to_page(handle);
+ __dma_free(dev, size, page, handle);
+}
+EXPORT_SYMBOL(dma_free_writecombine);
+#endif
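
For context, a minimal sketch of how a driver might consume the API added
above. The device pointer, ring size, descriptor layout and function names
(my_desc, my_ring_init, my_ring_free) are hypothetical; only
dma_alloc_coherent()/dma_free_coherent() come from this file:

	/* Hypothetical driver snippet -- everything "my_*" is made up. */
	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/types.h>

	struct my_desc {
		u32 buf_addr;
		u32 ctrl;
	};

	static int my_ring_init(struct device *dev, struct my_desc **ring,
				dma_addr_t *ring_dma)
	{
		/* The returned pointer is an uncached (P2) address, so no
		 * explicit cache maintenance is needed on CPU accesses. */
		*ring = dma_alloc_coherent(dev, 16 * sizeof(**ring),
					   ring_dma, GFP_KERNEL);
		if (!*ring)
			return -ENOMEM;
		return 0;
	}

	static void my_ring_free(struct device *dev, struct my_desc *ring,
				 dma_addr_t ring_dma)
	{
		dma_free_coherent(dev, 16 * sizeof(*ring), ring, ring_dma);
	}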