Diffstat (limited to 'arch')
-rw-r--r--   arch/mips/Kconfig                                            |   5
-rw-r--r--   arch/mips/mm/Makefile                                        |  14
-rw-r--r--   arch/mips/mm/dma-coherent.c                                  | 254
-rw-r--r--   arch/mips/mm/dma-default.c (renamed from arch/mips/mm/dma-noncoherent.c) | 209
-rw-r--r--   arch/mips/mm/dma-ip27.c                                      | 257
-rw-r--r--   arch/mips/mm/dma-ip32.c                                      | 383
-rw-r--r--   arch/mips/pci/Makefile                                       |   2
-rw-r--r--   arch/mips/pci/pci-dac.c                                      |  79
8 files changed, 183 insertions, 1020 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 5fe195a41a8..a92ce6bd7cf 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -598,8 +598,6 @@ config SGI_IP32 select ARC select ARC32 select BOOT_ELF32 - select OWN_DMA - select DMA_IP32 select DMA_NONCOHERENT select HW_HAS_PCI select R5000_CPU_SCACHE @@ -883,9 +881,6 @@ config DMA_NONCOHERENT config DMA_NEED_PCI_MAP_STATE bool -config OWN_DMA - bool - config EARLY_PRINTK bool diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index 19e41fd186c..de5727385bc 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -2,8 +2,8 @@ # Makefile for the Linux/MIPS-specific parts of the memory manager. # -obj-y += cache.o extable.o fault.o init.o pgtable.o \ - tlbex.o tlbex-fault.o +obj-y += cache.o dma-default.o extable.o fault.o \ + init.o pgtable.o tlbex.o tlbex-fault.o obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o obj-$(CONFIG_64BIT) += pgtable-64.o @@ -32,14 +32,4 @@ obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o -# -# Choose one DMA coherency model -# -ifndef CONFIG_OWN_DMA -obj-$(CONFIG_DMA_COHERENT) += dma-coherent.o -obj-$(CONFIG_DMA_NONCOHERENT) += dma-noncoherent.o -endif -obj-$(CONFIG_DMA_IP27) += dma-ip27.o -obj-$(CONFIG_DMA_IP32) += dma-ip32.o - EXTRA_AFLAGS := $(CFLAGS) diff --git a/arch/mips/mm/dma-coherent.c b/arch/mips/mm/dma-coherent.c deleted file mode 100644 index 5697c6e250a..00000000000 --- a/arch/mips/mm/dma-coherent.c +++ /dev/null @@ -1,254 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> - * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org> - * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. 
- */ -#include <linux/types.h> -#include <linux/dma-mapping.h> -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/string.h> - -#include <asm/cache.h> -#include <asm/io.h> - -void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, gfp_t gfp) -{ - void *ret; - /* ignore region specifiers */ - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); - - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) - gfp |= GFP_DMA; - ret = (void *) __get_free_pages(gfp, get_order(size)); - - if (ret != NULL) { - memset(ret, 0, size); - *dma_handle = virt_to_phys(ret); - } - - return ret; -} - -EXPORT_SYMBOL(dma_alloc_noncoherent); - -void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, gfp_t gfp) - __attribute__((alias("dma_alloc_noncoherent"))); - -EXPORT_SYMBOL(dma_alloc_coherent); - -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) -{ - unsigned long addr = (unsigned long) vaddr; - - free_pages(addr, get_order(size)); -} - -EXPORT_SYMBOL(dma_free_noncoherent); - -void dma_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) __attribute__((alias("dma_free_noncoherent"))); - -EXPORT_SYMBOL(dma_free_coherent); - -dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - - return __pa(ptr); -} - -EXPORT_SYMBOL(dma_map_single); - -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_unmap_single); - -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - int i; - - BUG_ON(direction == DMA_NONE); - - for (i = 0; i < nents; i++, sg++) { - sg->dma_address = (dma_addr_t)page_to_phys(sg->page) + sg->offset; - } - - return nents; -} - -EXPORT_SYMBOL(dma_map_sg); - -dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - - return page_to_phys(page) + offset; -} - -EXPORT_SYMBOL(dma_map_page); - -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_unmap_page); - -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_unmap_sg); - -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_sync_single_for_cpu); - -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_sync_single_for_device); - -void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_sync_single_range_for_cpu); - -void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_sync_single_range_for_device); - -void dma_sync_sg_for_cpu(struct 
device *dev, struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_sync_sg_for_cpu); - -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_sync_sg_for_device); - -int dma_mapping_error(dma_addr_t dma_addr) -{ - return 0; -} - -EXPORT_SYMBOL(dma_mapping_error); - -int dma_supported(struct device *dev, u64 mask) -{ - /* - * we fall back to GFP_DMA when the mask isn't all 1s, - * so we can't guarantee allocations that must be - * within a tighter range than GFP_DMA.. - */ - if (mask < 0x00ffffff) - return 0; - - return 1; -} - -EXPORT_SYMBOL(dma_supported); - -int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) -{ - return 1; -} - -EXPORT_SYMBOL(dma_is_consistent); - -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_cache_sync); - -/* The DAC routines are a PCIism.. */ - -#ifdef CONFIG_PCI - -#include <linux/pci.h> - -dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev, - struct page *page, unsigned long offset, int direction) -{ - return (dma64_addr_t)page_to_phys(page) + offset; -} - -EXPORT_SYMBOL(pci_dac_page_to_dma); - -struct page *pci_dac_dma_to_page(struct pci_dev *pdev, - dma64_addr_t dma_addr) -{ - return mem_map + (dma_addr >> PAGE_SHIFT); -} - -EXPORT_SYMBOL(pci_dac_dma_to_page); - -unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev, - dma64_addr_t dma_addr) -{ - return dma_addr & ~PAGE_MASK; -} - -EXPORT_SYMBOL(pci_dac_dma_to_offset); - -void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, - dma64_addr_t dma_addr, size_t len, int direction) -{ - BUG_ON(direction == PCI_DMA_NONE); -} - -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu); - -void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, - dma64_addr_t dma_addr, size_t len, int direction) -{ - BUG_ON(direction == PCI_DMA_NONE); -} - -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device); - -#endif /* CONFIG_PCI */ diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-default.c index 8cecef0957c..4a32e939698 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-default.c @@ -4,28 +4,39 @@ * for more details. * * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> - * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org> + * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org> * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. */ + #include <linux/types.h> +#include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/string.h> -#include <linux/dma-mapping.h> #include <asm/cache.h> #include <asm/io.h> +#include <dma-coherence.h> + /* * Warning on the terminology - Linux calls an uncached area coherent; * MIPS terminology calls memory areas with hardware maintained coherency * coherent. 
*/ +static inline int cpu_is_noncoherent_r10000(struct device *dev) +{ + return !plat_device_is_coherent(dev) && + (current_cpu_data.cputype == CPU_R10000 && + current_cpu_data.cputype == CPU_R12000); +} + void *dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t * dma_handle, gfp_t gfp) { void *ret; + /* ignore region specifiers */ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); @@ -35,7 +46,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size, if (ret != NULL) { memset(ret, 0, size); - *dma_handle = virt_to_phys(ret); + *dma_handle = plat_map_dma_mem(dev, ret, size); } return ret; @@ -48,10 +59,21 @@ void *dma_alloc_coherent(struct device *dev, size_t size, { void *ret; - ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp); + /* ignore region specifiers */ + gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); + + if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) + gfp |= GFP_DMA; + ret = (void *) __get_free_pages(gfp, get_order(size)); + if (ret) { - dma_cache_wback_inv((unsigned long) ret, size); - ret = UNCAC_ADDR(ret); + memset(ret, 0, size); + *dma_handle = plat_map_dma_mem(dev, ret, size); + + if (!plat_device_is_coherent(dev)) { + dma_cache_wback_inv((unsigned long) ret, size); + ret = UNCAC_ADDR(ret); + } } return ret; @@ -72,7 +94,9 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr, { unsigned long addr = (unsigned long) vaddr; - addr = CAC_ADDR(addr); + if (!plat_device_is_coherent(dev)) + addr = CAC_ADDR(addr); + free_pages(addr, get_order(size)); } @@ -104,9 +128,10 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, { unsigned long addr = (unsigned long) ptr; - __dma_sync(addr, size, direction); + if (!plat_device_is_coherent(dev)) + __dma_sync(addr, size, direction); - return virt_to_phys(ptr); + return plat_map_dma_mem(dev, ptr, size); } EXPORT_SYMBOL(dma_map_single); @@ -114,10 +139,11 @@ EXPORT_SYMBOL(dma_map_single); void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction) { - unsigned long addr; - addr = dma_addr + PAGE_OFFSET; + if (cpu_is_noncoherent_r10000(dev)) + __dma_sync(plat_dma_addr_to_phys(dma_addr) + PAGE_OFFSET, size, + direction); - //__dma_sync(addr, size, direction); + plat_unmap_dma_mem(dma_addr); } EXPORT_SYMBOL(dma_unmap_single); @@ -133,11 +159,10 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, unsigned long addr; addr = (unsigned long) page_address(sg->page); - if (addr) { + if (!plat_device_is_coherent(dev) && addr) __dma_sync(addr + sg->offset, sg->length, direction); - sg->dma_address = (dma_addr_t)page_to_phys(sg->page) - + sg->offset; - } + sg->dma_address = plat_map_dma_mem_page(dev, sg->page) + + sg->offset; } return nents; @@ -148,14 +173,16 @@ EXPORT_SYMBOL(dma_map_sg); dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction) { - unsigned long addr; - BUG_ON(direction == DMA_NONE); - addr = (unsigned long) page_address(page) + offset; - dma_cache_wback_inv(addr, size); + if (!plat_device_is_coherent(dev)) { + unsigned long addr; + + addr = (unsigned long) page_address(page) + offset; + dma_cache_wback_inv(addr, size); + } - return page_to_phys(page) + offset; + return plat_map_dma_mem_page(dev, page) + offset; } EXPORT_SYMBOL(dma_map_page); @@ -165,12 +192,14 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, { BUG_ON(direction == DMA_NONE); - if (direction != DMA_TO_DEVICE) { + if 
(!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) { unsigned long addr; - addr = dma_address + PAGE_OFFSET; + addr = plat_dma_addr_to_phys(dma_address); dma_cache_wback_inv(addr, size); } + + plat_unmap_dma_mem(dma_address); } EXPORT_SYMBOL(dma_unmap_page); @@ -183,13 +212,15 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, BUG_ON(direction == DMA_NONE); - if (direction == DMA_TO_DEVICE) - return; - for (i = 0; i < nhwentries; i++, sg++) { - addr = (unsigned long) page_address(sg->page); - if (addr) - __dma_sync(addr + sg->offset, sg->length, direction); + if (!plat_device_is_coherent(dev) && + direction != DMA_TO_DEVICE) { + addr = (unsigned long) page_address(sg->page); + if (addr) + __dma_sync(addr + sg->offset, sg->length, + direction); + } + plat_unmap_dma_mem(sg->dma_address); } } @@ -198,12 +229,14 @@ EXPORT_SYMBOL(dma_unmap_sg); void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { - unsigned long addr; - BUG_ON(direction == DMA_NONE); - addr = dma_handle + PAGE_OFFSET; - __dma_sync(addr, size, direction); + if (cpu_is_noncoherent_r10000(dev)) { + unsigned long addr; + + addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle); + __dma_sync(addr, size, direction); + } } EXPORT_SYMBOL(dma_sync_single_for_cpu); @@ -211,12 +244,14 @@ EXPORT_SYMBOL(dma_sync_single_for_cpu); void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { - unsigned long addr; - BUG_ON(direction == DMA_NONE); - addr = dma_handle + PAGE_OFFSET; - __dma_sync(addr, size, direction); + if (cpu_is_noncoherent_r10000(dev)) { + unsigned long addr; + + addr = plat_dma_addr_to_phys(dma_handle); + __dma_sync(addr, size, direction); + } } EXPORT_SYMBOL(dma_sync_single_for_device); @@ -224,12 +259,14 @@ EXPORT_SYMBOL(dma_sync_single_for_device); void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction) { - unsigned long addr; - BUG_ON(direction == DMA_NONE); - addr = dma_handle + offset + PAGE_OFFSET; - __dma_sync(addr, size, direction); + if (cpu_is_noncoherent_r10000(dev)) { + unsigned long addr; + + addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle); + __dma_sync(addr + offset, size, direction); + } } EXPORT_SYMBOL(dma_sync_single_range_for_cpu); @@ -237,12 +274,14 @@ EXPORT_SYMBOL(dma_sync_single_range_for_cpu); void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction) { - unsigned long addr; - BUG_ON(direction == DMA_NONE); - addr = dma_handle + offset + PAGE_OFFSET; - __dma_sync(addr, size, direction); + if (cpu_is_noncoherent_r10000(dev)) { + unsigned long addr; + + addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle); + __dma_sync(addr + offset, size, direction); + } } EXPORT_SYMBOL(dma_sync_single_range_for_device); @@ -255,9 +294,12 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, BUG_ON(direction == DMA_NONE); /* Make sure that gcc doesn't leave the empty loop body. 
*/ - for (i = 0; i < nelems; i++, sg++) - __dma_sync((unsigned long)page_address(sg->page), - sg->length, direction); + for (i = 0; i < nelems; i++, sg++) { + if (!plat_device_is_coherent(dev)) + __dma_sync((unsigned long)page_address(sg->page), + sg->length, direction); + plat_unmap_dma_mem(sg->dma_address); + } } EXPORT_SYMBOL(dma_sync_sg_for_cpu); @@ -270,9 +312,12 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele BUG_ON(direction == DMA_NONE); /* Make sure that gcc doesn't leave the empty loop body. */ - for (i = 0; i < nelems; i++, sg++) - __dma_sync((unsigned long)page_address(sg->page), - sg->length, direction); + for (i = 0; i < nelems; i++, sg++) { + if (!plat_device_is_coherent(dev)) + __dma_sync((unsigned long)page_address(sg->page), + sg->length, direction); + plat_unmap_dma_mem(sg->dma_address); + } } EXPORT_SYMBOL(dma_sync_sg_for_device); @@ -301,70 +346,18 @@ EXPORT_SYMBOL(dma_supported); int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) { - return 1; + return plat_device_is_coherent(dev); } EXPORT_SYMBOL(dma_is_consistent); void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) + enum dma_data_direction direction) { - if (direction == DMA_NONE) - return; + BUG_ON(direction == DMA_NONE); - dma_cache_wback_inv((unsigned long)vaddr, size); + if (!plat_device_is_coherent(dev)) + dma_cache_wback_inv((unsigned long)vaddr, size); } EXPORT_SYMBOL(dma_cache_sync); - -/* The DAC routines are a PCIism.. */ - -#ifdef CONFIG_PCI - -#include <linux/pci.h> - -dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev, - struct page *page, unsigned long offset, int direction) -{ - return (dma64_addr_t)page_to_phys(page) + offset; -} - -EXPORT_SYMBOL(pci_dac_page_to_dma); - -struct page *pci_dac_dma_to_page(struct pci_dev *pdev, - dma64_addr_t dma_addr) -{ - return mem_map + (dma_addr >> PAGE_SHIFT); -} - -EXPORT_SYMBOL(pci_dac_dma_to_page); - -unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev, - dma64_addr_t dma_addr) -{ - return dma_addr & ~PAGE_MASK; -} - -EXPORT_SYMBOL(pci_dac_dma_to_offset); - -void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, - dma64_addr_t dma_addr, size_t len, int direction) -{ - BUG_ON(direction == PCI_DMA_NONE); - - dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len); -} - -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu); - -void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, - dma64_addr_t dma_addr, size_t len, int direction) -{ - BUG_ON(direction == PCI_DMA_NONE); - - dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len); -} - -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device); - -#endif /* CONFIG_PCI */ diff --git a/arch/mips/mm/dma-ip27.c b/arch/mips/mm/dma-ip27.c deleted file mode 100644 index f088344db46..00000000000 --- a/arch/mips/mm/dma-ip27.c +++ /dev/null @@ -1,257 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> - * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org> - * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. 
- */ -#include <linux/types.h> -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/string.h> -#include <linux/pci.h> - -#include <asm/cache.h> -#include <asm/pci/bridge.h> - -#define pdev_to_baddr(pdev, addr) \ - (BRIDGE_CONTROLLER(pdev->bus)->baddr + (addr)) -#define dev_to_baddr(dev, addr) \ - pdev_to_baddr(to_pci_dev(dev), (addr)) - -void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, gfp_t gfp) -{ - void *ret; - - /* ignore region specifiers */ - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); - - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) - gfp |= GFP_DMA; - ret = (void *) __get_free_pages(gfp, get_order(size)); - - if (ret != NULL) { - memset(ret, 0, size); - *dma_handle = dev_to_baddr(dev, virt_to_phys(ret)); - } - - return ret; -} - -EXPORT_SYMBOL(dma_alloc_noncoherent); - -void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, gfp_t gfp) - __attribute__((alias("dma_alloc_noncoherent"))); - -EXPORT_SYMBOL(dma_alloc_coherent); - -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) -{ - unsigned long addr = (unsigned long) vaddr; - - free_pages(addr, get_order(size)); -} - -EXPORT_SYMBOL(dma_free_noncoherent); - -void dma_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) __attribute__((alias("dma_free_noncoherent"))); - -EXPORT_SYMBOL(dma_free_coherent); - -dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - - return dev_to_baddr(dev, __pa(ptr)); -} - -EXPORT_SYMBOL(dma_map_single); - -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_unmap_single); - -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - int i; - - BUG_ON(direction == DMA_NONE); - - for (i = 0; i < nents; i++, sg++) { - sg->dma_address = (dma_addr_t) dev_to_baddr(dev, - page_to_phys(sg->page) + sg->offset); - } - - return nents; -} - -EXPORT_SYMBOL(dma_map_sg); - -dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - - return dev_to_baddr(dev, page_to_phys(page) + offset); -} - -EXPORT_SYMBOL(dma_map_page); - -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_unmap_page); - -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_unmap_sg); - -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_sync_single_for_cpu); - -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_sync_single_for_device); - -void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_sync_single_range_for_cpu); - -void 
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_sync_single_range_for_device); - -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_sync_sg_for_cpu); - -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_sync_sg_for_device); - -int dma_mapping_error(dma_addr_t dma_addr) -{ - return 0; -} - -EXPORT_SYMBOL(dma_mapping_error); - -int dma_supported(struct device *dev, u64 mask) -{ - /* - * we fall back to GFP_DMA when the mask isn't all 1s, - * so we can't guarantee allocations that must be - * within a tighter range than GFP_DMA.. - */ - if (mask < 0x00ffffff) - return 0; - - return 1; -} - -EXPORT_SYMBOL(dma_supported); - -int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) -{ - return 1; -} - -EXPORT_SYMBOL(dma_is_consistent); - -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -EXPORT_SYMBOL(dma_cache_sync); - -dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev, - struct page *page, unsigned long offset, int direction) -{ - dma64_addr_t addr = page_to_phys(page) + offset; - - return (dma64_addr_t) pdev_to_baddr(pdev, addr); -} - -EXPORT_SYMBOL(pci_dac_page_to_dma); - -struct page *pci_dac_dma_to_page(struct pci_dev *pdev, - dma64_addr_t dma_addr) -{ - struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus); - - return pfn_to_page((dma_addr - bc->baddr) >> PAGE_SHIFT); -} - -EXPORT_SYMBOL(pci_dac_dma_to_page); - -unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev, - dma64_addr_t dma_addr) -{ - return dma_addr & ~PAGE_MASK; -} - -EXPORT_SYMBOL(pci_dac_dma_to_offset); - -void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, - dma64_addr_t dma_addr, size_t len, int direction) -{ - BUG_ON(direction == PCI_DMA_NONE); -} - -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu); - -void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, - dma64_addr_t dma_addr, size_t len, int direction) -{ - BUG_ON(direction == PCI_DMA_NONE); -} - -EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device); diff --git a/arch/mips/mm/dma-ip32.c b/arch/mips/mm/dma-ip32.c deleted file mode 100644 index b42b6f7456e..00000000000 --- a/arch/mips/mm/dma-ip32.c +++ /dev/null @@ -1,383 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> - * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org> - * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com> - * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. - * IP32 changes by Ilya. - */ -#include <linux/types.h> -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/string.h> -#include <linux/dma-mapping.h> - -#include <asm/cache.h> -#include <asm/io.h> -#include <asm/ip32/crime.h> - -/* - * Warning on the terminology - Linux calls an uncached area coherent; - * MIPS terminology calls memory areas with hardware maintained coherency - * coherent. - */ - -/* - * Few notes. - * 1. 
CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M - * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for native-endian) - * 3. All other devices see memory as one big chunk at 0x40000000 - * 4. Non-PCI devices will pass NULL as struct device* - * Thus we translate differently, depending on device. - */ - -#define RAM_OFFSET_MASK 0x3fffffff - -void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, gfp_t gfp) -{ - void *ret; - /* ignore region specifiers */ - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); - - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) - gfp |= GFP_DMA; - ret = (void *) __get_free_pages(gfp, get_order(size)); - - if (ret != NULL) { - unsigned long addr = virt_to_phys(ret)&RAM_OFFSET_MASK; - memset(ret, 0, size); - if(dev==NULL) - addr+= CRIME_HI_MEM_BASE; - *dma_handle = addr; - } - - return ret; -} - -EXPORT_SYMBOL(dma_alloc_noncoherent); - -void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, gfp_t gfp) -{ - void *ret; - - ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp); - if (ret) { - dma_cache_wback_inv((unsigned long) ret, size); - ret = UNCAC_ADDR(ret); - } - - return ret; -} - -EXPORT_SYMBOL(dma_alloc_coherent); - -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) -{ - free_pages((unsigned long) vaddr, get_order(size)); -} - -EXPORT_SYMBOL(dma_free_noncoherent); - -void dma_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) -{ - unsigned long addr = (unsigned long) vaddr; - - addr = CAC_ADDR(addr); - free_pages(addr, get_order(size)); -} - -EXPORT_SYMBOL(dma_free_coherent); - -static inline void __dma_sync(unsigned long addr, size_t size, - enum dma_data_direction direction) -{ - switch (direction) { - case DMA_TO_DEVICE: - dma_cache_wback(addr, size); - break; - - case DMA_FROM_DEVICE: - dma_cache_inv(addr, size); - break; - - case DMA_BIDIRECTIONAL: - dma_cache_wback_inv(addr, size); - break; - - default: - BUG(); - } -} - -dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, - enum dma_data_direction direction) -{ - unsigned long addr = (unsigned long) ptr; - - switch (direction) { - case DMA_TO_DEVICE: - dma_cache_wback(addr, size); - break; - - case DMA_FROM_DEVICE: - dma_cache_inv(addr, size); - break; - - case DMA_BIDIRECTIONAL: - dma_cache_wback_inv(addr, size); - break; - - default: - BUG(); - } - - addr = virt_to_phys(ptr)&RAM_OFFSET_MASK; - if(dev == NULL) - addr+=CRIME_HI_MEM_BASE; - return (dma_addr_t)addr; -} - -EXPORT_SYMBOL(dma_map_single); - -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction direction) -{ - switch (direction) { - case DMA_TO_DEVICE: - break; - - case DMA_FROM_DEVICE: - break; - - case DMA_BIDIRECTIONAL: - break; - - default: - BUG(); - } -} - -EXPORT_SYMBOL(dma_unmap_single); - -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - int i; - - BUG_ON(direction == DMA_NONE); - - for (i = 0; i < nents; i++, sg++) { - unsigned long addr; - - addr = (unsigned long) page_address(sg->page)+sg->offset; - if (addr) - __dma_sync(addr, sg->length, direction); - addr = __pa(addr)&RAM_OFFSET_MASK; - if(dev == NULL) - addr += CRIME_HI_MEM_BASE; - sg->dma_address = (dma_addr_t)addr; - } - - return nents; -} - -EXPORT_SYMBOL(dma_map_sg); - -dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, 
size_t size, enum dma_data_direction direction) -{ - unsigned long addr; - - BUG_ON(direction == DMA_NONE); - - addr = (unsigned long) page_address(page) + offset; - dma_cache_wback_inv(addr, size); - addr = __pa(addr)&RAM_OFFSET_MASK; - if(dev == NULL) - addr += CRIME_HI_MEM_BASE; - - return (dma_addr_t)addr; -} - -EXPORT_SYMBOL(dma_map_page); - -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - - if (direction != DMA_TO_DEVICE) { - unsigned long addr; - - dma_address&=RAM_OFFSET_MASK; - addr = dma_address + PAGE_OFFSET; - if(dma_address>=256*1024*1024) - addr+=CRIME_HI_MEM_BASE; - dma_cache_wback_inv(addr, size); - } -} - -EXPORT_SYMBOL(dma_unmap_page); - -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, - enum dma_data_direction direction) -{ - unsigned long addr; - int i; - - BUG_ON(direction == DMA_NONE); - - if (direction == DMA_TO_DEVICE) - return; - - for (i = 0; i < nhwentries; i++, sg++) { - addr = (unsigned long) page_address(sg->page); - if (!addr) - continue; - dma_cache_wback_inv(addr + sg->offset, sg->length); - } -} - -EXPORT_SYMBOL(dma_unmap_sg); - -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction direction) -{ - unsigned long addr; - - BUG_ON(direction == DMA_NONE); - - dma_handle&=RAM_OFFSET_MASK; - addr = dma_handle + PAGE_OFFSET; - if(dma_handle>=256*1024*1024) - addr+=CRIME_HI_MEM_BASE; - __dma_sync(addr, size, direction); -} - -EXPORT_SYMBOL(dma_sync_single_for_cpu); - -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction direction) -{ - unsigned long addr; - - BUG_ON(direction == DMA_NONE); - - dma_handle&=RAM_OFFSET_MASK; - addr = dma_handle + PAGE_OFFSET; - if(dma_handle>=256*1024*1024) - addr+=CRIME_HI_MEM_BASE; - __dma_sync(addr, size, direction); -} - -EXPORT_SYMBOL(dma_sync_single_for_device); - -void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, enum dma_data_direction direction) -{ - unsigned long addr; - - BUG_ON(direction == DMA_NONE); - - dma_handle&=RAM_OFFSET_MASK; - addr = dma_handle + offset + PAGE_OFFSET; - if(dma_handle>=256*1024*1024) - addr+=CRIME_HI_MEM_BASE; - __dma_sync(addr, size, direction); -} - -EXPORT_SYMBOL(dma_sync_single_range_for_cpu); - -void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, enum dma_data_direction direction) -{ - unsigned long addr; - - BUG_ON(direction == DMA_NONE); - - dma_handle&=RAM_OFFSET_MASK; - addr = dma_handle + offset + PAGE_OFFSET; - if(dma_handle>=256*1024*1024) - addr+=CRIME_HI_MEM_BASE; - __dma_sync(addr, size, direction); -} - -EXPORT_SYMBOL(dma_sync_single_range_for_device); - -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ - int i; - - BUG_ON(direction == DMA_NONE); - - /* Make sure that gcc doesn't leave the empty loop body. */ - for (i = 0; i < nelems; i++, sg++) - __dma_sync((unsigned long)page_address(sg->page), - sg->length, direction); -} - -EXPORT_SYMBOL(dma_sync_sg_for_cpu); - -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ - int i; - - BUG_ON(direction == DMA_NONE); - - /* Make sure that gcc doesn't leave the empty loop body. 
*/ - for (i = 0; i < nelems; i++, sg++) - __dma_sync((unsigned long)page_address(sg->page), - sg->length, direction); -} - -EXPORT_SYMBOL(dma_sync_sg_for_device); - -int dma_mapping_error(dma_addr_t dma_addr) -{ - return 0; -} - -EXPORT_SYMBOL(dma_mapping_error); - -int dma_supported(struct device *dev, u64 mask) -{ - /* - * we fall back to GFP_DMA when the mask isn't all 1s, - * so we can't guarantee allocations that must be - * within a tighter range than GFP_DMA.. - */ - if (mask < 0x00ffffff) - return 0; - - return 1; -} - -EXPORT_SYMBOL(dma_supported); - -int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) -{ - return 1; -} - -EXPORT_SYMBOL(dma_is_consistent); - -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ - if (direction == DMA_NONE) - return; - - dma_cache_wback_inv((unsigned long)vaddr, size); -} - -EXPORT_SYMBOL(dma_cache_sync); - diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile index 82b20c28bef..bf85995ca04 100644 --- a/arch/mips/pci/Makefile +++ b/arch/mips/pci/Makefile @@ -2,7 +2,7 @@ # Makefile for the PCI specific kernel interface routines under Linux. # -obj-y += pci.o +obj-y += pci.o pci-dac.o # # PCI bus host bridge specific code diff --git a/arch/mips/pci/pci-dac.c b/arch/mips/pci/pci-dac.c new file mode 100644 index 00000000000..0f0ea1b7d4d --- /dev/null +++ b/arch/mips/pci/pci-dac.c @@ -0,0 +1,79 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> + * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org> + * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. + */ + +#include <linux/types.h> +#include <linux/dma-mapping.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/string.h> + +#include <asm/cache.h> +#include <asm/io.h> + +#include <dma-coherence.h> + +#include <linux/pci.h> + +dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev, + struct page *page, unsigned long offset, int direction) +{ + struct device *dev = &pdev->dev; + + BUG_ON(direction == DMA_NONE); + + if (!plat_device_is_coherent(dev)) { + unsigned long addr; + + addr = (unsigned long) page_address(page) + offset; + dma_cache_wback_inv(addr, PAGE_SIZE); + } + + return plat_map_dma_mem_page(dev, page) + offset; +} + +EXPORT_SYMBOL(pci_dac_page_to_dma); + +struct page *pci_dac_dma_to_page(struct pci_dev *pdev, + dma64_addr_t dma_addr) +{ + return pfn_to_page(plat_dma_addr_to_phys(dma_addr) >> PAGE_SHIFT); +} + +EXPORT_SYMBOL(pci_dac_dma_to_page); + +unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev, + dma64_addr_t dma_addr) +{ + return dma_addr & ~PAGE_MASK; +} + +EXPORT_SYMBOL(pci_dac_dma_to_offset); + +void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, + dma64_addr_t dma_addr, size_t len, int direction) +{ + BUG_ON(direction == PCI_DMA_NONE); + + if (!plat_device_is_coherent(&pdev->dev)) + dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len); +} + +EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu); + +void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, + dma64_addr_t dma_addr, size_t len, int direction) +{ + BUG_ON(direction == PCI_DMA_NONE); + + if (!plat_device_is_coherent(&pdev->dev)) + dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len); +} + +EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device); |
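The unified arch/mips/mm/dma-default.c and the new arch/mips/pci/pci-dac.c above push every platform difference into the hooks pulled in through <dma-coherence.h>: plat_device_is_coherent(), plat_map_dma_mem(), plat_map_dma_mem_page(), plat_dma_addr_to_phys() and plat_unmap_dma_mem(). That header is not part of this diff, so the following is only a sketch, inferred from the call sites above, of what a trivial cache-coherent platform's version of these hooks might look like; the "mach-foo" name and file path are placeholders, not files in the tree.

/*
 * Illustrative sketch only: a hypothetical
 * include/asm-mips/mach-foo/dma-coherence.h for a platform where
 * DMA (bus) addresses equal physical addresses and I/O is
 * hardware-coherent.  Not part of this patch.
 */
#ifndef __ASM_MACH_FOO_DMA_COHERENCE_H
#define __ASM_MACH_FOO_DMA_COHERENCE_H

struct device;

static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
	size_t size)
{
	return virt_to_phys(addr);	/* CPU virtual -> bus address */
}

static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
	struct page *page)
{
	return page_to_phys(page);	/* page -> bus address */
}

static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
{
	return dma_addr;		/* bus address == physical address */
}

static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
{
	/* nothing to tear down on this platform */
}

static inline int plat_device_is_coherent(struct device *dev)
{
	return 1;	/* dma-default.c then skips all cache maintenance */
}

#endif /* __ASM_MACH_FOO_DMA_COHERENCE_H */

A noncoherent platform would instead return 0 from plat_device_is_coherent(), which is what steers dma-default.c into the explicit __dma_sync()/dma_cache_wback_inv() calls visible in the hunks above, while an IP27- or IP32-style platform would additionally fold its bus-address offset into plat_map_dma_mem() and plat_dma_addr_to_phys(), replacing the address arithmetic of the deleted dma-ip27.c and dma-ip32.c files.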