From a77b67d4023770805141014b8fa9eb5467457817 Mon Sep 17 00:00:00 2001
From: Youquan Song
Date: Thu, 16 Oct 2008 16:31:56 -0700
Subject: dmar: Use queued invalidation interface for IOTLB and context invalidation

If the queued invalidation interface is available and enabled, use it
instead of the register-based interface. According to the VT-d2
specification, when queued invalidation is enabled, invalidation commands
may be submitted only through the invalidation queue and not through the
command register interface.

Signed-off-by: Youquan Song
Signed-off-by: Suresh Siddha
Signed-off-by: David Woodhouse
---
 drivers/pci/intel-iommu.c | 95 ++++++++++++++++++++++-------------------
 1 file changed, 45 insertions(+), 50 deletions(-)

(limited to 'drivers/pci/intel-iommu.c')

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index fc5f2dbf532..50947041913 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -567,27 +567,6 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
 	return 0;
 }
 
-static int inline iommu_flush_context_global(struct intel_iommu *iommu,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
-		non_present_entry_flush);
-}
-
-static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
-		non_present_entry_flush);
-}
-
-static int inline iommu_flush_context_device(struct intel_iommu *iommu,
-	u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush)
-{
-	return __iommu_flush_context(iommu, did, source_id, function_mask,
-		DMA_CCMD_DEVICE_INVL, non_present_entry_flush);
-}
-
 /* return value determine if we need a write buffer flush */
 static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 	u64 addr, unsigned int size_order, u64 type,
@@ -660,20 +639,6 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 	return 0;
 }
 
-static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
-		non_present_entry_flush);
-}
-
-static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did,
-	int non_present_entry_flush)
-{
-	return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
-		non_present_entry_flush);
-}
-
 static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 	u64 addr, unsigned int pages, int non_present_entry_flush)
 {
@@ -684,8 +649,9 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 
 	/* Fallback to domain selective flush if no PSI support */
 	if (!cap_pgsel_inv(iommu->cap))
-		return iommu_flush_iotlb_dsi(iommu, did,
-			non_present_entry_flush);
+		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
+						DMA_TLB_DSI_FLUSH,
+						non_present_entry_flush);
 
 	/*
 	 * PSI requires page size to be 2 ^ x, and the base address is naturally
@@ -694,11 +660,12 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 	mask = ilog2(__roundup_pow_of_two(pages));
 	/* Fallback to domain selective flush if size is too big */
 	if (mask > cap_max_amask_val(iommu->cap))
-		return iommu_flush_iotlb_dsi(iommu, did,
-			non_present_entry_flush);
+		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
+			DMA_TLB_DSI_FLUSH, non_present_entry_flush);
 
-	return __iommu_flush_iotlb(iommu, did, addr, mask,
-		DMA_TLB_PSI_FLUSH, non_present_entry_flush);
+	return iommu->flush.flush_iotlb(iommu, did, addr, mask,
+					DMA_TLB_PSI_FLUSH,
+					non_present_entry_flush);
 }
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1204,11 +1171,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	__iommu_flush_cache(iommu, context, sizeof(*context));
 
 	/* it's a non-present to present mapping */
-	if (iommu_flush_context_device(iommu, domain->id,
-		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1))
+	if (iommu->flush.flush_context(iommu, domain->id,
+		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
+		DMA_CCMD_DEVICE_INVL, 1))
 		iommu_flush_write_buffer(iommu);
 	else
-		iommu_flush_iotlb_dsi(iommu, 0, 0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 	return 0;
 }
@@ -1310,8 +1279,10 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
 {
 	clear_context_table(domain->iommu, bus, devfn);
-	iommu_flush_context_global(domain->iommu, 0);
-	iommu_flush_iotlb_global(domain->iommu, 0);
+	domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0,
+					   DMA_CCMD_GLOBAL_INVL, 0);
+	domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0,
+					 DMA_TLB_GLOBAL_FLUSH, 0);
 }
 
 static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -1662,6 +1633,28 @@ int __init init_dmars(void)
 		}
 	}
 
+	for_each_drhd_unit(drhd) {
+		if (drhd->ignored)
+			continue;
+
+		iommu = drhd->iommu;
+		if (dmar_enable_qi(iommu)) {
+			/*
+			 * Queued Invalidate not enabled, use Register Based
+			 * Invalidate
+			 */
+			iommu->flush.flush_context = __iommu_flush_context;
+			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
+			       "invalidation\n", drhd->reg_base_addr);
+		} else {
+			iommu->flush.flush_context = qi_flush_context;
+			iommu->flush.flush_iotlb = qi_flush_iotlb;
+			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
+			       "invalidation\n", drhd->reg_base_addr);
+		}
+	}
+
 	/*
 	 * For each rmrr
 	 * for each dev attached to rmrr
@@ -1714,9 +1707,10 @@ int __init init_dmars(void)
 
 		iommu_set_root_entry(iommu);
 
-		iommu_flush_context_global(iommu, 0);
-		iommu_flush_iotlb_global(iommu, 0);
-
+		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
+					   0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
+					 0);
 		iommu_disable_protect_mem_regions(iommu);
 
 		ret = iommu_enable_translation(iommu);
@@ -1891,7 +1885,8 @@ static void flush_unmaps(void)
 		struct intel_iommu *iommu =
 			deferred_flush[i].domain[0]->iommu;
 
-		iommu_flush_iotlb_global(iommu, 0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+					 DMA_TLB_GLOBAL_FLUSH, 0);
 		for (j = 0; j < deferred_flush[i].next; j++) {
 			__free_iova(&deferred_flush[i].domain[j]->iovad,
 					deferred_flush[i].iova[j]);
--
cgit v1.2.3
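The core of the change above is an indirection: callers no longer invoke the register-based helpers directly but go through iommu->flush.flush_context and iommu->flush.flush_iotlb, which init_dmars() points at either the register-based routines or the queued-invalidation routines depending on whether dmar_enable_qi() succeeds. A minimal user-space sketch of the same dispatch pattern, with simplified types and stub backends (reg_flush_iotlb and qi_flush_iotlb_sketch are illustrative names, not the kernel's functions):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct intel_iommu;

/* Function-pointer bundle, analogous to iommu->flush in the patch. */
struct iommu_flush {
	int (*flush_iotlb)(struct intel_iommu *iommu, uint16_t did,
			   uint64_t addr, unsigned int size_order,
			   uint64_t type, int non_present_entry_flush);
};

struct intel_iommu {
	bool qi_enabled;		/* stand-in for dmar_enable_qi() succeeding */
	struct iommu_flush flush;
};

/* Register-based backend (stub). */
static int reg_flush_iotlb(struct intel_iommu *iommu, uint16_t did,
			   uint64_t addr, unsigned int order,
			   uint64_t type, int npe)
{
	printf("register-based IOTLB flush, type %#llx\n",
	       (unsigned long long)type);
	return 0;
}

/* Queued-invalidation backend (stub). */
static int qi_flush_iotlb_sketch(struct intel_iommu *iommu, uint16_t did,
				 uint64_t addr, unsigned int order,
				 uint64_t type, int npe)
{
	printf("queued IOTLB invalidation, type %#llx\n",
	       (unsigned long long)type);
	return 0;
}

/* Mirrors the selection loop the patch adds to init_dmars(). */
static void setup_flush_ops(struct intel_iommu *iommu)
{
	if (!iommu->qi_enabled)
		iommu->flush.flush_iotlb = reg_flush_iotlb;
	else
		iommu->flush.flush_iotlb = qi_flush_iotlb_sketch;
}

int main(void)
{
	struct intel_iommu iommu = { .qi_enabled = true };

	setup_flush_ops(&iommu);
	/* Callers never care which backend is active. */
	iommu.flush.flush_iotlb(&iommu, 0, 0, 0,
				0x1 /* stand-in for DMA_TLB_GLOBAL_FLUSH */, 0);
	return 0;
}

Selecting the backend per IOMMU at boot means a system where some units support queued invalidation and others do not keeps working without a runtime check in every caller.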
From 5b6985ce8ec7127b4d60ad450b64ca8b82748a3b Mon Sep 17 00:00:00 2001
From: Fenghua Yu
Date: Thu, 16 Oct 2008 18:02:32 -0700
Subject: intel-iommu: IA64 support

The current Intel IOMMU code assumes that both the host page size and the
Intel IOMMU page size are 4KiB. This patch adds support for a variable host
page size, which provides support for IA64, where multiple page sizes are
available. It also adds some other code hooks for the IA64 platform,
including the DMAR_OPERATION_TIMEOUT definition.
[dwmw2: some cleanup]
Signed-off-by: Fenghua Yu
Signed-off-by: Tony Luck
Signed-off-by: David Woodhouse
---
 drivers/pci/intel-iommu.c | 128 ++++++++++++++++++++++++----------------------
 1 file changed, 66 insertions(+), 62 deletions(-)

(limited to 'drivers/pci/intel-iommu.c')

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 50947041913..2bf96babbc4 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -18,6 +18,7 @@
  * Author: Ashok Raj
  * Author: Shaohua Li
  * Author: Anil S Keshavamurthy
+ * Author: Fenghua Yu
  */
 
 #include
@@ -35,11 +36,13 @@
 #include
 #include
 #include
-#include	/* force_iommu in this header in x86-64*/
 #include
 #include
 #include "pci.h"
 
+#define ROOT_SIZE		VTD_PAGE_SIZE
+#define CONTEXT_SIZE		VTD_PAGE_SIZE
+
 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
 
@@ -199,7 +202,7 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
 			spin_unlock_irqrestore(&iommu->lock, flags);
 			return NULL;
 		}
-		__iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K);
+		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
 		phy_addr = virt_to_phys((void *)context);
 		set_root_value(root, phy_addr);
 		set_root_present(root);
@@ -345,7 +348,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
 				return NULL;
 			}
 			__iommu_flush_cache(domain->iommu, tmp_page,
-				PAGE_SIZE_4K);
+				PAGE_SIZE);
 			dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
 			/*
 			 * high level table always sets r/w, last level page
@@ -408,13 +411,13 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
 	start &= (((u64)1) << addr_width) - 1;
 	end &= (((u64)1) << addr_width) - 1;
 	/* in case it's partial page */
-	start = PAGE_ALIGN_4K(start);
-	end &= PAGE_MASK_4K;
+	start = PAGE_ALIGN(start);
+	end &= PAGE_MASK;
 
 	/* we don't need lock here, nobody else touches the iova range */
 	while (start < end) {
 		dma_pte_clear_one(domain, start);
-		start += PAGE_SIZE_4K;
+		start += VTD_PAGE_SIZE;
 	}
 }
 
@@ -468,7 +471,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 	if (!root)
 		return -ENOMEM;
 
-	__iommu_flush_cache(iommu, root, PAGE_SIZE_4K);
+	__iommu_flush_cache(iommu, root, ROOT_SIZE);
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	iommu->root_entry = root;
@@ -634,7 +637,8 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
 	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
 		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
-			DMA_TLB_IIRG(type), DMA_TLB_IAIG(val));
+			(unsigned long long)DMA_TLB_IIRG(type),
+			(unsigned long long)DMA_TLB_IAIG(val));
 	/* flush context entry will implictly flush write buffer */
 	return 0;
 }
@@ -644,7 +648,7 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 {
 	unsigned int mask;
 
-	BUG_ON(addr & (~PAGE_MASK_4K));
+	BUG_ON(addr & (~VTD_PAGE_MASK));
 	BUG_ON(pages == 0);
 
 	/* Fallback to domain selective flush if no PSI support */
@@ -798,7 +802,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
 }
 
 static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
-		u8 fault_reason, u16 source_id, u64 addr)
+		u8 fault_reason, u16 source_id, unsigned long long addr)
 {
 	const char *reason;
 
@@ -1051,9 +1055,9 @@ static void dmar_init_reserved_ranges(void)
 		if (!r->flags || !(r->flags & IORESOURCE_MEM))
 			continue;
 		addr = r->start;
-		addr &= PAGE_MASK_4K;
+		addr &= PAGE_MASK;
 		size = r->end - addr;
-		size = PAGE_ALIGN_4K(size);
+		size = PAGE_ALIGN(size);
 		iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
 			IOVA_PFN(size + addr) - 1);
 		if (!iova)
@@ -1115,7 +1119,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
 	if (!domain->pgd)
 		return -ENOMEM;
-	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K);
+	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
 	return 0;
 }
 
@@ -1131,7 +1135,7 @@ static void domain_exit(struct dmar_domain *domain)
 	/* destroy iovas */
 	put_iova_domain(&domain->iovad);
 	end = DOMAIN_MAX_ADDR(domain->gaw);
-	end = end & (~PAGE_MASK_4K);
+	end = end & (~PAGE_MASK);
 
 	/* clear ptes */
 	dma_pte_clear_range(domain, 0, end);
@@ -1252,22 +1256,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 	u64 start_pfn, end_pfn;
 	struct dma_pte *pte;
 	int index;
+	int addr_width = agaw_to_width(domain->agaw);
+
+	hpa &= (((u64)1) << addr_width) - 1;
 
 	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
 		return -EINVAL;
-	iova &= PAGE_MASK_4K;
-	start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K;
-	end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K;
+	iova &= PAGE_MASK;
+	start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
+	end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
 	index = 0;
 	while (start_pfn < end_pfn) {
-		pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index);
+		pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
 		if (!pte)
 			return -ENOMEM;
 		/* We don't need lock here, nobody else
 		 * touches the iova range
 		 */
 		BUG_ON(dma_pte_addr(*pte));
-		dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
+		dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT);
 		dma_set_pte_prot(*pte, prot);
 		__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
 		start_pfn++;
@@ -1445,11 +1452,13 @@ error:
 	return find_domain(pdev);
 }
 
-static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
+static int iommu_prepare_identity_map(struct pci_dev *pdev,
+				      unsigned long long start,
+				      unsigned long long end)
 {
 	struct dmar_domain *domain;
 	unsigned long size;
-	u64 base;
+	unsigned long long base;
 	int ret;
 
 	printk(KERN_INFO
@@ -1461,9 +1470,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
 		return -ENOMEM;
 
 	/* The address might not be aligned */
-	base = start & PAGE_MASK_4K;
+	base = start & PAGE_MASK;
 	size = end - base;
-	size = PAGE_ALIGN_4K(size);
+	size = PAGE_ALIGN(size);
 	if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
 			IOVA_PFN(base + size) - 1)) {
 		printk(KERN_ERR "IOMMU: reserve iova failed\n");
@@ -1732,8 +1741,8 @@ error:
 static inline u64 aligned_size(u64 host_addr, size_t size)
 {
 	u64 addr;
-	addr = (host_addr & (~PAGE_MASK_4K)) + size;
-	return PAGE_ALIGN_4K(addr);
+	addr = (host_addr & (~PAGE_MASK)) + size;
+	return PAGE_ALIGN(addr);
 }
 
 struct iova *
@@ -1747,7 +1756,7 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
 		return NULL;
 
 	piova = alloc_iova(&domain->iovad,
-			size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1);
+			size >> PAGE_SHIFT, IOVA_PFN(end), 1);
 	return piova;
 }
 
@@ -1807,12 +1816,12 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
 	return domain;
 }
 
-static dma_addr_t
+dma_addr_t
 intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
 {
 	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
-	unsigned long start_paddr;
+	phys_addr_t start_paddr;
 	struct iova *iova;
 	int prot = 0;
 	int ret;
@@ -1831,7 +1840,7 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
 	if (!iova)
 		goto error;
 
-	start_paddr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
 
 	/*
 	 * Check if DMAR supports zero-length reads on write only
@@ -1849,27 +1858,23 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
 	 * is not a big problem
 	 */
 	ret = domain_page_mapping(domain, start_paddr,
-		((u64)paddr) & PAGE_MASK_4K, size, prot);
+		((u64)paddr) & PAGE_MASK, size, prot);
 	if (ret)
 		goto error;
 
-	pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n",
-		pci_name(pdev), size, (u64)paddr,
-		size, (u64)start_paddr, dir);
-
 	/* it's a non-present to present mapping */
 	ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
-		start_paddr, size >> PAGE_SHIFT_4K, 1);
+		start_paddr, size >> VTD_PAGE_SHIFT, 1);
 	if (ret)
 		iommu_flush_write_buffer(domain->iommu);
 
-	return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K)));
+	return start_paddr + ((u64)paddr & (~PAGE_MASK));
 
 error:
 	if (iova)
 		__free_iova(&domain->iovad, iova);
 	printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
-		pci_name(pdev), size, (u64)paddr, dir);
+		pci_name(pdev), size, (unsigned long long)paddr, dir);
 	return 0;
 }
 
@@ -1931,8 +1936,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
 	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
 }
 
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
-	size_t size, int dir)
+void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+			int dir)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dmar_domain *domain;
@@ -1948,11 +1953,11 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
 	if (!iova)
 		return;
 
-	start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_addr = iova->pfn_lo << PAGE_SHIFT;
 	size = aligned_size((u64)dev_addr, size);
 
 	pr_debug("Device %s unmapping: %lx@%llx\n",
-		pci_name(pdev), size, (u64)start_addr);
+		pci_name(pdev), size, (unsigned long long)start_addr);
 
 	/* clear the whole page */
 	dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -1960,7 +1965,7 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
 	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 	if (intel_iommu_strict) {
 		if (iommu_flush_iotlb_psi(domain->iommu,
-			domain->id, start_addr, size >> PAGE_SHIFT_4K, 0))
+			domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
 			iommu_flush_write_buffer(domain->iommu);
 		/* free iova */
 		__free_iova(&domain->iovad, iova);
@@ -1973,13 +1978,13 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
 	}
 }
 
-static void * intel_alloc_coherent(struct device *hwdev, size_t size,
-	dma_addr_t *dma_handle, gfp_t flags)
+void *intel_alloc_coherent(struct device *hwdev, size_t size,
+			   dma_addr_t *dma_handle, gfp_t flags)
 {
 	void *vaddr;
 	int order;
 
-	size = PAGE_ALIGN_4K(size);
+	size = PAGE_ALIGN(size);
 	order = get_order(size);
 	flags &= ~(GFP_DMA | GFP_DMA32);
 
@@ -1995,12 +2000,12 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size,
 	return NULL;
 }
 
-static void intel_free_coherent(struct device *hwdev, size_t size,
-	void *vaddr, dma_addr_t dma_handle)
+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+			 dma_addr_t dma_handle)
 {
 	int order;
 
-	size = PAGE_ALIGN_4K(size);
+	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
 	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
@@ -2008,8 +2013,9 @@ static void intel_free_coherent(struct device *hwdev, size_t size,
 }
 
 #define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
-	int nelems, int dir)
+
+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+		    int nelems, int dir)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2033,7 +2039,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 		size += aligned_size((u64)addr, sg->length);
 	}
 
-	start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_addr = iova->pfn_lo << PAGE_SHIFT;
 
 	/* clear the whole page */
 	dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -2041,7 +2047,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 
 	if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
-			size >> PAGE_SHIFT_4K, 0))
+			size >> VTD_PAGE_SHIFT, 0))
 		iommu_flush_write_buffer(domain->iommu);
 
 	/* free iova */
@@ -2062,8 +2068,8 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 	return nelems;
 }
 
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
-	int nelems, int dir)
+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+		 int dir)
 {
 	void *addr;
 	int i;
@@ -2107,14 +2113,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
 	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
 		prot |= DMA_PTE_WRITE;
 
-	start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_addr = iova->pfn_lo << PAGE_SHIFT;
 	offset = 0;
 	for_each_sg(sglist, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		addr = (void *)virt_to_phys(addr);
 		size = aligned_size((u64)addr, sg->length);
 		ret = domain_page_mapping(domain, start_addr + offset,
-			((u64)addr) & PAGE_MASK_4K,
+			((u64)addr) & PAGE_MASK,
 			size, prot);
 		if (ret) {
 			/* clear the page */
@@ -2128,14 +2134,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
 			return 0;
 		}
 		sg->dma_address = start_addr + offset +
-				((u64)addr & (~PAGE_MASK_4K));
+				((u64)addr & (~PAGE_MASK));
 		sg->dma_length = sg->length;
 		offset += size;
 	}
 
 	/* it's a non-present to present mapping */
 	if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
-			start_addr, offset >> PAGE_SHIFT_4K, 1))
+			start_addr, offset >> VTD_PAGE_SHIFT, 1))
 		iommu_flush_write_buffer(domain->iommu);
 	return nelems;
 }
@@ -2175,7 +2181,6 @@ static inline int iommu_devinfo_cache_init(void)
 					 sizeof(struct device_domain_info),
 					 0,
 					 SLAB_HWCACHE_ALIGN,
-
 					 NULL);
 	if (!iommu_devinfo_cache) {
 		printk(KERN_ERR "Couldn't create devinfo cache\n");
@@ -2193,7 +2198,6 @@ static inline int iommu_iova_cache_init(void)
 					 sizeof(struct iova),
 					 0,
 					 SLAB_HWCACHE_ALIGN,
-
 					 NULL);
 	if (!iommu_iova_cache) {
 		printk(KERN_ERR "Couldn't create iova cache\n");
@@ -2322,7 +2326,7 @@ void intel_iommu_domain_exit(struct dmar_domain *domain)
 		return;
 
 	end = DOMAIN_MAX_ADDR(domain->gaw);
-	end = end & (~PAGE_MASK_4K);
+	end = end & (~VTD_PAGE_MASK);
 
 	/* clear ptes */
 	dma_pte_clear_range(domain, 0, end);
@@ -2418,6 +2422,6 @@ u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
 	if (pte)
 		pfn = dma_pte_addr(*pte);
 
-	return pfn >> PAGE_SHIFT_4K;
+	return pfn >> VTD_PAGE_SHIFT;
 }
 EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
--
cgit v1.2.3
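The conversion above rests on keeping two page sizes apart: the host's PAGE_SIZE/PAGE_SHIFT/PAGE_MASK, which can be larger than 4KiB on IA64, and the fixed 4KiB VT-d page expressed through VTD_PAGE_SIZE/VTD_PAGE_SHIFT/VTD_PAGE_MASK/VTD_PAGE_ALIGN. A small stand-alone sketch of that distinction and of an aligned_size()-style helper, using illustrative macro values rather than the kernel's headers (16KiB is just an example host page size):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/* Illustrative values only, not the kernel's definitions:
 * a 16KiB host page versus the fixed 4KiB VT-d page. */
#define PAGE_SHIFT	14
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~((u64)PAGE_SIZE - 1))
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)

#define VTD_PAGE_SHIFT	12
#define VTD_PAGE_SIZE	(1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK	(~((u64)VTD_PAGE_SIZE - 1))
#define VTD_PAGE_ALIGN(a) (((a) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

/* Same shape as the driver's helper: size of the host-page-aligned
 * region covering [host_addr, host_addr + size). */
static u64 aligned_size(u64 host_addr, size_t size)
{
	u64 addr = (host_addr & (~PAGE_MASK)) + size;
	return PAGE_ALIGN(addr);
}

int main(void)
{
	u64 buf = 0x1234567;	/* arbitrary, unaligned address */
	size_t len = 8192;

	printf("host-page-aligned span: %llu bytes\n",
	       (unsigned long long)aligned_size(buf, len));
	printf("VT-d pages covered:     %llu\n",
	       (unsigned long long)(VTD_PAGE_ALIGN(buf + len) -
				    (buf & VTD_PAGE_MASK)) >> VTD_PAGE_SHIFT);
	return 0;
}

The split explains the pattern in the diff: IOVA and host-buffer arithmetic keeps using the host macros, while page-table entries and IOTLB flush ranges switch to the VTD_* variants.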
From bb9e6d65078da2f38cfe1067cfd31a896ca867c0 Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Wed, 15 Oct 2008 16:08:28 +0900
Subject: intel-iommu: use coherent_dma_mask in alloc_coherent

This patch fixes intel-iommu to use dev->coherent_dma_mask in
alloc_coherent. Currently, intel-iommu uses dev->dma_mask in
alloc_coherent, but alloc_coherent is supposed to use coherent_dma_mask.
This could break drivers that use a smaller coherent_dma_mask than
dma_mask (though the current code works for the majority of drivers,
which use the same mask for both).

[dwmw2: dma_mask can be bigger than 'unsigned long']
Signed-off-by: FUJITA Tomonori
Reviewed-by: Grant Grundler
Signed-off-by: David Woodhouse
---
 drivers/pci/intel-iommu.c | 29 +++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)

(limited to 'drivers/pci/intel-iommu.c')

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 2bf96babbc4..d315e413fae 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1762,14 +1762,14 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
 
 static struct iova *
 __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
-		size_t size)
+		   size_t size, u64 dma_mask)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct iova *iova = NULL;
 
-	if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) {
-		iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
-	} else {
+	if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
+		iova = iommu_alloc_iova(domain, size, dma_mask);
+	else {
 		/*
 		 * First try to allocate an io virtual address in
 		 * DMA_32BIT_MASK and if that fails then try allocating
@@ -1777,7 +1777,7 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
 		 */
 		iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
 		if (!iova)
-			iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
+			iova = iommu_alloc_iova(domain, size, dma_mask);
 	}
 
 	if (!iova) {
@@ -1816,8 +1816,8 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
 	return domain;
 }
 
-dma_addr_t
-intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
+static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
+				     size_t size, int dir, u64 dma_mask)
 {
 	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
@@ -1836,7 +1836,7 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
 
 	size = aligned_size((u64)paddr, size);
 
-	iova = __intel_alloc_iova(hwdev, domain, size);
+	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
 	if (!iova)
 		goto error;
 
@@ -1878,6 +1878,13 @@ error:
 	return 0;
 }
 
+dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
+			    size_t size, int dir)
+{
+	return __intel_map_single(hwdev, paddr, size, dir,
+				  to_pci_dev(hwdev)->dma_mask);
+}
+
 static void flush_unmaps(void)
 {
 	int i, j;
@@ -1993,7 +2000,9 @@ void *intel_alloc_coherent(struct device *hwdev, size_t size,
 		return NULL;
 	memset(vaddr, 0, size);
 
-	*dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL);
+	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
+					 DMA_BIDIRECTIONAL,
+					 hwdev->coherent_dma_mask);
 	if (*dma_handle)
 		return vaddr;
 	free_pages((unsigned long)vaddr, order);
@@ -2097,7 +2106,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 		size += aligned_size((u64)addr, sg->length);
 	}
 
-	iova = __intel_alloc_iova(hwdev, domain, size);
+	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;
--
cgit v1.2.3
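Once the mask is threaded through, the fix is mechanical: __intel_map_single() takes the mask to honour as a parameter, intel_map_single() passes the device's dma_mask, and intel_alloc_coherent() passes coherent_dma_mask. The sketch below isolates the mask selection and the 32-bit-first IOVA fallback; the helper names (dummy_dev, alloc_iova_below, intel_alloc_iova_sketch) are hypothetical stand-ins, not the driver's symbols:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define DMA_32BIT_MASK	0xffffffffULL	/* local stand-in for the kernel constant */

/* Stand-in for a device's two DMA masks. */
struct dummy_dev {
	uint64_t dma_mask;		/* streaming (map_single/map_sg) mappings */
	uint64_t coherent_dma_mask;	/* coherent (alloc_coherent) mappings */
};

static bool dmar_forcedac;	/* mirrors the intel_iommu=forcedac option */

/* Stub: pretend to allocate an IOVA below 'limit'; 0 would mean failure. */
static uint64_t alloc_iova_below(uint64_t limit)
{
	printf("  trying allocation below %#llx\n",
	       (unsigned long long)limit);
	return limit;	/* always "succeeds" in this sketch */
}

/* Mirrors the shape of __intel_alloc_iova(): prefer the 32-bit space
 * unless the device is 32-bit-only anyway, or forcedac is set. */
static uint64_t intel_alloc_iova_sketch(uint64_t dma_mask)
{
	uint64_t iova;

	if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
		return alloc_iova_below(dma_mask);

	iova = alloc_iova_below(DMA_32BIT_MASK);
	if (!iova)
		iova = alloc_iova_below(dma_mask);
	return iova;
}

int main(void)
{
	struct dummy_dev dev = {
		.dma_mask = ~0ULL,			/* 64-bit capable for streaming */
		.coherent_dma_mask = DMA_32BIT_MASK,	/* but only 32-bit coherent */
	};

	printf("map_single path (uses dma_mask):\n");
	intel_alloc_iova_sketch(dev.dma_mask);

	printf("alloc_coherent path (uses coherent_dma_mask):\n");
	intel_alloc_iova_sketch(dev.coherent_dma_mask);
	return 0;
}

For a device shaped like the one above, the old code would have handed alloc_coherent a 64-bit IOVA that the device could not reach coherently; passing coherent_dma_mask keeps the coherent allocation inside the 32-bit window.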