author    Joerg Roedel <joerg.roedel@amd.com>    2008-09-04 18:40:05 +0200
committer Ingo Molnar <mingo@elte.hu>            2008-09-19 12:59:07 +0200
commit    1c65577398589bb44ab0980f9b9d30804b48a5db (patch)
tree      4688c194c399de3a34769dbf5cd0bb31ab278d99 /arch/x86/kernel/amd_iommu.c
parent    2842e5bf3115193f05dc9dac20f940e7abf44c1a (diff)
AMD IOMMU: implement lazy IO/TLB flushing
The IO/TLB flushing on every unmapping operation is the most expensive part in AMD IOMMU code and not strictly necessary. It is sufficient to do the flush before any entries are reused. This patch implements lazy IO/TLB flushing, which does exactly this.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
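Below is a minimal, self-contained userspace sketch of the lazy-flush idea, not the kernel code: the toy_* names, the single-word bitmap and the printf() stand-in for the IO/TLB flush command are illustrative assumptions. In the patch itself the same logic lives in dma_ops_alloc_addresses(), __map_single(), __unmap_single() and the new iommu_flush_tlb() helper shown in the diff.

/*
 * Sketch of lazy IO/TLB flushing: freed addresses are not flushed on
 * unmap; the whole domain is flushed only when the allocator wraps
 * around and may hand out previously used addresses again.
 */
#include <stdbool.h>
#include <stdio.h>

#define APERTURE_PAGES 16

struct toy_domain {
	unsigned long bitmap;     /* one bit per page in the aperture        */
	unsigned int  next_bit;   /* next-fit cursor, as in the patch        */
	bool          need_flush; /* set when freed entries may be reused    */
};

/* Stand-in for iommu_flush_tlb(): flush the whole IO/TLB of the domain. */
static void toy_flush_tlb(struct toy_domain *dom)
{
	printf("IO/TLB flush for whole domain\n");
}

/* Next-fit single-page allocator; wrapping means old entries get reused. */
static int toy_alloc_page(struct toy_domain *dom)
{
	for (unsigned int pass = 0; pass < 2; pass++) {
		for (unsigned int i = dom->next_bit; i < APERTURE_PAGES; i++) {
			if (!(dom->bitmap & (1UL << i))) {
				dom->bitmap |= 1UL << i;
				dom->next_bit = i + 1;
				return (int)i;
			}
		}
		/*
		 * Wrapped around: addresses freed earlier may now be
		 * handed out again, so mark the domain for a flush.
		 */
		dom->next_bit = 0;
		dom->need_flush = true;
	}
	return -1;
}

/* Map path: flush lazily, only when the allocator wrapped. */
static int toy_map(struct toy_domain *dom)
{
	int page = toy_alloc_page(dom);

	if (page >= 0 && dom->need_flush) {
		toy_flush_tlb(dom);
		dom->need_flush = false;
	}
	return page;
}

/* Unmap path: just release the address, no per-unmap flush. */
static void toy_unmap(struct toy_domain *dom, int page)
{
	dom->bitmap &= ~(1UL << page);
}

int main(void)
{
	struct toy_domain dom = { .need_flush = false };
	int pages[APERTURE_PAGES];

	/* Exhaust the aperture, free everything, then map again. */
	for (int i = 0; i < APERTURE_PAGES; i++)
		pages[i] = toy_map(&dom);
	for (int i = 0; i < APERTURE_PAGES; i++)
		toy_unmap(&dom, pages[i]);
	toy_map(&dom); /* reuses page 0 -> flushes exactly once here */

	return 0;
}

Built with any C99 compiler, the program prints a single flush message: sixteen unmaps cause no flush at all, and only the wrap-around before reuse does.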
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c  26
1 file changed, 22 insertions, 4 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 691e023695a..679f2a8e22e 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -203,6 +203,14 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
return 0;
}
+/* Flush the whole IO/TLB for a given protection domain */
+static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
+{
+ u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+
+ iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
+}
+
/****************************************************************************
*
* The functions below are used the create the page table mappings for
@@ -386,14 +394,18 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
PAGE_SIZE) >> PAGE_SHIFT;
limit = limit < size ? limit : size;
- if (dom->next_bit >= limit)
+ if (dom->next_bit >= limit) {
dom->next_bit = 0;
+ dom->need_flush = true;
+ }
address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
0 , boundary_size, 0);
- if (address == -1)
+ if (address == -1) {
address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
0, boundary_size, 0);
+ dom->need_flush = true;
+ }
if (likely(address != -1)) {
dom->next_bit = address + pages;
@@ -553,6 +565,8 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
dma_dom->bitmap[0] = 1;
dma_dom->next_bit = 0;
+ dma_dom->need_flush = false;
+
/* Intialize the exclusion range if necessary */
if (iommu->exclusion_start &&
iommu->exclusion_start < dma_dom->aperture_size) {
@@ -795,7 +809,10 @@ static dma_addr_t __map_single(struct device *dev,
}
address += offset;
- if (unlikely(iommu_has_npcache(iommu)))
+ if (unlikely(dma_dom->need_flush && !iommu_fullflush)) {
+ iommu_flush_tlb(iommu, dma_dom->domain.id);
+ dma_dom->need_flush = false;
+ } else if (unlikely(iommu_has_npcache(iommu)))
iommu_flush_pages(iommu, dma_dom->domain.id, address, size);
out:
@@ -829,7 +846,8 @@ static void __unmap_single(struct amd_iommu *iommu,
dma_ops_free_addresses(dma_dom, dma_addr, pages);
- iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
+ if (iommu_fullflush)
+ iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
}
/*