author    Joerg Roedel <joerg.roedel@amd.com>    2008-11-06 14:59:05 +0100
committer Joerg Roedel <joerg.roedel@amd.com>    2008-11-06 14:59:05 +0100
commit    80be308dfa3798c7bad0fc81760b2faf83870e91
tree      745661b7a9b6545acfdab389decdce37788ae98a /arch/x86/kernel
parent    ae9b9403644f3ecc76867af042e7e1cfd5c099d0
AMD IOMMU: fix lazy IO/TLB flushing in unmap path
Lazy flushing also needs to cover the unmap path; that part is not yet implemented, so freed ranges can leave stale IO/TLB entries behind. This patch fixes it.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
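For background, the sketch below is a minimal, standalone userspace model of the lazy-flush bookkeeping this patch adds on the free path. Only the field names (bitmap, next_bit, need_flush) and the address + pages >= next_bit check are taken from the patch; the toy aperture, struct toy_dma_dom, toy_alloc() and toy_free() are illustrative assumptions, not the kernel code.

#include <stdbool.h>
#include <stdio.h>

#define APERTURE_PAGES 64	/* toy aperture: 64 pages, one bit per page */

/* Simplified stand-in for the relevant fields of struct dma_ops_domain. */
struct toy_dma_dom {
	unsigned long bitmap;	/* allocation bitmap                        */
	unsigned long next_bit;	/* where the next allocation will start     */
	bool need_flush;	/* request an IO/TLB flush on the next map  */
};

/* Allocate 'pages' contiguous pages starting at next_bit (no wrap-around). */
static long toy_alloc(struct toy_dma_dom *dom, unsigned int pages)
{
	unsigned long start = dom->next_bit;

	if (start + pages > APERTURE_PAGES)
		return -1;
	dom->bitmap |= ((1UL << pages) - 1) << start;
	dom->next_bit = start + pages;
	return start;
}

/*
 * Free path: clear the bits and, as in the patch, mark the domain for
 * flushing when the freed range ends at or beyond next_bit.  Such pages
 * could be handed out again by the very next allocation, before any
 * wrap-around flush, while stale IO/TLB entries still reference them.
 */
static void toy_free(struct toy_dma_dom *dom, unsigned long address,
		     unsigned int pages)
{
	dom->bitmap &= ~(((1UL << pages) - 1) << address);

	if (address + pages >= dom->next_bit)
		dom->need_flush = true;
}

int main(void)
{
	struct toy_dma_dom dom = { 0, 0, false };
	long a = toy_alloc(&dom, 4);	/* pages 0-3 */
	long b = toy_alloc(&dom, 2);	/* pages 4-5 */

	toy_free(&dom, a, 4);	/* ends below next_bit (6): no flush forced */
	printf("after freeing a: need_flush=%d\n", dom.need_flush);

	toy_free(&dom, b, 2);	/* reaches next_bit: flush forced */
	printf("after freeing b: need_flush=%d\n", dom.need_flush);

	return 0;
}

With these numbers the first free leaves need_flush clear, while the second sets it, mirroring the condition added to dma_ops_free_addresses in the diff below.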
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/amd_iommu.c  7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 38e88d40ab1..4755bbc7ae5 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -526,6 +526,9 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 {
 	address >>= PAGE_SHIFT;
 	iommu_area_free(dom->bitmap, address, pages);
+
+	if (address + pages >= dom->next_bit)
+		dom->need_flush = true;
 }
 
 /****************************************************************************
@@ -981,8 +984,10 @@ static void __unmap_single(struct amd_iommu *iommu,
 
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
-	if (amd_iommu_unmap_flush)
+	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
 		iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
+		dma_dom->need_flush = false;
+	}
 }
 
 /*