author    Joerg Roedel <joerg.roedel@amd.com>  2008-06-26 21:28:01 +0200
committer Ingo Molnar <mingo@elte.hu>          2008-06-27 10:12:18 +0200
commit    4da70b9e4f8576ec906dba9240c5b6bc6584f91d (patch)
tree      2dec55824ec43c8b1f8a9a5971295ceff7422b55 /arch/x86/kernel/amd_iommu.c
parent    cb76c3229725c6dcae31da65e9ca57f434628c05 (diff)
x86, AMD IOMMU: add dma_ops mapping functions for single mappings
This patch adds the dma_ops specific mapping functions for single mappings.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: iommu@lists.linux-foundation.org
Cc: bhavna.sarathy@amd.com
Cc: Sebastian.Biemueller@amd.com
Cc: robert.richter@amd.com
Cc: joro@8bytes.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c   59
1 file changed, 59 insertions, 0 deletions
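
Note: map_single() and unmap_single() added below are the single-mapping callbacks of the AMD IOMMU dma_ops backend. As a minimal sketch of how they would be wired up, assuming the struct dma_mapping_ops layout of this kernel generation (the registration itself is not part of this patch and is done later in the series; the amd_iommu_dma_ops name is used here purely for illustration):

	/*
	 * Sketch only, not part of this patch: hooking the new
	 * single-mapping handlers into a dma_mapping_ops instance.
	 * Field names assume the 2.6.26-era struct dma_mapping_ops.
	 */
	static struct dma_mapping_ops amd_iommu_dma_ops = {
		.map_single   = map_single,
		.unmap_single = unmap_single,
	};
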
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index e00a3e7ba35..b4079f6bbd7 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -40,6 +40,11 @@ struct command {
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
struct unity_map_entry *e);
+static int iommu_has_npcache(struct amd_iommu *iommu)
+{
+ return iommu->cap & IOMMU_CAP_NPCACHE;
+}
+
static int __iommu_queue_command(struct amd_iommu *iommu, struct command *cmd)
{
u32 tail, head;
@@ -641,3 +646,57 @@ static void __unmap_single(struct amd_iommu *iommu,
dma_ops_free_addresses(dma_dom, dma_addr, pages);
}
+static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
+ size_t size, int dir)
+{
+ unsigned long flags;
+ struct amd_iommu *iommu;
+ struct protection_domain *domain;
+ u16 devid;
+ dma_addr_t addr;
+
+ get_device_resources(dev, &iommu, &domain, &devid);
+
+ if (iommu == NULL || domain == NULL)
+ return (dma_addr_t)paddr;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ addr = __map_single(dev, iommu, domain->priv, paddr, size, dir);
+ if (addr == bad_dma_address)
+ goto out;
+
+ if (iommu_has_npcache(iommu))
+ iommu_flush_pages(iommu, domain->id, addr, size);
+
+ if (iommu->need_sync)
+ iommu_completion_wait(iommu);
+
+out:
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return addr;
+}
+
+static void unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size, int dir)
+{
+ unsigned long flags;
+ struct amd_iommu *iommu;
+ struct protection_domain *domain;
+ u16 devid;
+
+ if (!get_device_resources(dev, &iommu, &domain, &devid))
+ return;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ __unmap_single(iommu, domain->priv, dma_addr, size, dir);
+
+ iommu_flush_pages(iommu, domain->id, dma_addr, size);
+
+ if (iommu->need_sync)
+ iommu_completion_wait(iommu);
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+}
+
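
For reference, a driver does not call map_single()/unmap_single() directly; it goes through the generic DMA API, which dispatches to these dma_ops handlers. A minimal, hypothetical driver-side sketch (dev, buf and len are placeholders):

	/* Sketch only: driver-side path that ends up in map_single()/unmap_single(). */
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))	/* single-argument form of this kernel generation */
		return -ENOMEM;

	/* ... device performs DMA using 'handle' as its bus address ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);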