Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--  drivers/pci/intel-iommu.c | 307
1 file changed, 140 insertions(+), 167 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index c3edcdc08e7..8b51e10b778 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -33,8 +33,8 @@
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
-#include "iova.h"
-#include "intel-iommu.h"
+#include <linux/iova.h>
+#include <linux/intel-iommu.h>
#include <asm/proto.h> /* force_iommu in this header in x86-64 */
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -49,8 +49,6 @@
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
-#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */
-
#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
@@ -58,8 +56,6 @@ static void flush_unmaps_timeout(unsigned long data);
DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
-static struct intel_iommu *g_iommus;
-
#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
int next;
@@ -80,7 +76,7 @@ static long list_size;
static void domain_remove_dev_info(struct dmar_domain *domain);
-static int dmar_disabled;
+int dmar_disabled;
static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
@@ -160,7 +156,7 @@ static inline void *alloc_domain_mem(void)
return iommu_kmem_cache_alloc(iommu_domain_cache);
}
-static inline void free_domain_mem(void *vaddr)
+static void free_domain_mem(void *vaddr)
{
kmem_cache_free(iommu_domain_cache, vaddr);
}
@@ -185,13 +181,6 @@ void free_iova_mem(struct iova *iova)
kmem_cache_free(iommu_iova_cache, iova);
}
-static inline void __iommu_flush_cache(
- struct intel_iommu *iommu, void *addr, int size)
-{
- if (!ecap_coherent(iommu->ecap))
- clflush_cache_range(addr, size);
-}
-
/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
u8 bus, u8 devfn)
@@ -488,19 +477,6 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
return 0;
}
-#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
-{\
- cycles_t start_time = get_cycles();\
- while (1) {\
- sts = op (iommu->reg + offset);\
- if (cond)\
- break;\
- if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
- panic("DMAR hardware is malfunctioning\n");\
- cpu_relax();\
- }\
-}
-
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
void *addr;
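
The IOMMU_WAIT_OP() poll loop removed here (together with DMAR_OPERATION_TIMEOUT, dropped in an earlier hunk) is not dead code: callers such as iommu_set_root_entry() below still depend on it, so it presumably migrates into the now-included <linux/intel-iommu.h>. A minimal usage sketch, assuming the macro keeps its five-argument form:

	u32 sts;

	/* spin until the hardware reports the root table pointer as set;
	 * IOMMU_WAIT_OP() panics once DMAR_OPERATION_TIMEOUT cycles pass */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_RTPS), sts);
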
@@ -587,7 +563,7 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
spin_unlock_irqrestore(&iommu->register_lock, flag);
- /* flush context entry will implictly flush write buffer */
+ /* flush context entry will implicitly flush write buffer */
return 0;
}
@@ -680,7 +656,7 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
DMA_TLB_IIRG(type), DMA_TLB_IAIG(val));
- /* flush context entry will implictly flush write buffer */
+ /* flush iotlb entry will implicitly flush write buffer */
return 0;
}
@@ -990,6 +966,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
return -ENOMEM;
}
+ spin_lock_init(&iommu->lock);
+
/*
* if Caching mode is set, then invalid translations are tagged
* with domainid 0. Hence we need to pre-allocate it.
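
Because caching-mode hardware tags invalid translations with domain id 0, pre-setting bit 0 keeps any allocator that scans iommu->domain_ids from handing that id to a real domain. A minimal sketch of the allocation side; example_alloc_domain_id() is hypothetical (the in-tree allocator is iommu_alloc_domain()):

	/* hypothetical sketch: because bit 0 was pre-set above,
	 * find_first_zero_bit() can never return domain id 0 */
	static int example_alloc_domain_id(struct intel_iommu *iommu)
	{
		unsigned long ndomains = cap_ndoms(iommu->cap);
		unsigned long id;

		spin_lock(&iommu->lock);
		id = find_first_zero_bit(iommu->domain_ids, ndomains);
		if (id >= ndomains) {
			spin_unlock(&iommu->lock);
			return -ENOSPC;	/* all domain ids in use */
		}
		set_bit(id, iommu->domain_ids);
		spin_unlock(&iommu->lock);
		return id;
	}
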
@@ -998,62 +976,15 @@ static int iommu_init_domains(struct intel_iommu *iommu)
set_bit(0, iommu->domain_ids);
return 0;
}
-static struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
- struct dmar_drhd_unit *drhd)
-{
- int ret;
- int map_size;
- u32 ver;
-
- iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
- if (!iommu->reg) {
- printk(KERN_ERR "IOMMU: can't map the region\n");
- goto error;
- }
- iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
- iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
-
- /* the registers might be more than one page */
- map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
- cap_max_fault_reg_offset(iommu->cap));
- map_size = PAGE_ALIGN_4K(map_size);
- if (map_size > PAGE_SIZE_4K) {
- iounmap(iommu->reg);
- iommu->reg = ioremap(drhd->reg_base_addr, map_size);
- if (!iommu->reg) {
- printk(KERN_ERR "IOMMU: can't map the region\n");
- goto error;
- }
- }
-
- ver = readl(iommu->reg + DMAR_VER_REG);
- pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
- drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
- iommu->cap, iommu->ecap);
- ret = iommu_init_domains(iommu);
- if (ret)
- goto error_unmap;
- spin_lock_init(&iommu->lock);
- spin_lock_init(&iommu->register_lock);
- drhd->iommu = iommu;
- return iommu;
-error_unmap:
- iounmap(iommu->reg);
-error:
- kfree(iommu);
- return NULL;
-}
static void domain_exit(struct dmar_domain *domain);
-static void free_iommu(struct intel_iommu *iommu)
+
+void free_dmar_iommu(struct intel_iommu *iommu)
{
struct dmar_domain *domain;
int i;
- if (!iommu)
- return;
-
i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
for (; i < cap_ndoms(iommu->cap); ) {
domain = iommu->domains[i];
@@ -1078,10 +1009,6 @@ static void free_iommu(struct intel_iommu *iommu)
/* free context mapping */
free_context_table(iommu);
-
- if (iommu->reg)
- iounmap(iommu->reg);
- kfree(iommu);
}
static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
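
free_iommu() loses its register unmap and kfree() and becomes the non-static free_dmar_iommu(), now covering only domain and context-table teardown; since the error path in init_dmars() still calls free_iommu(), the unit-level half presumably lives on beside the relocated alloc_iommu() in dmar.c. A sketch of that presumed counterpart:

	/* presumed dmar.c counterpart (sketch, not part of this file):
	 * unit-level teardown wrapping the domain-level helper above */
	void free_iommu(struct intel_iommu *iommu)
	{
		if (!iommu)
			return;

		free_dmar_iommu(iommu);		/* domains + context table */

		if (iommu->reg)
			iounmap(iommu->reg);
		kfree(iommu);
	}
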
@@ -1414,7 +1341,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
* find_domain
* Note: we use struct pci_dev->dev.archdata.iommu to store the info
*/
-struct dmar_domain *
+static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
struct device_domain_info *info;
@@ -1426,37 +1353,6 @@ find_domain(struct pci_dev *pdev)
return NULL;
}
-static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
- struct pci_dev *dev)
-{
- int index;
-
- while (dev) {
- for (index = 0; index < cnt; index++)
- if (dev == devices[index])
- return 1;
-
- /* Check our parent */
- dev = dev->bus->self;
- }
-
- return 0;
-}
-
-static struct dmar_drhd_unit *
-dmar_find_matched_drhd_unit(struct pci_dev *dev)
-{
- struct dmar_drhd_unit *drhd = NULL;
-
- list_for_each_entry(drhd, &dmar_drhd_units, list) {
- if (drhd->include_all || dmar_pci_device_match(drhd->devices,
- drhd->devices_cnt, dev))
- return drhd;
- }
-
- return NULL;
-}
-
/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
@@ -1729,8 +1625,6 @@ int __init init_dmars(void)
* endfor
*/
for_each_drhd_unit(drhd) {
- if (drhd->ignored)
- continue;
g_num_of_iommus++;
/*
* lock not needed as this is only incremented in the single
@@ -1739,12 +1633,6 @@ int __init init_dmars(void)
*/
}
- g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL);
- if (!g_iommus) {
- ret = -ENOMEM;
- goto error;
- }
-
deferred_flush = kzalloc(g_num_of_iommus *
sizeof(struct deferred_flush_tables), GFP_KERNEL);
if (!deferred_flush) {
@@ -1752,16 +1640,15 @@ int __init init_dmars(void)
goto error;
}
- i = 0;
for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;
- iommu = alloc_iommu(&g_iommus[i], drhd);
- i++;
- if (!iommu) {
- ret = -ENOMEM;
+
+ iommu = drhd->iommu;
+
+ ret = iommu_init_domains(iommu);
+ if (ret)
goto error;
- }
/*
* TBD:
@@ -1845,7 +1732,6 @@ error:
iommu = drhd->iommu;
free_iommu(iommu);
}
- kfree(g_iommus);
return ret;
}
@@ -2002,7 +1888,10 @@ static void flush_unmaps(void)
/* just flush them all */
for (i = 0; i < g_num_of_iommus; i++) {
if (deferred_flush[i].next) {
- iommu_flush_iotlb_global(&g_iommus[i], 0);
+ struct intel_iommu *iommu =
+ deferred_flush[i].domain[0]->iommu;
+
+ iommu_flush_iotlb_global(iommu, 0);
for (j = 0; j < deferred_flush[i].next; j++) {
__free_iova(&deferred_flush[i].domain[j]->iovad,
deferred_flush[i].iova[j]);
@@ -2032,7 +1921,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
if (list_size == HIGH_WATER_MARK)
flush_unmaps();
- iommu_id = dom->iommu - g_iommus;
+ iommu_id = dom->iommu->seq_id;
+
next = deferred_flush[iommu_id].next;
deferred_flush[iommu_id].domain[next] = dom;
deferred_flush[iommu_id].iova[next] = iova;
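
With the g_iommus[] array gone, the index into deferred_flush[] can no longer be computed as dom->iommu - g_iommus; each unit instead carries a stable seq_id, presumably assigned once during DRHD enumeration. A minimal sketch of that assumption (example_assign_seq_id() and the counter are hypothetical):

	static int iommu_allocated;	/* hypothetical enumeration counter */

	static void example_assign_seq_id(struct intel_iommu *iommu)
	{
		iommu->seq_id = iommu_allocated++;	/* stable index into
							 * deferred_flush[] */
	}
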
@@ -2348,38 +2238,6 @@ static void __init iommu_exit_mempool(void)
}
-static int blacklist_iommu(const struct dmi_system_id *id)
-{
- printk(KERN_INFO "%s detected; disabling IOMMU\n",
- id->ident);
- dmar_disabled = 1;
- return 0;
-}
-
-static struct dmi_system_id __initdata intel_iommu_dmi_table[] = {
- { /* Some DG33BU BIOS revisions advertised non-existent VT-d */
- .callback = blacklist_iommu,
- .ident = "Intel DG33BU",
- { DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
- DMI_MATCH(DMI_BOARD_NAME, "DG33BU"),
- }
- },
- { }
-};
-
-
-void __init detect_intel_iommu(void)
-{
- if (swiotlb || no_iommu || iommu_detected || dmar_disabled)
- return;
- if (early_dmar_detect()) {
- dmi_check_system(intel_iommu_dmi_table);
- if (dmar_disabled)
- return;
- iommu_detected = 1;
- }
-}
-
static void __init init_no_remapping_devices(void)
{
struct dmar_drhd_unit *drhd;
@@ -2426,12 +2284,19 @@ int __init intel_iommu_init(void)
{
int ret = 0;
- if (no_iommu || swiotlb || dmar_disabled)
- return -ENODEV;
-
if (dmar_table_init())
return -ENODEV;
+ if (dmar_dev_scope_init())
+ return -ENODEV;
+
+ /*
+ * Check the need for DMA-remapping initialization now; the table
+ * and device-scope setup above is also used by interrupt remapping.
+ */
+ if (no_iommu || swiotlb || dmar_disabled)
+ return -ENODEV;
+
iommu_init_mempool();
dmar_init_reserved_ranges();
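
The table and device-scope parsing now runs before the dmar_disabled bail-out precisely so another consumer can reuse it. A hedged sketch of that other consumer; example_intr_remapping_init() is hypothetical, and dmar_table_init() is assumed safe to reach from more than one init path:

	/* sketch: a hypothetical interrupt-remapping init that wants the
	 * parsed DMAR table even when DMA remapping itself is disabled */
	int __init example_intr_remapping_init(void)
	{
		if (dmar_table_init())		/* shared table parse */
			return -ENODEV;
		if (dmar_dev_scope_init())	/* shared device scopes */
			return -ENODEV;
		/* ... interrupt-remapping hardware setup would go here ... */
		return 0;
	}
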
@@ -2453,3 +2318,111 @@ int __init intel_iommu_init(void)
return 0;
}
+void intel_iommu_domain_exit(struct dmar_domain *domain)
+{
+ u64 end;
+
+ /* Domain 0 is reserved, so don't process it */
+ if (!domain)
+ return;
+
+ end = DOMAIN_MAX_ADDR(domain->gaw);
+ end = end & (~PAGE_MASK_4K);
+
+ /* clear ptes */
+ dma_pte_clear_range(domain, 0, end);
+
+ /* free page tables */
+ dma_pte_free_pagetable(domain, 0, end);
+
+ iommu_free_domain(domain);
+ free_domain_mem(domain);
+}
+EXPORT_SYMBOL_GPL(intel_iommu_domain_exit);
+
+struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev)
+{
+ struct dmar_drhd_unit *drhd;
+ struct dmar_domain *domain;
+ struct intel_iommu *iommu;
+
+ drhd = dmar_find_matched_drhd_unit(pdev);
+ if (!drhd) {
+ printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n");
+ return NULL;
+ }
+
+ iommu = drhd->iommu;
+ if (!iommu) {
+ printk(KERN_ERR
+ "intel_iommu_domain_alloc: iommu == NULL\n");
+ return NULL;
+ }
+ domain = iommu_alloc_domain(iommu);
+ if (!domain) {
+ printk(KERN_ERR
+ "intel_iommu_domain_alloc: domain == NULL\n");
+ return NULL;
+ }
+ if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ printk(KERN_ERR
+ "intel_iommu_domain_alloc: domain_init() failed\n");
+ intel_iommu_domain_exit(domain);
+ return NULL;
+ }
+ return domain;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc);
+
+int intel_iommu_context_mapping(
+ struct dmar_domain *domain, struct pci_dev *pdev)
+{
+ int rc;
+ rc = domain_context_mapping(domain, pdev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_context_mapping);
+
+int intel_iommu_page_mapping(
+ struct dmar_domain *domain, dma_addr_t iova,
+ u64 hpa, size_t size, int prot)
+{
+ int rc;
+ rc = domain_page_mapping(domain, iova, hpa, size, prot);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_page_mapping);
+
+void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
+{
+ detach_domain_for_dev(domain, bus, devfn);
+}
+EXPORT_SYMBOL_GPL(intel_iommu_detach_dev);
+
+struct dmar_domain *
+intel_iommu_find_domain(struct pci_dev *pdev)
+{
+ return find_domain(pdev);
+}
+EXPORT_SYMBOL_GPL(intel_iommu_find_domain);
+
+int intel_iommu_found(void)
+{
+ return g_num_of_iommus;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_found);
+
+u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
+{
+ struct dma_pte *pte;
+ u64 pfn;
+
+ pfn = 0;
+ pte = addr_to_dma_pte(domain, iova);
+
+ if (pte)
+ pfn = dma_pte_addr(*pte);
+
+ return pfn >> PAGE_SHIFT_4K;
+}
+EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
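
Taken together, the new exports form a small domain-management API for outside consumers such as device assignment. A hedged usage sketch; example_assign_device() and its arguments are illustrative only, not part of the patch, with error handling abbreviated:

	static int example_assign_device(struct pci_dev *pdev,
					 dma_addr_t iova, u64 hpa, size_t size)
	{
		struct dmar_domain *domain;
		int ret;

		domain = intel_iommu_domain_alloc(pdev);
		if (!domain)
			return -ENOMEM;

		/* point the device's context entry at the new domain */
		ret = intel_iommu_context_mapping(domain, pdev);
		if (ret)
			goto out;

		/* map [hpa, hpa + size) at the given IO virtual address */
		ret = intel_iommu_page_mapping(domain, iova, hpa, size,
					       DMA_PTE_READ | DMA_PTE_WRITE);
	out:
		if (ret)
			intel_iommu_domain_exit(domain);
		return ret;
	}
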