Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/Kconfig | 10
-rw-r--r--  drivers/pci/Makefile | 6
-rw-r--r--  drivers/pci/access.c | 26
-rw-r--r--  drivers/pci/bus.c | 26
-rw-r--r--  drivers/pci/dmar.c | 379
-rw-r--r--  drivers/pci/hotplug/Kconfig | 4
-rw-r--r--  drivers/pci/hotplug/acpi_pcihp.c | 58
-rw-r--r--  drivers/pci/hotplug/acpiphp.h | 1
-rw-r--r--  drivers/pci/hotplug/acpiphp_core.c | 1
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 77
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_core.c | 1
-rw-r--r--  drivers/pci/hotplug/cpqphp.h | 167
-rw-r--r--  drivers/pci/hotplug/cpqphp_core.c | 1100
-rw-r--r--  drivers/pci/hotplug/cpqphp_ctrl.c | 371
-rw-r--r--  drivers/pci/hotplug/cpqphp_nvram.c | 97
-rw-r--r--  drivers/pci/hotplug/cpqphp_pci.c | 599
-rw-r--r--  drivers/pci/hotplug/cpqphp_sysfs.c | 3
-rw-r--r--  drivers/pci/hotplug/fakephp.c | 444
-rw-r--r--  drivers/pci/hotplug/ibmphp_core.c | 56
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c | 155
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 16
-rw-r--r--  drivers/pci/hotplug/pciehp_acpi.c | 21
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 130
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 65
-rw-r--r--  drivers/pci/hotplug/pcihp_skeleton.c | 1
-rw-r--r--  drivers/pci/hotplug/rpaphp_core.c | 1
-rw-r--r--  drivers/pci/hotplug/sgi_hotplug.c | 5
-rw-r--r--  drivers/pci/hotplug/shpchp.h | 10
-rw-r--r--  drivers/pci/hotplug/shpchp_core.c | 1
-rw-r--r--  drivers/pci/hotplug/shpchp_pci.c | 2
-rw-r--r--  drivers/pci/htirq.c | 5
-rw-r--r--  drivers/pci/intel-iommu.c | 638
-rw-r--r--  drivers/pci/intr_remapping.c | 219
-rw-r--r--  drivers/pci/iov.c | 683
-rw-r--r--  drivers/pci/msi.c | 512
-rw-r--r--  drivers/pci/msi.h | 20
-rw-r--r--  drivers/pci/pci-acpi.c | 219
-rw-r--r--  drivers/pci/pci-driver.c | 261
-rw-r--r--  drivers/pci/pci-sysfs.c | 134
-rw-r--r--  drivers/pci/pci.c | 587
-rw-r--r--  drivers/pci/pci.h | 66
-rw-r--r--  drivers/pci/pcie/aer/Kconfig | 15
-rw-r--r--  drivers/pci/pcie/aer/Kconfig.debug | 18
-rw-r--r--  drivers/pci/pcie/aer/Makefile | 3
-rw-r--r--  drivers/pci/pcie/aer/aer_inject.c | 473
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c | 31
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.h | 9
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_acpi.c | 2
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 288
-rw-r--r--  drivers/pci/pcie/aer/ecrc.c | 131
-rw-r--r--  drivers/pci/pcie/aspm.c | 787
-rw-r--r--  drivers/pci/pcie/portdrv.h | 14
-rw-r--r--  drivers/pci/pcie/portdrv_bus.c | 18
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 381
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 52
-rw-r--r--  drivers/pci/probe.c | 225
-rw-r--r--  drivers/pci/quirks.c | 300
-rw-r--r--  drivers/pci/remove.c | 6
-rw-r--r--  drivers/pci/search.c | 32
-rw-r--r--  drivers/pci/setup-bus.c | 66
-rw-r--r--  drivers/pci/setup-res.c | 68
-rw-r--r--  drivers/pci/slot.c | 61
62 files changed, 6277 insertions, 3880 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 2a4501dd251..fdc864f9cf2 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -59,3 +59,13 @@ config HT_IRQ
This allows native hypertransport devices to use interrupts.
If unsure say Y.
+
+config PCI_IOV
+ bool "PCI IOV support"
+ depends on PCI
+ help
+ I/O Virtualization is a PCI feature supported by some devices
+ which allows them to create virtual devices which share their
+ physical resources.
+
+ If unsure, say N.
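
For context, a minimal sketch of how a physical-function driver might consume this option, assuming the pci_enable_sriov()/pci_disable_sriov() interfaces provided by the drivers/pci/iov.c file added in this series; the probe/remove hooks, the VF count of 4 and the omitted ID table and error paths are all illustrative.

    #include <linux/pci.h>

    /* Hypothetical PF driver hooks; enabling 4 VFs is an arbitrary choice. */
    static int example_pf_probe(struct pci_dev *pdev,
                                const struct pci_device_id *id)
    {
            int ret;

            ret = pci_enable_device(pdev);
            if (ret)
                    return ret;

            ret = pci_enable_sriov(pdev, 4);    /* create 4 virtual functions */
            if (ret)
                    dev_warn(&pdev->dev, "SR-IOV not enabled: %d\n", ret);
            return 0;
    }

    static void example_pf_remove(struct pci_dev *pdev)
    {
            pci_disable_sriov(pdev);            /* tear the VFs back down */
            pci_disable_device(pdev);
    }
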
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 3d07ce24f6a..1ebd6b4c743 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -2,10 +2,11 @@
# Makefile for the PCI bus specific drivers.
#
-obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \
+obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
irq.o
obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_SYSFS) += slot.o
# Build PCI Express stuff if needed
obj-$(CONFIG_PCIEPORTBUS) += pcie/
@@ -29,6 +30,8 @@ obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
+obj-$(CONFIG_PCI_IOV) += iov.o
+
#
# Some architectures use the generic PCI setup functions
#
@@ -37,7 +40,6 @@ obj-$(CONFIG_ALPHA) += setup-bus.o setup-irq.o
obj-$(CONFIG_ARM) += setup-bus.o setup-irq.o
obj-$(CONFIG_PARISC) += setup-bus.o
obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o
-obj-$(CONFIG_PPC32) += setup-irq.o
obj-$(CONFIG_PPC) += setup-bus.o
obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
obj-$(CONFIG_X86_VISWS) += setup-irq.o
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 38144479477..db23200c487 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -66,6 +66,25 @@ EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
+/**
+ * pci_bus_set_ops - Set raw operations of pci bus
+ * @bus: pci bus struct
+ * @ops: new raw operations
+ *
+ * Return previous raw operations
+ */
+struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
+{
+ struct pci_ops *old_ops;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pci_lock, flags);
+ old_ops = bus->ops;
+ bus->ops = ops;
+ spin_unlock_irqrestore(&pci_lock, flags);
+ return old_ops;
+}
+EXPORT_SYMBOL(pci_bus_set_ops);
/**
* pci_read_vpd - Read one entry from Vital Product Data
@@ -87,8 +106,8 @@ EXPORT_SYMBOL(pci_read_vpd);
* pci_write_vpd - Write entry to Vital Product Data
* @dev: pci device struct
* @pos: offset in vpd space
- * @count: number of bytes to read
- * @val: value to write
+ * @count: number of bytes to write
+ * @buf: buffer containing write data
*
*/
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
@@ -356,7 +375,8 @@ int pci_vpd_truncate(struct pci_dev *dev, size_t size)
return -EINVAL;
dev->vpd->len = size;
- dev->vpd->attr->size = size;
+ if (dev->vpd->attr)
+ dev->vpd->attr->size = size;
return 0;
}
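
A sketch of how a caller (an error-injection or debugging module, say) might use the new pci_bus_set_ops() to interpose its own config accessors and later restore the originals; the my_* and hook/unhook names are illustrative, not part of this patch.

    #include <linux/pci.h>

    static struct pci_ops *saved_ops;   /* the ops we displaced */

    static int my_read(struct pci_bus *bus, unsigned int devfn,
                       int where, int size, u32 *val)
    {
            /* Observe or override the access, then defer to the old ops. */
            return saved_ops->read(bus, devfn, where, size, val);
    }

    static int my_write(struct pci_bus *bus, unsigned int devfn,
                        int where, int size, u32 val)
    {
            return saved_ops->write(bus, devfn, where, size, val);
    }

    static struct pci_ops my_ops = {
            .read  = my_read,
            .write = my_write,
    };

    static void hook_bus(struct pci_bus *bus)
    {
            saved_ops = pci_bus_set_ops(bus, &my_ops);
    }

    static void unhook_bus(struct pci_bus *bus)
    {
            pci_bus_set_ops(bus, saved_ops);
    }
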
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 52b54f053be..cef28a79103 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -41,9 +41,14 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
void *alignf_data)
{
int i, ret = -ENOMEM;
+ resource_size_t max = -1;
type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
+ /* don't allocate too high if the pref mem doesn't support 64bit*/
+ if (!(res->flags & IORESOURCE_MEM_64))
+ max = PCIBIOS_MAX_MEM_32;
+
for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
struct resource *r = bus->resource[i];
if (!r)
@@ -62,7 +67,7 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
/* Ok, try it out.. */
ret = allocate_resource(r, res, size,
r->start ? : min,
- -1, align,
+ max, align,
alignf, alignf_data);
if (ret == 0)
break;
@@ -133,7 +138,7 @@ int pci_bus_add_child(struct pci_bus *bus)
*
* Call hotplug for each new devices.
*/
-void pci_bus_add_devices(struct pci_bus *bus)
+void pci_bus_add_devices(const struct pci_bus *bus)
{
struct pci_dev *dev;
struct pci_bus *child;
@@ -184,8 +189,10 @@ void pci_enable_bridges(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->subordinate) {
- retval = pci_enable_device(dev);
- pci_set_master(dev);
+ if (!pci_is_enabled(dev)) {
+ retval = pci_enable_device(dev);
+ pci_set_master(dev);
+ }
pci_enable_bridges(dev->subordinate);
}
}
@@ -199,13 +206,18 @@ void pci_enable_bridges(struct pci_bus *bus)
* Walk the given bus, including any bridged devices
* on buses under this bus. Call the provided callback
* on each device found.
+ *
+ * We check the return of @cb each time. If it returns anything
+ * other than 0, we break out.
+ *
*/
-void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
void *userdata)
{
struct pci_dev *dev;
struct pci_bus *bus;
struct list_head *next;
+ int retval;
bus = top;
down_read(&pci_bus_sem);
@@ -229,8 +241,10 @@ void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
/* Run device routines with the device locked */
down(&dev->dev.sem);
- cb(dev, userdata);
+ retval = cb(dev, userdata);
up(&dev->dev.sem);
+ if (retval)
+ break;
}
up_read(&pci_bus_sem);
}
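
A sketch of a pci_walk_bus() callback written against the new prototype, using the non-zero return value to end the walk at the first match; match_vendor() and find_intel_device() are illustrative only.

    #include <linux/pci.h>

    static int match_vendor(struct pci_dev *dev, void *data)
    {
            u16 vendor = *(u16 *)data;

            if (dev->vendor != vendor)
                    return 0;       /* keep walking */

            dev_info(&dev->dev, "first matching device on this bus\n");
            return 1;               /* non-zero return stops pci_walk_bus() */
    }

    /* Usage: walk everything below 'bus' looking for an Intel device. */
    static void find_intel_device(struct pci_bus *bus)
    {
            u16 vendor = PCI_VENDOR_ID_INTEL;

            pci_walk_bus(bus, match_vendor, &vendor);
    }
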
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 26c536b51c5..fa3a11365ec 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -31,6 +31,8 @@
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
#undef PREFIX
#define PREFIX "DMAR:"
@@ -42,6 +44,7 @@
LIST_HEAD(dmar_drhd_units);
static struct acpi_table_header * __initdata dmar_tbl;
+static acpi_size dmar_tbl_size;
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
@@ -170,13 +173,23 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
struct dmar_drhd_unit *dmaru;
int ret = 0;
+ drhd = (struct acpi_dmar_hardware_unit *)header;
+ if (!drhd->address) {
+ /* Promote an attitude of violence to a BIOS engineer today */
+ WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+ return -ENODEV;
+ }
dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
if (!dmaru)
return -ENOMEM;
dmaru->hdr = header;
- drhd = (struct acpi_dmar_hardware_unit *)header;
dmaru->reg_base_addr = drhd->address;
+ dmaru->segment = drhd->segment;
dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
ret = alloc_iommu(dmaru);
@@ -288,8 +301,9 @@ static int __init dmar_table_detect(void)
acpi_status status = AE_OK;
/* if we could find DMAR table, then there are DMAR devices */
- status = acpi_get_table(ACPI_SIG_DMAR, 0,
- (struct acpi_table_header **)&dmar_tbl);
+ status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
+ (struct acpi_table_header **)&dmar_tbl,
+ &dmar_tbl_size);
if (ACPI_SUCCESS(status) && !dmar_tbl) {
printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
@@ -489,6 +503,7 @@ void __init detect_intel_iommu(void)
iommu_detected = 1;
#endif
}
+ early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
dmar_tbl = NULL;
}
@@ -506,6 +521,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
return -ENOMEM;
iommu->seq_id = iommu_allocated++;
+ sprintf (iommu->name, "dmar%d", iommu->seq_id);
iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
if (!iommu->reg) {
@@ -748,14 +764,77 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
}
/*
+ * Disable Queued Invalidation interface.
+ */
+void dmar_disable_qi(struct intel_iommu *iommu)
+{
+ unsigned long flags;
+ u32 sts;
+ cycles_t start_time = get_cycles();
+
+ if (!ecap_qis(iommu->ecap))
+ return;
+
+ spin_lock_irqsave(&iommu->register_lock, flags);
+
+ sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+ if (!(sts & DMA_GSTS_QIES))
+ goto end;
+
+ /*
+ * Give a chance to HW to complete the pending invalidation requests.
+ */
+ while ((readl(iommu->reg + DMAR_IQT_REG) !=
+ readl(iommu->reg + DMAR_IQH_REG)) &&
+ (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
+ cpu_relax();
+
+ iommu->gcmd &= ~DMA_GCMD_QIE;
+
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
+ !(sts & DMA_GSTS_QIES), sts);
+end:
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+/*
+ * Enable queued invalidation.
+ */
+static void __dmar_enable_qi(struct intel_iommu *iommu)
+{
+ u32 cmd, sts;
+ unsigned long flags;
+ struct q_inval *qi = iommu->qi;
+
+ qi->free_head = qi->free_tail = 0;
+ qi->free_cnt = QI_LENGTH;
+
+ spin_lock_irqsave(&iommu->register_lock, flags);
+
+ /* write zero to the tail reg */
+ writel(0, iommu->reg + DMAR_IQT_REG);
+
+ dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
+
+ cmd = iommu->gcmd | DMA_GCMD_QIE;
+ iommu->gcmd |= DMA_GCMD_QIE;
+ writel(cmd, iommu->reg + DMAR_GCMD_REG);
+
+ /* Make sure hardware complete it */
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+/*
* Enable Queued Invalidation interface. This is a must to support
* interrupt-remapping. Also used by DMA-remapping, which replaces
* register based IOTLB invalidation.
*/
int dmar_enable_qi(struct intel_iommu *iommu)
{
- u32 cmd, sts;
- unsigned long flags;
struct q_inval *qi;
if (!ecap_qis(iommu->ecap))
@@ -767,20 +846,20 @@ int dmar_enable_qi(struct intel_iommu *iommu)
if (iommu->qi)
return 0;
- iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
+ iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
if (!iommu->qi)
return -ENOMEM;
qi = iommu->qi;
- qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
+ qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
if (!qi->desc) {
kfree(qi);
iommu->qi = 0;
return -ENOMEM;
}
- qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
+ qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
if (!qi->desc_status) {
free_page((unsigned long) qi->desc);
kfree(qi);
@@ -793,19 +872,283 @@ int dmar_enable_qi(struct intel_iommu *iommu)
spin_lock_init(&qi->q_lock);
- spin_lock_irqsave(&iommu->register_lock, flags);
- /* write zero to the tail reg */
- writel(0, iommu->reg + DMAR_IQT_REG);
+ __dmar_enable_qi(iommu);
- dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
+ return 0;
+}
- cmd = iommu->gcmd | DMA_GCMD_QIE;
- iommu->gcmd |= DMA_GCMD_QIE;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+/* iommu interrupt handling. Most of it is MSI-like. */
- /* Make sure hardware complete it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+enum faulttype {
+ DMA_REMAP,
+ INTR_REMAP,
+ UNKNOWN,
+};
+
+static const char *dma_remap_fault_reasons[] =
+{
+ "Software",
+ "Present bit in root entry is clear",
+ "Present bit in context entry is clear",
+ "Invalid context entry",
+ "Access beyond MGAW",
+ "PTE Write access is not set",
+ "PTE Read access is not set",
+ "Next page table ptr is invalid",
+ "Root table address invalid",
+ "Context table ptr is invalid",
+ "non-zero reserved fields in RTP",
+ "non-zero reserved fields in CTP",
+ "non-zero reserved fields in PTE",
+};
+
+static const char *intr_remap_fault_reasons[] =
+{
+ "Detected reserved fields in the decoded interrupt-remapped request",
+ "Interrupt index exceeded the interrupt-remapping table size",
+ "Present field in the IRTE entry is clear",
+ "Error accessing interrupt-remapping table pointed by IRTA_REG",
+ "Detected reserved fields in the IRTE entry",
+ "Blocked a compatibility format interrupt request",
+ "Blocked an interrupt request due to source-id verification failure",
+};
+
+#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
+
+const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
+{
+ if (fault_reason >= 0x20 && (fault_reason <= 0x20 +
+ ARRAY_SIZE(intr_remap_fault_reasons))) {
+ *fault_type = INTR_REMAP;
+ return intr_remap_fault_reasons[fault_reason - 0x20];
+ } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
+ *fault_type = DMA_REMAP;
+ return dma_remap_fault_reasons[fault_reason];
+ } else {
+ *fault_type = UNKNOWN;
+ return "Unknown";
+ }
+}
+
+void dmar_msi_unmask(unsigned int irq)
+{
+ struct intel_iommu *iommu = get_irq_data(irq);
+ unsigned long flag;
+
+ /* unmask it */
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(0, iommu->reg + DMAR_FECTL_REG);
+ /* Read a reg to force flush the post write */
+ readl(iommu->reg + DMAR_FECTL_REG);
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_mask(unsigned int irq)
+{
+ unsigned long flag;
+ struct intel_iommu *iommu = get_irq_data(irq);
+
+ /* mask it */
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
+ /* Read a reg to force flush the post write */
+ readl(iommu->reg + DMAR_FECTL_REG);
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_write(int irq, struct msi_msg *msg)
+{
+ struct intel_iommu *iommu = get_irq_data(irq);
+ unsigned long flag;
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
+ writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
+ writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_read(int irq, struct msi_msg *msg)
+{
+ struct intel_iommu *iommu = get_irq_data(irq);
+ unsigned long flag;
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
+ msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
+ msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
+ u8 fault_reason, u16 source_id, unsigned long long addr)
+{
+ const char *reason;
+ int fault_type;
+
+ reason = dmar_get_fault_reason(fault_reason, &fault_type);
+
+ if (fault_type == INTR_REMAP)
+ printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
+ "fault index %llx\n"
+ "INTR-REMAP:[fault reason %02d] %s\n",
+ (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+ PCI_FUNC(source_id & 0xFF), addr >> 48,
+ fault_reason, reason);
+ else
+ printk(KERN_ERR
+ "DMAR:[%s] Request device [%02x:%02x.%d] "
+ "fault addr %llx \n"
+ "DMAR:[fault reason %02d] %s\n",
+ (type ? "DMA Read" : "DMA Write"),
+ (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+ PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
+ return 0;
+}
+
+#define PRIMARY_FAULT_REG_LEN (16)
+irqreturn_t dmar_fault(int irq, void *dev_id)
+{
+ struct intel_iommu *iommu = dev_id;
+ int reg, fault_index;
+ u32 fault_status;
+ unsigned long flag;
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+ if (fault_status)
+ printk(KERN_ERR "DRHD: handling fault status reg %x\n",
+ fault_status);
+
+ /* TBD: ignore advanced fault log currently */
+ if (!(fault_status & DMA_FSTS_PPF))
+ goto clear_rest;
+
+ fault_index = dma_fsts_fault_record_index(fault_status);
+ reg = cap_fault_reg_offset(iommu->cap);
+ while (1) {
+ u8 fault_reason;
+ u16 source_id;
+ u64 guest_addr;
+ int type;
+ u32 data;
+
+ /* highest 32 bits */
+ data = readl(iommu->reg + reg +
+ fault_index * PRIMARY_FAULT_REG_LEN + 12);
+ if (!(data & DMA_FRCD_F))
+ break;
+
+ fault_reason = dma_frcd_fault_reason(data);
+ type = dma_frcd_type(data);
+
+ data = readl(iommu->reg + reg +
+ fault_index * PRIMARY_FAULT_REG_LEN + 8);
+ source_id = dma_frcd_source_id(data);
+
+ guest_addr = dmar_readq(iommu->reg + reg +
+ fault_index * PRIMARY_FAULT_REG_LEN);
+ guest_addr = dma_frcd_page_addr(guest_addr);
+ /* clear the fault */
+ writel(DMA_FRCD_F, iommu->reg + reg +
+ fault_index * PRIMARY_FAULT_REG_LEN + 12);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+ dmar_fault_do_one(iommu, type, fault_reason,
+ source_id, guest_addr);
+
+ fault_index++;
+ if (fault_index > cap_num_fault_regs(iommu->cap))
+ fault_index = 0;
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ }
+clear_rest:
+ /* clear all the other faults */
+ fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+ writel(fault_status, iommu->reg + DMAR_FSTS_REG);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+ return IRQ_HANDLED;
+}
+
+int dmar_set_interrupt(struct intel_iommu *iommu)
+{
+ int irq, ret;
+
+ /*
+ * Check if the fault interrupt is already initialized.
+ */
+ if (iommu->irq)
+ return 0;
+
+ irq = create_irq();
+ if (!irq) {
+ printk(KERN_ERR "IOMMU: no free vectors\n");
+ return -EINVAL;
+ }
+
+ set_irq_data(irq, iommu);
+ iommu->irq = irq;
+
+ ret = arch_setup_dmar_msi(irq);
+ if (ret) {
+ set_irq_data(irq, NULL);
+ iommu->irq = 0;
+ destroy_irq(irq);
+ return 0;
+ }
+
+ ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
+ if (ret)
+ printk(KERN_ERR "IOMMU: can't request irq\n");
+ return ret;
+}
+
+int __init enable_drhd_fault_handling(void)
+{
+ struct dmar_drhd_unit *drhd;
+
+ /*
+ * Enable fault control interrupt.
+ */
+ for_each_drhd_unit(drhd) {
+ int ret;
+ struct intel_iommu *iommu = drhd->iommu;
+ ret = dmar_set_interrupt(iommu);
+
+ if (ret) {
+ printk(KERN_ERR "DRHD %Lx: failed to enable fault "
+ "interrupt, ret %d\n",
+ (unsigned long long)drhd->reg_base_addr, ret);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Re-enable Queued Invalidation interface.
+ */
+int dmar_reenable_qi(struct intel_iommu *iommu)
+{
+ if (!ecap_qis(iommu->ecap))
+ return -ENOENT;
+
+ if (!iommu->qi)
+ return -ENOENT;
+
+ /*
+ * First disable queued invalidation.
+ */
+ dmar_disable_qi(iommu);
+ /*
+ * Then enable queued invalidation again. Since there is no pending
+ * invalidation requests now, it's safe to re-enable queued
+ * invalidation.
+ */
+ __dmar_enable_qi(iommu);
return 0;
}
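
A sketch of how a suspend/resume path could pair the helpers above, assuming the dmar_disable_qi()/dmar_reenable_qi() declarations are visible to the caller; iommu_suspend_one() and iommu_resume_one() are hypothetical wrappers, not part of this patch.

    #include <linux/dmar.h>
    #include <linux/intel-iommu.h>

    static void iommu_suspend_one(struct intel_iommu *iommu)
    {
            /* Drain and switch off queued invalidation before power-down. */
            dmar_disable_qi(iommu);
    }

    static int iommu_resume_one(struct intel_iommu *iommu)
    {
            /*
             * The descriptor queue set up by dmar_enable_qi() is still
             * allocated, so re-arming the hardware is sufficient.
             */
            return dmar_reenable_qi(iommu);
    }
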
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 9aa4fe100a0..66f29bc00be 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -4,7 +4,7 @@
menuconfig HOTPLUG_PCI
tristate "Support for PCI Hotplug"
- depends on PCI && HOTPLUG
+ depends on PCI && HOTPLUG && SYSFS
---help---
Say Y here if you have a motherboard with a PCI Hotplug controller.
This allows you to add and remove PCI cards while the machine is
@@ -41,7 +41,7 @@ config HOTPLUG_PCI_FAKE
config HOTPLUG_PCI_COMPAQ
tristate "Compaq PCI Hotplug driver"
- depends on X86 && PCI_BIOS && PCI_LEGACY
+ depends on X86 && PCI_BIOS
help
Say Y here if you have a motherboard with a Compaq PCI Hotplug
controller.
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 1c114180106..fbc63d5e459 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -30,9 +30,8 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
+#include <linux/acpi.h>
#include <linux/pci-acpi.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
#define MY_NAME "acpi_pcihp"
@@ -333,19 +332,14 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
{
acpi_status status = AE_NOT_FOUND;
acpi_handle handle, phandle;
- struct pci_bus *pbus = bus;
- struct pci_dev *pdev;
-
- do {
- pdev = pbus->self;
- if (!pdev) {
- handle = acpi_get_pci_rootbridge_handle(
- pci_domain_nr(pbus), pbus->number);
+ struct pci_bus *pbus;
+
+ handle = NULL;
+ for (pbus = bus; pbus; pbus = pbus->parent) {
+ handle = acpi_pci_get_bridge_handle(pbus);
+ if (handle)
break;
- }
- handle = DEVICE_ACPI_HANDLE(&(pdev->dev));
- pbus = pbus->parent;
- } while (!handle);
+ }
/*
* _HPP settings apply to all child buses, until another _HPP is
@@ -378,12 +372,10 @@ EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware);
*
* Attempt to take hotplug control from firmware.
*/
-int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
+int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
{
acpi_status status;
acpi_handle chandle, handle;
- struct pci_dev *pdev = dev;
- struct pci_bus *parent;
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
@@ -408,33 +400,25 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
dbg("Trying to get hotplug control for %s\n",
(char *)string.pointer);
- status = pci_osc_control_set(handle, flags);
+ status = acpi_pci_osc_control_set(handle, flags);
if (ACPI_SUCCESS(status))
goto got_one;
kfree(string.pointer);
string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
}
- pdev = dev;
- handle = DEVICE_ACPI_HANDLE(&dev->dev);
- while (!handle) {
+ handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!handle) {
/*
* This hotplug controller was not listed in the ACPI name
* space at all. Try to get acpi handle of parent pci bus.
*/
- if (!pdev || !pdev->bus->parent)
- break;
- parent = pdev->bus->parent;
- dbg("Could not find %s in acpi namespace, trying parent\n",
- pci_name(pdev));
- if (!parent->self)
- /* Parent must be a host bridge */
- handle = acpi_get_pci_rootbridge_handle(
- pci_domain_nr(parent),
- parent->number);
- else
- handle = DEVICE_ACPI_HANDLE(&(parent->self->dev));
- pdev = parent->self;
+ struct pci_bus *pbus;
+ for (pbus = pdev->bus; pbus; pbus = pbus->parent) {
+ handle = acpi_pci_get_bridge_handle(pbus);
+ if (handle)
+ break;
+ }
}
while (handle) {
@@ -453,13 +437,13 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
}
dbg("Cannot get control of hotplug hardware for pci %s\n",
- pci_name(dev));
+ pci_name(pdev));
kfree(string.pointer);
return -ENODEV;
got_one:
- dbg("Gained control for hotplug HW for pci %s (%s)\n", pci_name(dev),
- (char *)string.pointer);
+ dbg("Gained control for hotplug HW for pci %s (%s)\n",
+ pci_name(pdev), (char *)string.pointer);
kfree(string.pointer);
return 0;
}
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 4fc168b7009..e68d5f20ffb 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -129,7 +129,6 @@ struct acpiphp_func {
struct acpiphp_bridge *bridge; /* Ejectable PCI-to-PCI bridge */
struct list_head sibling;
- struct pci_dev *pci_dev;
struct notifier_block nb;
acpi_handle handle;
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 43c10bd261b..4dd7114964a 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -77,7 +77,6 @@ static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 803d9ddd6e7..3a6064bce56 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -32,12 +32,11 @@
/*
* Lifetime rules for pci_dev:
- * - The one in acpiphp_func has its refcount elevated by pci_get_slot()
- * when the driver is loaded or when an insertion event occurs. It loses
- * a refcount when its ejected or the driver unloads.
* - The one in acpiphp_bridge has its refcount elevated by pci_get_slot()
* when the bridge is scanned and it loses a refcount when the bridge
* is removed.
+ * - When a P2P bridge is present, we elevate the refcount on the subordinate
+ * bus. It loses the refcount when the driver unloads.
*/
#include <linux/init.h>
@@ -128,6 +127,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
unsigned long long adr, sun;
int device, function, retval;
struct pci_bus *pbus = bridge->pci_bus;
+ struct pci_dev *pdev;
if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
return AE_OK;
@@ -211,10 +211,10 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
newfunc->slot = slot;
list_add_tail(&newfunc->sibling, &slot->funcs);
- /* associate corresponding pci_dev */
- newfunc->pci_dev = pci_get_slot(pbus, PCI_DEVFN(device, function));
- if (newfunc->pci_dev) {
+ pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
+ if (pdev) {
slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
+ pci_dev_put(pdev);
}
if (is_dock_device(handle)) {
@@ -440,6 +440,12 @@ static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev)
goto err;
}
+ /*
+ * Grab a ref to the subordinate PCI bus in case the bus is
+ * removed via PCI core logical hotplug. The ref pins the bus
+ * (which we access during module unload).
+ */
+ get_device(&bridge->pci_bus->dev);
spin_lock_init(&bridge->res_lock);
init_bridge_misc(bridge);
@@ -609,7 +615,6 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
if (ACPI_FAILURE(status))
err("failed to remove notify handler\n");
}
- pci_dev_put(func->pci_dev);
list_del(list);
kfree(func);
}
@@ -619,6 +624,12 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
slot = next;
}
+ /*
+ * Only P2P bridges have a pci_dev
+ */
+ if (bridge->pci_dev)
+ put_device(&bridge->pci_bus->dev);
+
pci_dev_put(bridge->pci_dev);
list_del(&bridge->list);
kfree(bridge);
@@ -1087,22 +1098,24 @@ static int __ref enable_device(struct acpiphp_slot *slot)
pci_enable_bridges(bus);
pci_bus_add_devices(bus);
- /* associate pci_dev to our representation */
list_for_each (l, &slot->funcs) {
func = list_entry(l, struct acpiphp_func, sibling);
- func->pci_dev = pci_get_slot(bus, PCI_DEVFN(slot->device,
- func->function));
- if (!func->pci_dev)
+ dev = pci_get_slot(bus, PCI_DEVFN(slot->device,
+ func->function));
+ if (!dev)
continue;
- if (func->pci_dev->hdr_type != PCI_HEADER_TYPE_BRIDGE &&
- func->pci_dev->hdr_type != PCI_HEADER_TYPE_CARDBUS)
+ if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE &&
+ dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) {
+ pci_dev_put(dev);
continue;
+ }
status = find_p2p_bridge(func->handle, (u32)1, bus, NULL);
if (ACPI_FAILURE(status))
warn("find_p2p_bridge failed (error code = 0x%x)\n",
status);
+ pci_dev_put(dev);
}
slot->flags |= SLOT_ENABLED;
@@ -1128,17 +1141,14 @@ static void disable_bridges(struct pci_bus *bus)
*/
static int disable_device(struct acpiphp_slot *slot)
{
- int retval = 0;
struct acpiphp_func *func;
- struct list_head *l;
+ struct pci_dev *pdev;
/* is this slot already disabled? */
if (!(slot->flags & SLOT_ENABLED))
goto err_exit;
- list_for_each (l, &slot->funcs) {
- func = list_entry(l, struct acpiphp_func, sibling);
-
+ list_for_each_entry(func, &slot->funcs, sibling) {
if (func->bridge) {
/* cleanup p2p bridges under this P2P bridge */
cleanup_p2p_bridge(func->bridge->handle,
@@ -1146,35 +1156,28 @@ static int disable_device(struct acpiphp_slot *slot)
func->bridge = NULL;
}
- if (func->pci_dev) {
- pci_stop_bus_device(func->pci_dev);
- if (func->pci_dev->subordinate) {
- disable_bridges(func->pci_dev->subordinate);
- pci_disable_device(func->pci_dev);
+ pdev = pci_get_slot(slot->bridge->pci_bus,
+ PCI_DEVFN(slot->device, func->function));
+ if (pdev) {
+ pci_stop_bus_device(pdev);
+ if (pdev->subordinate) {
+ disable_bridges(pdev->subordinate);
+ pci_disable_device(pdev);
}
+ pci_remove_bus_device(pdev);
+ pci_dev_put(pdev);
}
}
- list_for_each (l, &slot->funcs) {
- func = list_entry(l, struct acpiphp_func, sibling);
-
+ list_for_each_entry(func, &slot->funcs, sibling) {
acpiphp_unconfigure_ioapics(func->handle);
acpiphp_bus_trim(func->handle);
- /* try to remove anyway.
- * acpiphp_bus_add might have been failed */
-
- if (!func->pci_dev)
- continue;
-
- pci_remove_bus_device(func->pci_dev);
- pci_dev_put(func->pci_dev);
- func->pci_dev = NULL;
}
slot->flags &= (~SLOT_ENABLED);
- err_exit:
- return retval;
+err_exit:
+ return 0;
}
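
The new lifetime rule amounts to taking only transient pci_dev references. A sketch of that get/put discipline, with slot_is_populated() as an illustrative helper:

    #include <linux/pci.h>

    static int slot_is_populated(struct pci_bus *bus, unsigned int device)
    {
            struct pci_dev *pdev;

            pdev = pci_get_slot(bus, PCI_DEVFN(device, 0));
            if (!pdev)
                    return 0;

            /* pci_get_slot() elevated the refcount; drop it right away. */
            pci_dev_put(pdev);
            return 1;
    }
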
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index de94f4feef8..a5b9f6ae507 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -72,7 +72,6 @@ static int get_adapter_status(struct hotplug_slot *slot, u8 * value);
static int get_latch_status(struct hotplug_slot *slot, u8 * value);
static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index afaf8f69f73..53836001d51 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -150,25 +150,25 @@ struct ctrl_reg { /* offset */
/* offsets to the controller registers based on the above structure layout */
enum ctrl_offsets {
- SLOT_RST = offsetof(struct ctrl_reg, slot_RST),
+ SLOT_RST = offsetof(struct ctrl_reg, slot_RST),
SLOT_ENABLE = offsetof(struct ctrl_reg, slot_enable),
MISC = offsetof(struct ctrl_reg, misc),
LED_CONTROL = offsetof(struct ctrl_reg, led_control),
INT_INPUT_CLEAR = offsetof(struct ctrl_reg, int_input_clear),
- INT_MASK = offsetof(struct ctrl_reg, int_mask),
- CTRL_RESERVED0 = offsetof(struct ctrl_reg, reserved0),
+ INT_MASK = offsetof(struct ctrl_reg, int_mask),
+ CTRL_RESERVED0 = offsetof(struct ctrl_reg, reserved0),
CTRL_RESERVED1 = offsetof(struct ctrl_reg, reserved1),
CTRL_RESERVED2 = offsetof(struct ctrl_reg, reserved1),
- GEN_OUTPUT_AB = offsetof(struct ctrl_reg, gen_output_AB),
- NON_INT_INPUT = offsetof(struct ctrl_reg, non_int_input),
+ GEN_OUTPUT_AB = offsetof(struct ctrl_reg, gen_output_AB),
+ NON_INT_INPUT = offsetof(struct ctrl_reg, non_int_input),
CTRL_RESERVED3 = offsetof(struct ctrl_reg, reserved3),
CTRL_RESERVED4 = offsetof(struct ctrl_reg, reserved4),
CTRL_RESERVED5 = offsetof(struct ctrl_reg, reserved5),
CTRL_RESERVED6 = offsetof(struct ctrl_reg, reserved6),
CTRL_RESERVED7 = offsetof(struct ctrl_reg, reserved7),
CTRL_RESERVED8 = offsetof(struct ctrl_reg, reserved8),
- SLOT_MASK = offsetof(struct ctrl_reg, slot_mask),
- CTRL_RESERVED9 = offsetof(struct ctrl_reg, reserved9),
+ SLOT_MASK = offsetof(struct ctrl_reg, slot_mask),
+ CTRL_RESERVED9 = offsetof(struct ctrl_reg, reserved9),
CTRL_RESERVED10 = offsetof(struct ctrl_reg, reserved10),
CTRL_RESERVED11 = offsetof(struct ctrl_reg, reserved11),
SLOT_SERR = offsetof(struct ctrl_reg, slot_SERR),
@@ -190,7 +190,9 @@ struct hrt {
u32 reserved2;
} __attribute__ ((packed));
-/* offsets to the hotplug resource table registers based on the above structure layout */
+/* offsets to the hotplug resource table registers based on the above
+ * structure layout
+ */
enum hrt_offsets {
SIG0 = offsetof(struct hrt, sig0),
SIG1 = offsetof(struct hrt, sig1),
@@ -217,18 +219,20 @@ struct slot_rt {
u16 pre_mem_length;
} __attribute__ ((packed));
-/* offsets to the hotplug slot resource table registers based on the above structure layout */
+/* offsets to the hotplug slot resource table registers based on the above
+ * structure layout
+ */
enum slot_rt_offsets {
DEV_FUNC = offsetof(struct slot_rt, dev_func),
- PRIMARY_BUS = offsetof(struct slot_rt, primary_bus),
- SECONDARY_BUS = offsetof(struct slot_rt, secondary_bus),
- MAX_BUS = offsetof(struct slot_rt, max_bus),
- IO_BASE = offsetof(struct slot_rt, io_base),
- IO_LENGTH = offsetof(struct slot_rt, io_length),
- MEM_BASE = offsetof(struct slot_rt, mem_base),
- MEM_LENGTH = offsetof(struct slot_rt, mem_length),
- PRE_MEM_BASE = offsetof(struct slot_rt, pre_mem_base),
- PRE_MEM_LENGTH = offsetof(struct slot_rt, pre_mem_length),
+ PRIMARY_BUS = offsetof(struct slot_rt, primary_bus),
+ SECONDARY_BUS = offsetof(struct slot_rt, secondary_bus),
+ MAX_BUS = offsetof(struct slot_rt, max_bus),
+ IO_BASE = offsetof(struct slot_rt, io_base),
+ IO_LENGTH = offsetof(struct slot_rt, io_length),
+ MEM_BASE = offsetof(struct slot_rt, mem_base),
+ MEM_LENGTH = offsetof(struct slot_rt, mem_length),
+ PRE_MEM_BASE = offsetof(struct slot_rt, pre_mem_base),
+ PRE_MEM_LENGTH = offsetof(struct slot_rt, pre_mem_length),
};
struct pci_func {
@@ -286,8 +290,8 @@ struct event_info {
struct controller {
struct controller *next;
u32 ctrl_int_comp;
- struct mutex crit_sect; /* critical section mutex */
- void __iomem *hpc_reg; /* cookie for our pci controller location */
+ struct mutex crit_sect; /* critical section mutex */
+ void __iomem *hpc_reg; /* cookie for our pci controller location */
struct pci_resource *mem_head;
struct pci_resource *p_mem_head;
struct pci_resource *io_head;
@@ -299,7 +303,7 @@ struct controller {
u8 next_event;
u8 interrupt;
u8 cfgspc_irq;
- u8 bus; /* bus number for the pci hotplug controller */
+ u8 bus; /* bus number for the pci hotplug controller */
u8 rev;
u8 slot_device_offset;
u8 first_slot;
@@ -401,46 +405,57 @@ struct resource_lists {
/* debugfs functions for the hotplug controller info */
-extern void cpqhp_initialize_debugfs (void);
-extern void cpqhp_shutdown_debugfs (void);
-extern void cpqhp_create_debugfs_files (struct controller *ctrl);
-extern void cpqhp_remove_debugfs_files (struct controller *ctrl);
+extern void cpqhp_initialize_debugfs(void);
+extern void cpqhp_shutdown_debugfs(void);
+extern void cpqhp_create_debugfs_files(struct controller *ctrl);
+extern void cpqhp_remove_debugfs_files(struct controller *ctrl);
/* controller functions */
-extern void cpqhp_pushbutton_thread (unsigned long event_pointer);
-extern irqreturn_t cpqhp_ctrl_intr (int IRQ, void *data);
-extern int cpqhp_find_available_resources (struct controller *ctrl, void __iomem *rom_start);
-extern int cpqhp_event_start_thread (void);
-extern void cpqhp_event_stop_thread (void);
-extern struct pci_func *cpqhp_slot_create (unsigned char busnumber);
-extern struct pci_func *cpqhp_slot_find (unsigned char bus, unsigned char device, unsigned char index);
-extern int cpqhp_process_SI (struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_process_SS (struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_hardware_test (struct controller *ctrl, int test_num);
+extern void cpqhp_pushbutton_thread(unsigned long event_pointer);
+extern irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
+extern int cpqhp_find_available_resources(struct controller *ctrl,
+ void __iomem *rom_start);
+extern int cpqhp_event_start_thread(void);
+extern void cpqhp_event_stop_thread(void);
+extern struct pci_func *cpqhp_slot_create(unsigned char busnumber);
+extern struct pci_func *cpqhp_slot_find(unsigned char bus, unsigned char device,
+ unsigned char index);
+extern int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func);
+extern int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func);
+extern int cpqhp_hardware_test(struct controller *ctrl, int test_num);
/* resource functions */
extern int cpqhp_resource_sort_and_combine (struct pci_resource **head);
/* pci functions */
-extern int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
-extern int cpqhp_get_bus_dev (struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot);
-extern int cpqhp_save_config (struct controller *ctrl, int busnumber, int is_hot_plug);
-extern int cpqhp_save_base_addr_length (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_configure_board (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot);
-extern int cpqhp_valid_replace (struct controller *ctrl, struct pci_func * func);
-extern void cpqhp_destroy_board_resources (struct pci_func * func);
-extern int cpqhp_return_board_resources (struct pci_func * func, struct resource_lists * resources);
-extern void cpqhp_destroy_resource_list (struct resource_lists * resources);
-extern int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func);
-extern int cpqhp_unconfigure_device (struct pci_func* func);
+extern int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
+extern int cpqhp_get_bus_dev(struct controller *ctrl, u8 *bus_num, u8 *dev_num,
+ u8 slot);
+extern int cpqhp_save_config(struct controller *ctrl, int busnumber,
+ int is_hot_plug);
+extern int cpqhp_save_base_addr_length(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_save_used_resources(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_configure_board(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_save_slot_config(struct controller *ctrl,
+ struct pci_func *new_slot);
+extern int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func);
+extern void cpqhp_destroy_board_resources(struct pci_func *func);
+extern int cpqhp_return_board_resources (struct pci_func *func,
+ struct resource_lists *resources);
+extern void cpqhp_destroy_resource_list(struct resource_lists *resources);
+extern int cpqhp_configure_device(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_unconfigure_device(struct pci_func *func);
/* Global variables */
extern int cpqhp_debug;
extern int cpqhp_legacy_mode;
extern struct controller *cpqhp_ctrl_list;
extern struct pci_func *cpqhp_slot_list[256];
+extern struct irq_routing_table *cpqhp_routing_table;
/* these can be gotten rid of, but for debugging they are purty */
extern u8 cpqhp_nic_irq;
@@ -449,7 +464,7 @@ extern u8 cpqhp_disk_irq;
/* inline functions */
-static inline char *slot_name(struct slot *slot)
+static inline const char *slot_name(struct slot *slot)
{
return hotplug_slot_name(slot->hotplug_slot);
}
@@ -458,9 +473,9 @@ static inline char *slot_name(struct slot *slot)
* return_resource
*
* Puts node back in the resource list pointed to by head
- *
*/
-static inline void return_resource(struct pci_resource **head, struct pci_resource *node)
+static inline void return_resource(struct pci_resource **head,
+ struct pci_resource *node)
{
if (!node || !head)
return;
@@ -471,7 +486,7 @@ static inline void return_resource(struct pci_resource **head, struct pci_resour
static inline void set_SOGO(struct controller *ctrl)
{
u16 misc;
-
+
misc = readw(ctrl->hpc_reg + MISC);
misc = (misc | 0x0001) & 0xFFFB;
writew(misc, ctrl->hpc_reg + MISC);
@@ -481,7 +496,7 @@ static inline void set_SOGO(struct controller *ctrl)
static inline void amber_LED_on(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control |= (0x01010000L << slot);
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -491,7 +506,7 @@ static inline void amber_LED_on(struct controller *ctrl, u8 slot)
static inline void amber_LED_off(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= ~(0x01010000L << slot);
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -504,7 +519,7 @@ static inline int read_amber_LED(struct controller *ctrl, u8 slot)
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= (0x01010000L << slot);
-
+
return led_control ? 1 : 0;
}
@@ -512,7 +527,7 @@ static inline int read_amber_LED(struct controller *ctrl, u8 slot)
static inline void green_LED_on(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control |= 0x0101L << slot;
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -521,7 +536,7 @@ static inline void green_LED_on(struct controller *ctrl, u8 slot)
static inline void green_LED_off(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= ~(0x0101L << slot);
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -531,7 +546,7 @@ static inline void green_LED_off(struct controller *ctrl, u8 slot)
static inline void green_LED_blink(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= ~(0x0101L << slot);
led_control |= (0x0001L << slot);
@@ -575,22 +590,21 @@ static inline u8 read_slot_enable(struct controller *ctrl)
}
-/*
+/**
* get_controller_speed - find the current frequency/mode of controller.
*
* @ctrl: controller to get frequency/mode for.
*
* Returns controller speed.
- *
*/
static inline u8 get_controller_speed(struct controller *ctrl)
{
u8 curr_freq;
- u16 misc;
-
+ u16 misc;
+
if (ctrl->pcix_support) {
curr_freq = readb(ctrl->hpc_reg + NEXT_CURR_FREQ);
- if ((curr_freq & 0xB0) == 0xB0)
+ if ((curr_freq & 0xB0) == 0xB0)
return PCI_SPEED_133MHz_PCIX;
if ((curr_freq & 0xA0) == 0xA0)
return PCI_SPEED_100MHz_PCIX;
@@ -602,19 +616,18 @@ static inline u8 get_controller_speed(struct controller *ctrl)
return PCI_SPEED_33MHz;
}
- misc = readw(ctrl->hpc_reg + MISC);
- return (misc & 0x0800) ? PCI_SPEED_66MHz : PCI_SPEED_33MHz;
+ misc = readw(ctrl->hpc_reg + MISC);
+ return (misc & 0x0800) ? PCI_SPEED_66MHz : PCI_SPEED_33MHz;
}
-
-/*
+
+/**
* get_adapter_speed - find the max supported frequency/mode of adapter.
*
* @ctrl: hotplug controller.
* @hp_slot: hotplug slot where adapter is installed.
*
* Returns adapter speed.
- *
*/
static inline u8 get_adapter_speed(struct controller *ctrl, u8 hp_slot)
{
@@ -672,7 +685,8 @@ static inline int get_slot_enabled(struct controller *ctrl, struct slot *slot)
}
-static inline int cpq_get_latch_status(struct controller *ctrl, struct slot *slot)
+static inline int cpq_get_latch_status(struct controller *ctrl,
+ struct slot *slot)
{
u32 status;
u8 hp_slot;
@@ -687,7 +701,8 @@ static inline int cpq_get_latch_status(struct controller *ctrl, struct slot *slo
}
-static inline int get_presence_status(struct controller *ctrl, struct slot *slot)
+static inline int get_presence_status(struct controller *ctrl,
+ struct slot *slot)
{
int presence_save = 0;
u8 hp_slot;
@@ -696,7 +711,8 @@ static inline int get_presence_status(struct controller *ctrl, struct slot *slot
hp_slot = slot->device - ctrl->slot_device_offset;
tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
- presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15)) >> hp_slot) & 0x02;
+ presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15))
+ >> hp_slot) & 0x02;
return presence_save;
}
@@ -718,5 +734,12 @@ static inline int wait_for_ctrl_irq(struct controller *ctrl)
return retval;
}
-#endif
+#include <asm/pci_x86.h>
+static inline int cpqhp_routing_table_length(void)
+{
+ BUG_ON(cpqhp_routing_table == NULL);
+ return ((cpqhp_routing_table->size - sizeof(struct irq_routing_table)) /
+ sizeof(struct irq_info));
+}
+#endif
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index c2e1bcbb28a..075b4f4b6e0 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -25,8 +25,7 @@
* Send feedback to <greg@kroah.com>
*
* Jan 12, 2003 - Added 66/100/133MHz PCI-X support,
- * Torben Mathiasen <torben.mathiasen@hp.com>
- *
+ * Torben Mathiasen <torben.mathiasen@hp.com>
*/
#include <linux/module.h>
@@ -45,7 +44,6 @@
#include "cpqphp.h"
#include "cpqphp_nvram.h"
-#include <asm/pci_x86.h>
/* Global variables */
@@ -53,6 +51,7 @@ int cpqhp_debug;
int cpqhp_legacy_mode;
struct controller *cpqhp_ctrl_list; /* = NULL */
struct pci_func *cpqhp_slot_list[256];
+struct irq_routing_table *cpqhp_routing_table;
/* local variables */
static void __iomem *smbios_table;
@@ -78,33 +77,6 @@ MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
#define CPQHPC_MODULE_MINOR 208
-static int one_time_init (void);
-static int set_attention_status (struct hotplug_slot *slot, u8 value);
-static int process_SI (struct hotplug_slot *slot);
-static int process_SS (struct hotplug_slot *slot);
-static int hardware_test (struct hotplug_slot *slot, u32 value);
-static int get_power_status (struct hotplug_slot *slot, u8 *value);
-static int get_attention_status (struct hotplug_slot *slot, u8 *value);
-static int get_latch_status (struct hotplug_slot *slot, u8 *value);
-static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
-static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
-static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
-
-static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
- .set_attention_status = set_attention_status,
- .enable_slot = process_SI,
- .disable_slot = process_SS,
- .hardware_test = hardware_test,
- .get_power_status = get_power_status,
- .get_attention_status = get_attention_status,
- .get_latch_status = get_latch_status,
- .get_adapter_status = get_adapter_status,
- .get_max_bus_speed = get_max_bus_speed,
- .get_cur_bus_speed = get_cur_bus_speed,
-};
-
-
static inline int is_slot64bit(struct slot *slot)
{
return (readb(slot->p_sm_slot + SMBIOS_SLOT_WIDTH) == 0x06) ? 1 : 0;
@@ -144,7 +116,7 @@ static void __iomem * detect_SMBIOS_pointer(void __iomem *begin, void __iomem *e
break;
}
}
-
+
if (!status)
fp = NULL;
@@ -171,7 +143,7 @@ static int init_SERR(struct controller * ctrl)
tempdword = ctrl->first_slot;
number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
- // Loop through slots
+ /* Loop through slots */
while (number_of_slots) {
physical_slot = tempdword;
writeb(0, ctrl->hpc_reg + SLOT_SERR);
@@ -182,41 +154,42 @@ static int init_SERR(struct controller * ctrl)
return 0;
}
-
-/* nice debugging output */
-static int pci_print_IRQ_route (void)
+static int init_cpqhp_routing_table(void)
{
- struct irq_routing_table *routing_table;
int len;
- int loop;
-
- u8 tbus, tdevice, tslot;
- routing_table = pcibios_get_irq_routing_table();
- if (routing_table == NULL) {
- err("No BIOS Routing Table??? Not good\n");
+ cpqhp_routing_table = pcibios_get_irq_routing_table();
+ if (cpqhp_routing_table == NULL)
return -ENOMEM;
- }
- len = (routing_table->size - sizeof(struct irq_routing_table)) /
- sizeof(struct irq_info);
- // Make sure I got at least one entry
+ len = cpqhp_routing_table_length();
if (len == 0) {
- kfree(routing_table);
+ kfree(cpqhp_routing_table);
+ cpqhp_routing_table = NULL;
return -1;
}
- dbg("bus dev func slot\n");
+ return 0;
+}
+
+/* nice debugging output */
+static void pci_print_IRQ_route(void)
+{
+ int len;
+ int loop;
+ u8 tbus, tdevice, tslot;
+
+ len = cpqhp_routing_table_length();
+ dbg("bus dev func slot\n");
for (loop = 0; loop < len; ++loop) {
- tbus = routing_table->slots[loop].bus;
- tdevice = routing_table->slots[loop].devfn;
- tslot = routing_table->slots[loop].slot;
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn;
+ tslot = cpqhp_routing_table->slots[loop].slot;
dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot);
}
- kfree(routing_table);
- return 0;
+ return;
}
@@ -242,9 +215,9 @@ static void __iomem *get_subsequent_smbios_entry(void __iomem *smbios_start,
void __iomem *p_max;
if (!smbios_table || !curr)
- return(NULL);
+ return NULL;
- // set p_max to the end of the table
+ /* set p_max to the end of the table */
p_max = smbios_start + readw(smbios_table + ST_LENGTH);
p_temp = curr;
@@ -253,20 +226,19 @@ static void __iomem *get_subsequent_smbios_entry(void __iomem *smbios_start,
while ((p_temp < p_max) && !bail) {
/* Look for the double NULL terminator
* The first condition is the previous byte
- * and the second is the curr */
- if (!previous_byte && !(readb(p_temp))) {
+ * and the second is the curr
+ */
+ if (!previous_byte && !(readb(p_temp)))
bail = 1;
- }
previous_byte = readb(p_temp);
p_temp++;
}
- if (p_temp < p_max) {
+ if (p_temp < p_max)
return p_temp;
- } else {
+ else
return NULL;
- }
}
@@ -292,21 +264,18 @@ static void __iomem *get_SMBIOS_entry(void __iomem *smbios_start,
if (!smbios_table)
return NULL;
- if (!previous) {
+ if (!previous)
previous = smbios_start;
- } else {
+ else
previous = get_subsequent_smbios_entry(smbios_start,
smbios_table, previous);
- }
- while (previous) {
- if (readb(previous + SMBIOS_GENERIC_TYPE) != type) {
+ while (previous)
+ if (readb(previous + SMBIOS_GENERIC_TYPE) != type)
previous = get_subsequent_smbios_entry(smbios_start,
smbios_table, previous);
- } else {
+ else
break;
- }
- }
return previous;
}
@@ -322,144 +291,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
kfree(slot);
}
-#define SLOT_NAME_SIZE 10
-
-static int ctrl_slot_setup(struct controller *ctrl,
- void __iomem *smbios_start,
- void __iomem *smbios_table)
-{
- struct slot *slot;
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *hotplug_slot_info;
- u8 number_of_slots;
- u8 slot_device;
- u8 slot_number;
- u8 ctrl_slot;
- u32 tempdword;
- char name[SLOT_NAME_SIZE];
- void __iomem *slot_entry= NULL;
- int result = -ENOMEM;
-
- dbg("%s\n", __func__);
-
- tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
-
- number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
- slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
- slot_number = ctrl->first_slot;
-
- while (number_of_slots) {
- slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
- goto error;
-
- slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
- GFP_KERNEL);
- if (!slot->hotplug_slot)
- goto error_slot;
- hotplug_slot = slot->hotplug_slot;
-
- hotplug_slot->info =
- kzalloc(sizeof(*(hotplug_slot->info)),
- GFP_KERNEL);
- if (!hotplug_slot->info)
- goto error_hpslot;
- hotplug_slot_info = hotplug_slot->info;
-
- slot->ctrl = ctrl;
- slot->bus = ctrl->bus;
- slot->device = slot_device;
- slot->number = slot_number;
- dbg("slot->number = %u\n", slot->number);
-
- slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
- slot_entry);
-
- while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
- slot->number)) {
- slot_entry = get_SMBIOS_entry(smbios_start,
- smbios_table, 9, slot_entry);
- }
-
- slot->p_sm_slot = slot_entry;
-
- init_timer(&slot->task_event);
- slot->task_event.expires = jiffies + 5 * HZ;
- slot->task_event.function = cpqhp_pushbutton_thread;
-
- //FIXME: these capabilities aren't used but if they are
- // they need to be correctly implemented
- slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
- slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
-
- if (is_slot64bit(slot))
- slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
- if (is_slot66mhz(slot))
- slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
- if (ctrl->speed == PCI_SPEED_66MHz)
- slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
-
- ctrl_slot =
- slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
-
- // Check presence
- slot->capabilities |=
- ((((~tempdword) >> 23) |
- ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
- // Check the switch state
- slot->capabilities |=
- ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
- // Check the slot enable
- slot->capabilities |=
- ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
-
- /* register this slot with the hotplug pci core */
- hotplug_slot->release = &release_slot;
- hotplug_slot->private = slot;
- snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
- hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
-
- hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
- hotplug_slot_info->attention_status =
- cpq_get_attention_status(ctrl, slot);
- hotplug_slot_info->latch_status =
- cpq_get_latch_status(ctrl, slot);
- hotplug_slot_info->adapter_status =
- get_presence_status(ctrl, slot);
-
- dbg("registering bus %d, dev %d, number %d, "
- "ctrl->slot_device_offset %d, slot %d\n",
- slot->bus, slot->device,
- slot->number, ctrl->slot_device_offset,
- slot_number);
- result = pci_hp_register(hotplug_slot,
- ctrl->pci_dev->bus,
- slot->device,
- name);
- if (result) {
- err("pci_hp_register failed with error %d\n", result);
- goto error_info;
- }
-
- slot->next = ctrl->slot;
- ctrl->slot = slot;
-
- number_of_slots--;
- slot_device++;
- slot_number++;
- }
-
- return 0;
-error_info:
- kfree(hotplug_slot_info);
-error_hpslot:
- kfree(hotplug_slot);
-error_slot:
- kfree(slot);
-error:
- return result;
-}
-
static int ctrl_slot_cleanup (struct controller * ctrl)
{
struct slot *old_slot, *next_slot;
@@ -476,36 +307,32 @@ static int ctrl_slot_cleanup (struct controller * ctrl)
cpqhp_remove_debugfs_files(ctrl);
- //Free IRQ associated with hot plug device
+ /* Free IRQ associated with hot plug device */
free_irq(ctrl->interrupt, ctrl);
- //Unmap the memory
+ /* Unmap the memory */
iounmap(ctrl->hpc_reg);
- //Finally reclaim PCI mem
+ /* Finally reclaim PCI mem */
release_mem_region(pci_resource_start(ctrl->pci_dev, 0),
pci_resource_len(ctrl->pci_dev, 0));
- return(0);
+ return 0;
}
-//============================================================================
-// function: get_slot_mapping
-//
-// Description: Attempts to determine a logical slot mapping for a PCI
-// device. Won't work for more than one PCI-PCI bridge
-// in a slot.
-//
-// Input: u8 bus_num - bus number of PCI device
-// u8 dev_num - device number of PCI device
-// u8 *slot - Pointer to u8 where slot number will
-// be returned
-//
-// Output: SUCCESS or FAILURE
-//=============================================================================
+/**
+ * get_slot_mapping - determine logical slot mapping for PCI device
+ *
+ * Won't work for more than one PCI-PCI bridge in a slot.
+ *
+ * @bus_num - bus number of PCI device
+ * @dev_num - device number of PCI device
+ * @slot - Pointer to u8 where slot number will be returned
+ *
+ * Output: SUCCESS or FAILURE
+ */
static int
get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
{
- struct irq_routing_table *PCIIRQRoutingInfoLength;
u32 work;
long len;
long loop;
@@ -516,36 +343,25 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
bridgeSlot = 0xFF;
- PCIIRQRoutingInfoLength = pcibios_get_irq_routing_table();
- if (!PCIIRQRoutingInfoLength)
- return -1;
-
- len = (PCIIRQRoutingInfoLength->size -
- sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
- // Make sure I got at least one entry
- if (len == 0) {
- kfree(PCIIRQRoutingInfoLength);
- return -1;
- }
-
+ len = cpqhp_routing_table_length();
for (loop = 0; loop < len; ++loop) {
- tbus = PCIIRQRoutingInfoLength->slots[loop].bus;
- tdevice = PCIIRQRoutingInfoLength->slots[loop].devfn >> 3;
- tslot = PCIIRQRoutingInfoLength->slots[loop].slot;
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn >> 3;
+ tslot = cpqhp_routing_table->slots[loop].slot;
if ((tbus == bus_num) && (tdevice == dev_num)) {
*slot = tslot;
- kfree(PCIIRQRoutingInfoLength);
return 0;
} else {
/* Did not get a match on the target PCI device. Check
- * if the current IRQ table entry is a PCI-to-PCI bridge
- * device. If so, and it's secondary bus matches the
- * bus number for the target device, I need to save the
- * bridge's slot number. If I can not find an entry for
- * the target device, I will have to assume it's on the
- * other side of the bridge, and assign it the bridge's
- * slot. */
+ * if the current IRQ table entry is a PCI-to-PCI
+			 * bridge device. If so, and its secondary bus
+			 * matches the bus number for the target device, I need
+			 * to save the bridge's slot number. If I cannot find
+ * an entry for the target device, I will have to
+ * assume it's on the other side of the bridge, and
+ * assign it the bridge's slot.
+ */
bus->number = tbus;
pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0),
PCI_CLASS_REVISION, &work);
@@ -555,25 +371,23 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
PCI_DEVFN(tdevice, 0),
PCI_PRIMARY_BUS, &work);
// See if bridge's secondary bus matches target bus.
- if (((work >> 8) & 0x000000FF) == (long) bus_num) {
+ if (((work >> 8) & 0x000000FF) == (long) bus_num)
bridgeSlot = tslot;
- }
}
}
}
- // If we got here, we didn't find an entry in the IRQ mapping table
- // for the target PCI device. If we did determine that the target
- // device is on the other side of a PCI-to-PCI bridge, return the
- // slot number for the bridge.
+ /* If we got here, we didn't find an entry in the IRQ mapping table for
+ * the target PCI device. If we did determine that the target device
+ * is on the other side of a PCI-to-PCI bridge, return the slot number
+ * for the bridge.
+ */
if (bridgeSlot != 0xFF) {
*slot = bridgeSlot;
- kfree(PCIIRQRoutingInfoLength);
return 0;
}
- kfree(PCIIRQRoutingInfoLength);
- // Couldn't find an entry in the routing table for this PCI device
+ /* Couldn't find an entry in the routing table for this PCI device */
return -1;
}
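
The lookup above leans on two bit-level conventions that are easy to miss: the IRQ routing table stores a packed devfn, so the device number comes from a shift by 3, and the bridge check reads the PCI_PRIMARY_BUS config dword, whose second byte is the bridge's secondary bus number. The following is only an illustrative user-space sketch of those two manipulations, not part of the patch; the macros are local copies of the usual kernel definitions and the sample dword value is made up.

#include <stdio.h>
#include <stdint.h>

/* Local copies of the usual devfn packing macros. */
#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)

int main(void)
{
	uint8_t devfn = PCI_DEVFN(0x0a, 2);	/* device 0x0a, function 2 */

	/* Layout of the dword read at PCI_PRIMARY_BUS on a bridge:
	 * byte 0 = primary bus, byte 1 = secondary bus,
	 * byte 2 = subordinate bus, byte 3 = secondary latency timer.
	 * The value below is a made-up example.
	 */
	uint32_t primary_bus_dword = 0x00400201;

	printf("device 0x%02x, function %u\n",
	       (unsigned)PCI_SLOT(devfn), (unsigned)PCI_FUNC(devfn));
	printf("secondary bus 0x%02lx\n",
	       (unsigned long)((primary_bus_dword >> 8) & 0xff));
	return 0;
}
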
@@ -591,32 +405,32 @@ cpqhp_set_attention_status(struct controller *ctrl, struct pci_func *func,
u8 hp_slot;
if (func == NULL)
- return(1);
+ return 1;
hp_slot = func->device - ctrl->slot_device_offset;
- // Wait for exclusive access to hardware
+ /* Wait for exclusive access to hardware */
mutex_lock(&ctrl->crit_sect);
- if (status == 1) {
+ if (status == 1)
amber_LED_on (ctrl, hp_slot);
- } else if (status == 0) {
+ else if (status == 0)
amber_LED_off (ctrl, hp_slot);
- } else {
- // Done with exclusive hardware access
+ else {
+ /* Done with exclusive hardware access */
mutex_unlock(&ctrl->crit_sect);
- return(1);
+ return 1;
}
set_SOGO(ctrl);
- // Wait for SOBS to be unset
+ /* Wait for SOBS to be unset */
wait_for_ctrl_irq (ctrl);
- // Done with exclusive hardware access
+ /* Done with exclusive hardware access */
mutex_unlock(&ctrl->crit_sect);
- return(0);
+ return 0;
}
@@ -719,7 +533,7 @@ static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
- return cpqhp_hardware_test(ctrl, value);
+ return cpqhp_hardware_test(ctrl, value);
}
@@ -738,7 +552,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
-
+
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = cpq_get_attention_status(ctrl, slot);
@@ -793,6 +607,230 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
return 0;
}
+static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
+ .set_attention_status = set_attention_status,
+ .enable_slot = process_SI,
+ .disable_slot = process_SS,
+ .hardware_test = hardware_test,
+ .get_power_status = get_power_status,
+ .get_attention_status = get_attention_status,
+ .get_latch_status = get_latch_status,
+ .get_adapter_status = get_adapter_status,
+ .get_max_bus_speed = get_max_bus_speed,
+ .get_cur_bus_speed = get_cur_bus_speed,
+};
+
+#define SLOT_NAME_SIZE 10
+
+static int ctrl_slot_setup(struct controller *ctrl,
+ void __iomem *smbios_start,
+ void __iomem *smbios_table)
+{
+ struct slot *slot;
+ struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot_info *hotplug_slot_info;
+ u8 number_of_slots;
+ u8 slot_device;
+ u8 slot_number;
+ u8 ctrl_slot;
+ u32 tempdword;
+ char name[SLOT_NAME_SIZE];
+	void __iomem *slot_entry = NULL;
+ int result = -ENOMEM;
+
+ dbg("%s\n", __func__);
+
+ tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
+
+ number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
+ slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
+ slot_number = ctrl->first_slot;
+
+ while (number_of_slots) {
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ goto error;
+
+ slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
+ GFP_KERNEL);
+ if (!slot->hotplug_slot)
+ goto error_slot;
+ hotplug_slot = slot->hotplug_slot;
+
+ hotplug_slot->info = kzalloc(sizeof(*(hotplug_slot->info)),
+ GFP_KERNEL);
+ if (!hotplug_slot->info)
+ goto error_hpslot;
+ hotplug_slot_info = hotplug_slot->info;
+
+ slot->ctrl = ctrl;
+ slot->bus = ctrl->bus;
+ slot->device = slot_device;
+ slot->number = slot_number;
+ dbg("slot->number = %u\n", slot->number);
+
+ slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
+ slot_entry);
+
+ while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
+ slot->number)) {
+ slot_entry = get_SMBIOS_entry(smbios_start,
+ smbios_table, 9, slot_entry);
+ }
+
+ slot->p_sm_slot = slot_entry;
+
+ init_timer(&slot->task_event);
+ slot->task_event.expires = jiffies + 5 * HZ;
+ slot->task_event.function = cpqhp_pushbutton_thread;
+
+		/* FIXME: these capabilities aren't used but if they are
+ * they need to be correctly implemented
+ */
+ slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
+ slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
+
+ if (is_slot64bit(slot))
+ slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
+ if (is_slot66mhz(slot))
+ slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
+ if (ctrl->speed == PCI_SPEED_66MHz)
+ slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
+
+ ctrl_slot =
+ slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
+
+ /* Check presence */
+ slot->capabilities |=
+ ((((~tempdword) >> 23) |
+ ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
+ /* Check the switch state */
+ slot->capabilities |=
+ ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
+ /* Check the slot enable */
+ slot->capabilities |=
+ ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
+
+ /* register this slot with the hotplug pci core */
+ hotplug_slot->release = &release_slot;
+ hotplug_slot->private = slot;
+ snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
+ hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
+
+ hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
+ hotplug_slot_info->attention_status =
+ cpq_get_attention_status(ctrl, slot);
+ hotplug_slot_info->latch_status =
+ cpq_get_latch_status(ctrl, slot);
+ hotplug_slot_info->adapter_status =
+ get_presence_status(ctrl, slot);
+
+ dbg("registering bus %d, dev %d, number %d, "
+ "ctrl->slot_device_offset %d, slot %d\n",
+ slot->bus, slot->device,
+ slot->number, ctrl->slot_device_offset,
+ slot_number);
+ result = pci_hp_register(hotplug_slot,
+ ctrl->pci_dev->bus,
+ slot->device,
+ name);
+ if (result) {
+ err("pci_hp_register failed with error %d\n", result);
+ goto error_info;
+ }
+
+ slot->next = ctrl->slot;
+ ctrl->slot = slot;
+
+ number_of_slots--;
+ slot_device++;
+ slot_number++;
+ }
+
+ return 0;
+error_info:
+ kfree(hotplug_slot_info);
+error_hpslot:
+ kfree(hotplug_slot);
+error_slot:
+ kfree(slot);
+error:
+ return result;
+}
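
ctrl_slot_setup() above derives the slot count and the first device number from the two nibbles of SLOT_MASK, and folds three per-slot status flags into the low bits of slot->capabilities (0x01 switch state, 0x02 presence, 0x04 slot enable, going by the comments). Below is a stand-alone sketch of that decoding, not part of the patch; the CAP_* names are made up here and the bit meanings are taken only from the comments above.

#include <stdio.h>
#include <stdint.h>

#define CAP_SWITCH	0x01	/* switch state bit, per the comments above */
#define CAP_PRESENT	0x02	/* presence bit */
#define CAP_ENABLED	0x04	/* slot enable bit */

static void decode_slot_mask(uint8_t slot_mask)
{
	/* Low nibble: number of slots; high nibble: first device number. */
	printf("slots: %u, first device: %u\n",
	       (unsigned)(slot_mask & 0x0f), (unsigned)(slot_mask >> 4));
}

static void decode_caps(uint32_t capabilities)
{
	printf("switch:%d present:%d enabled:%d\n",
	       !!(capabilities & CAP_SWITCH),
	       !!(capabilities & CAP_PRESENT),
	       !!(capabilities & CAP_ENABLED));
}

int main(void)
{
	decode_slot_mask(0x44);			/* 4 slots starting at device 4 */
	decode_caps(CAP_PRESENT | CAP_ENABLED);
	return 0;
}
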
+
+static int one_time_init(void)
+{
+ int loop;
+ int retval = 0;
+
+ if (initialized)
+ return 0;
+
+ power_mode = 0;
+
+ retval = init_cpqhp_routing_table();
+ if (retval)
+ goto error;
+
+ if (cpqhp_debug)
+ pci_print_IRQ_route();
+
+	dbg("Initialize + Start the notification mechanism\n");
+
+ retval = cpqhp_event_start_thread();
+ if (retval)
+ goto error;
+
+ dbg("Initialize slot lists\n");
+ for (loop = 0; loop < 256; loop++)
+ cpqhp_slot_list[loop] = NULL;
+
+ /* FIXME: We also need to hook the NMI handler eventually.
+ * this also needs to be worked with Christoph
+ * register_NMI_handler();
+ */
+ /* Map rom address */
+ cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
+ if (!cpqhp_rom_start) {
+		err("Could not ioremap memory region for ROM\n");
+ retval = -EIO;
+ goto error;
+ }
+
+ /* Now, map the int15 entry point if we are on compaq specific
+ * hardware
+ */
+ compaq_nvram_init(cpqhp_rom_start);
+
+ /* Map smbios table entry point structure */
+ smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
+ cpqhp_rom_start + ROM_PHY_LEN);
+ if (!smbios_table) {
+		err("Could not find the SMBIOS pointer in memory\n");
+ retval = -EIO;
+ goto error_rom_start;
+ }
+
+ smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
+ readw(smbios_table + ST_LENGTH));
+ if (!smbios_start) {
+		err("Could not ioremap memory region taken from SMBIOS values\n");
+ retval = -EIO;
+ goto error_smbios_start;
+ }
+
+ initialized = 1;
+
+ return retval;
+
+error_smbios_start:
+ iounmap(smbios_start);
+error_rom_start:
+ iounmap(cpqhp_rom_start);
+error:
+ return retval;
+}
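
one_time_init() above follows the usual staged-initialization shape: each successfully acquired mapping gets its own error label, and the labels unwind in reverse order of acquisition. A minimal stand-alone sketch of that pattern, not part of the patch; malloc() merely stands in for ioremap() and the names are made up.

#include <stdlib.h>

static int init_stages(void)
{
	void *rom, *smbios;
	int retval = 0;

	rom = malloc(64);		/* stands in for the ROM ioremap() */
	if (!rom) {
		retval = -1;
		goto error;
	}

	smbios = malloc(64);		/* stands in for the SMBIOS mapping */
	if (!smbios) {
		retval = -1;
		goto error_rom;
	}

	/* ... the rest of the one-time setup would run here ... */

	/* The real driver keeps both mappings; they are freed here only so
	 * the stand-alone demo does not leak.
	 */
	free(smbios);
	free(rom);
	return retval;

error_rom:
	free(rom);			/* undo in reverse order of acquisition */
error:
	return retval;
}

int main(void)
{
	return init_stages() ? EXIT_FAILURE : EXIT_SUCCESS;
}
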
+
static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u8 num_of_slots = 0;
@@ -815,7 +853,9 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
}
- // Need to read VID early b/c it's used to differentiate CPQ and INTC discovery
+	/* Need to read VID early because it's used to differentiate CPQ and
+	 * INTC discovery
+	 */
rc = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id);
if (rc || ((vendor_id != PCI_VENDOR_ID_COMPAQ) && (vendor_id != PCI_VENDOR_ID_INTEL))) {
err(msg_HPC_non_compaq_or_intel);
@@ -832,217 +872,209 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
	/* Check for the proper subsystem IDs
- * Intel uses a different SSID programming model than Compaq.
+ * Intel uses a different SSID programming model than Compaq.
* For Intel, each SSID bit identifies a PHP capability.
* Also Intel HPC's may have RID=0.
*/
- if ((pdev->revision > 2) || (vendor_id == PCI_VENDOR_ID_INTEL)) {
- // TODO: This code can be made to support non-Compaq or Intel subsystem IDs
- rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
- if (rc) {
- err("%s : pci_read_config_word failed\n", __func__);
- goto err_disable_device;
- }
- dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
- if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
- err(msg_HPC_non_compaq_or_intel);
- rc = -ENODEV;
- goto err_disable_device;
- }
+ if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
+ err(msg_HPC_not_supported);
+ return -ENODEV;
+ }
- ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL);
- if (!ctrl) {
- err("%s : out of memory\n", __func__);
- rc = -ENOMEM;
- goto err_disable_device;
- }
+ /* TODO: This code can be made to support non-Compaq or Intel
+ * subsystem IDs
+ */
+ rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
+ if (rc) {
+ err("%s : pci_read_config_word failed\n", __func__);
+ goto err_disable_device;
+ }
+ dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
+ if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
+ err(msg_HPC_non_compaq_or_intel);
+ rc = -ENODEV;
+ goto err_disable_device;
+ }
- rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid);
- if (rc) {
- err("%s : pci_read_config_word failed\n", __func__);
- goto err_free_ctrl;
- }
+ ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL);
+ if (!ctrl) {
+ err("%s : out of memory\n", __func__);
+ rc = -ENOMEM;
+ goto err_disable_device;
+ }
- info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
-
- /* Set Vendor ID, so it can be accessed later from other functions */
- ctrl->vendor_id = vendor_id;
-
- switch (subsystem_vid) {
- case PCI_VENDOR_ID_COMPAQ:
- if (pdev->revision >= 0x13) { /* CIOBX */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 1;
- ctrl->pcix_speed_capability = 1;
- pci_read_config_byte(pdev, 0x41, &bus_cap);
- if (bus_cap & 0x80) {
- dbg("bus max supports 133MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
- break;
- }
- if (bus_cap & 0x40) {
- dbg("bus max supports 100MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
- break;
- }
- if (bus_cap & 20) {
- dbg("bus max supports 66MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
- break;
- }
- if (bus_cap & 10) {
- dbg("bus max supports 66MHz PCI\n");
- ctrl->speed_capability = PCI_SPEED_66MHz;
- break;
- }
-
- break;
- }
-
- switch (subsystem_deviceid) {
- case PCI_SUB_HPC_ID:
- /* Original 6500/7000 implementation */
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
- ctrl->push_button = 0;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID2:
- /* First Pushbutton implementation */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID_INTC:
- /* Third party (6500/7000) */
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
- ctrl->push_button = 0;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID3:
- /* First 66 Mhz implementation */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_66MHz;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID4:
- /* First PCI-X implementation, 100MHz */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 1;
- ctrl->pcix_speed_capability = 0;
- break;
- default:
- err(msg_HPC_not_supported);
- rc = -ENODEV;
- goto err_free_ctrl;
- }
- break;
+ rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid);
+ if (rc) {
+ err("%s : pci_read_config_word failed\n", __func__);
+ goto err_free_ctrl;
+ }
- case PCI_VENDOR_ID_INTEL:
- /* Check for speed capability (0=33, 1=66) */
- if (subsystem_deviceid & 0x0001) {
- ctrl->speed_capability = PCI_SPEED_66MHz;
- } else {
- ctrl->speed_capability = PCI_SPEED_33MHz;
- }
-
- /* Check for push button */
- if (subsystem_deviceid & 0x0002) {
- /* no push button */
- ctrl->push_button = 0;
- } else {
- /* push button supported */
- ctrl->push_button = 1;
- }
-
- /* Check for slot switch type (0=mechanical, 1=not mechanical) */
- if (subsystem_deviceid & 0x0004) {
- /* no switch */
- ctrl->slot_switch_type = 0;
- } else {
- /* switch */
- ctrl->slot_switch_type = 1;
- }
-
- /* PHP Status (0=De-feature PHP, 1=Normal operation) */
- if (subsystem_deviceid & 0x0008) {
- ctrl->defeature_PHP = 1; // PHP supported
- } else {
- ctrl->defeature_PHP = 0; // PHP not supported
- }
-
- /* Alternate Base Address Register Interface (0=not supported, 1=supported) */
- if (subsystem_deviceid & 0x0010) {
- ctrl->alternate_base_address = 1; // supported
- } else {
- ctrl->alternate_base_address = 0; // not supported
- }
-
- /* PCI Config Space Index (0=not supported, 1=supported) */
- if (subsystem_deviceid & 0x0020) {
- ctrl->pci_config_space = 1; // supported
- } else {
- ctrl->pci_config_space = 0; // not supported
- }
-
- /* PCI-X support */
- if (subsystem_deviceid & 0x0080) {
- /* PCI-X capable */
- ctrl->pcix_support = 1;
- /* Frequency of operation in PCI-X mode */
- if (subsystem_deviceid & 0x0040) {
- /* 133MHz PCI-X if bit 7 is 1 */
- ctrl->pcix_speed_capability = 1;
- } else {
- /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
- /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
- ctrl->pcix_speed_capability = 0;
- }
- } else {
- /* Conventional PCI */
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- }
+ info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
+
+ /* Set Vendor ID, so it can be accessed later from other
+ * functions
+ */
+ ctrl->vendor_id = vendor_id;
+
+ switch (subsystem_vid) {
+ case PCI_VENDOR_ID_COMPAQ:
+ if (pdev->revision >= 0x13) { /* CIOBX */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 1;
+ ctrl->pcix_speed_capability = 1;
+ pci_read_config_byte(pdev, 0x41, &bus_cap);
+ if (bus_cap & 0x80) {
+ dbg("bus max supports 133MHz PCI-X\n");
+ ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
break;
+ }
+ if (bus_cap & 0x40) {
+ dbg("bus max supports 100MHz PCI-X\n");
+ ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
+ break;
+ }
+			if (bus_cap & 0x20) {
+ dbg("bus max supports 66MHz PCI-X\n");
+ ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
+ break;
+ }
+			if (bus_cap & 0x10) {
+ dbg("bus max supports 66MHz PCI\n");
+ ctrl->speed_capability = PCI_SPEED_66MHz;
+ break;
+ }
+
+ break;
+ }
- default:
- err(msg_HPC_not_supported);
- rc = -ENODEV;
- goto err_free_ctrl;
+ switch (subsystem_deviceid) {
+ case PCI_SUB_HPC_ID:
+ /* Original 6500/7000 implementation */
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+ ctrl->push_button = 0;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID2:
+ /* First Pushbutton implementation */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID_INTC:
+ /* Third party (6500/7000) */
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+ ctrl->push_button = 0;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID3:
+		/* First 66 MHz implementation */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_66MHz;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID4:
+ /* First PCI-X implementation, 100MHz */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 1;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ default:
+ err(msg_HPC_not_supported);
+ rc = -ENODEV;
+ goto err_free_ctrl;
}
+ break;
+
+ case PCI_VENDOR_ID_INTEL:
+ /* Check for speed capability (0=33, 1=66) */
+ if (subsystem_deviceid & 0x0001)
+ ctrl->speed_capability = PCI_SPEED_66MHz;
+ else
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+
+ /* Check for push button */
+ if (subsystem_deviceid & 0x0002)
+ ctrl->push_button = 0;
+ else
+ ctrl->push_button = 1;
+
+ /* Check for slot switch type (0=mechanical, 1=not mechanical) */
+ if (subsystem_deviceid & 0x0004)
+ ctrl->slot_switch_type = 0;
+ else
+ ctrl->slot_switch_type = 1;
+
+ /* PHP Status (0=De-feature PHP, 1=Normal operation) */
+ if (subsystem_deviceid & 0x0008)
+ ctrl->defeature_PHP = 1; /* PHP supported */
+ else
+ ctrl->defeature_PHP = 0; /* PHP not supported */
+
+ /* Alternate Base Address Register Interface
+ * (0=not supported, 1=supported)
+ */
+ if (subsystem_deviceid & 0x0010)
+ ctrl->alternate_base_address = 1;
+ else
+ ctrl->alternate_base_address = 0;
+
+ /* PCI Config Space Index (0=not supported, 1=supported) */
+ if (subsystem_deviceid & 0x0020)
+ ctrl->pci_config_space = 1;
+ else
+ ctrl->pci_config_space = 0;
+
+ /* PCI-X support */
+ if (subsystem_deviceid & 0x0080) {
+ ctrl->pcix_support = 1;
+ if (subsystem_deviceid & 0x0040)
+ /* 133MHz PCI-X if bit 7 is 1 */
+ ctrl->pcix_speed_capability = 1;
+ else
+ /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
+ /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
+ ctrl->pcix_speed_capability = 0;
+ } else {
+ /* Conventional PCI */
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ }
+ break;
- } else {
+ default:
err(msg_HPC_not_supported);
- return -ENODEV;
+ rc = -ENODEV;
+ goto err_free_ctrl;
}
- // Tell the user that we found one.
+ /* Tell the user that we found one. */
info("Initializing the PCI hot plug controller residing on PCI bus %d\n",
pdev->bus->number);
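
For the Intel controllers, the probe code in the hunk above reads each feature straight out of individual bits of the PCI subsystem device ID. The decoder below is only an illustrative stand-alone restatement of that mapping, not part of the patch; the struct and function names are made up and the bit meanings are taken from the switch statement above.

#include <stdio.h>
#include <stdint.h>

struct intel_hpc_caps {
	int speed_66mhz;
	int push_button;
	int slot_switch;
	int php_enabled;
	int alt_bar;
	int config_space_index;
	int pcix_support;
	int pcix_133mhz;
};

static void decode_intel_ssid(uint16_t ssid, struct intel_hpc_caps *c)
{
	c->speed_66mhz		= !!(ssid & 0x0001);
	c->push_button		= !(ssid & 0x0002);	/* bit set means no button */
	c->slot_switch		= !(ssid & 0x0004);	/* bit set means no switch */
	c->php_enabled		= !!(ssid & 0x0008);
	c->alt_bar		= !!(ssid & 0x0010);
	c->config_space_index	= !!(ssid & 0x0020);
	c->pcix_support		= !!(ssid & 0x0080);
	c->pcix_133mhz		= c->pcix_support && (ssid & 0x0040);
}

int main(void)
{
	struct intel_hpc_caps caps;

	decode_intel_ssid(0x00c1, &caps);	/* 66 MHz, button, PCI-X 133 */
	printf("66MHz:%d button:%d switch:%d php:%d pcix:%d pcix133:%d\n",
	       caps.speed_66mhz, caps.push_button, caps.slot_switch,
	       caps.php_enabled, caps.pcix_support, caps.pcix_133mhz);
	return 0;
}
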
@@ -1087,7 +1119,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc) {
goto err_free_bus;
}
-
+
dbg("pdev = %p\n", pdev);
dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0));
dbg("pci resource len %llx\n", (unsigned long long)pci_resource_len(pdev, 0));
@@ -1109,7 +1141,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free_mem_region;
}
- // Check for 66Mhz operation
+	/* Check for 66 MHz operation */
ctrl->speed = get_controller_speed(ctrl);
@@ -1120,7 +1152,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*
********************************************************/
- // find the physical slot number of the first hot plug slot
+ /* find the physical slot number of the first hot plug slot */
/* Get slot won't work for devices behind bridges, but
* in this case it will always be called for the "base"
@@ -1137,7 +1169,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_iounmap;
}
- // Store PCI Config Space for all devices on this bus
+ /* Store PCI Config Space for all devices on this bus */
rc = cpqhp_save_config(ctrl, ctrl->bus, readb(ctrl->hpc_reg + SLOT_MASK));
if (rc) {
err("%s: unable to save PCI configuration data, error %d\n",
@@ -1148,7 +1180,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/*
* Get IO, memory, and IRQ resources for new devices
*/
- // The next line is required for cpqhp_find_available_resources
+ /* The next line is required for cpqhp_find_available_resources */
ctrl->interrupt = pdev->irq;
if (ctrl->interrupt < 0x10) {
cpqhp_legacy_mode = 1;
@@ -1182,7 +1214,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
__func__, rc);
goto err_iounmap;
}
-
+
/* Mask all general input interrupts */
writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_MASK);
@@ -1196,12 +1228,14 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_iounmap;
}
- /* Enable Shift Out interrupt and clear it, also enable SERR on power fault */
+ /* Enable Shift Out interrupt and clear it, also enable SERR on power
+ * fault
+ */
temp_word = readw(ctrl->hpc_reg + MISC);
temp_word |= 0x4006;
writew(temp_word, ctrl->hpc_reg + MISC);
- // Changed 05/05/97 to clear all interrupts at start
+ /* Changed 05/05/97 to clear all interrupts at start */
writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_INPUT_CLEAR);
ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
@@ -1216,13 +1250,14 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cpqhp_ctrl_list = ctrl;
}
- // turn off empty slots here unless command line option "ON" set
- // Wait for exclusive access to hardware
+ /* turn off empty slots here unless command line option "ON" set
+ * Wait for exclusive access to hardware
+ */
mutex_lock(&ctrl->crit_sect);
num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
- // find first device number for the ctrl
+ /* find first device number for the ctrl */
device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
while (num_of_slots) {
@@ -1234,23 +1269,21 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hp_slot = func->device - ctrl->slot_device_offset;
dbg("hp_slot: %d\n", hp_slot);
- // We have to save the presence info for these slots
+ /* We have to save the presence info for these slots */
temp_word = ctrl->ctrl_int_comp >> 16;
func->presence_save = (temp_word >> hp_slot) & 0x01;
func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
- if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
+ if (ctrl->ctrl_int_comp & (0x1L << hp_slot))
func->switch_save = 0;
- } else {
+ else
func->switch_save = 0x10;
- }
- if (!power_mode) {
+ if (!power_mode)
if (!func->is_a_board) {
green_LED_off(ctrl, hp_slot);
slot_disable(ctrl, hp_slot);
}
- }
device++;
num_of_slots--;
@@ -1258,7 +1291,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!power_mode) {
set_SOGO(ctrl);
- // Wait for SOBS to be unset
+ /* Wait for SOBS to be unset */
wait_for_ctrl_irq(ctrl);
}
@@ -1269,7 +1302,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free_irq;
}
- // Done with exclusive hardware access
+ /* Done with exclusive hardware access */
mutex_unlock(&ctrl->crit_sect);
cpqhp_create_debugfs_files(ctrl);
@@ -1291,77 +1324,6 @@ err_disable_device:
return rc;
}
-
-static int one_time_init(void)
-{
- int loop;
- int retval = 0;
-
- if (initialized)
- return 0;
-
- power_mode = 0;
-
- retval = pci_print_IRQ_route();
- if (retval)
- goto error;
-
- dbg("Initialize + Start the notification mechanism \n");
-
- retval = cpqhp_event_start_thread();
- if (retval)
- goto error;
-
- dbg("Initialize slot lists\n");
- for (loop = 0; loop < 256; loop++) {
- cpqhp_slot_list[loop] = NULL;
- }
-
- // FIXME: We also need to hook the NMI handler eventually.
- // this also needs to be worked with Christoph
- // register_NMI_handler();
-
- // Map rom address
- cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
- if (!cpqhp_rom_start) {
- err ("Could not ioremap memory region for ROM\n");
- retval = -EIO;
- goto error;
- }
-
- /* Now, map the int15 entry point if we are on compaq specific hardware */
- compaq_nvram_init(cpqhp_rom_start);
-
- /* Map smbios table entry point structure */
- smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
- cpqhp_rom_start + ROM_PHY_LEN);
- if (!smbios_table) {
- err ("Could not find the SMBIOS pointer in memory\n");
- retval = -EIO;
- goto error_rom_start;
- }
-
- smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
- readw(smbios_table + ST_LENGTH));
- if (!smbios_start) {
- err ("Could not ioremap memory region taken from SMBIOS values\n");
- retval = -EIO;
- goto error_smbios_start;
- }
-
- initialized = 1;
-
- return retval;
-
-error_smbios_start:
- iounmap(smbios_start);
-error_rom_start:
- iounmap(cpqhp_rom_start);
-error:
- return retval;
-}
-
-
static void __exit unload_cpqphpd(void)
{
struct pci_func *next;
@@ -1381,10 +1343,10 @@ static void __exit unload_cpqphpd(void)
if (ctrl->hpc_reg) {
u16 misc;
rc = read_slot_enable (ctrl);
-
+
writeb(0, ctrl->hpc_reg + SLOT_SERR);
writel(0xFFFFFFC0L | ~rc, ctrl->hpc_reg + INT_MASK);
-
+
misc = readw(ctrl->hpc_reg + MISC);
misc &= 0xFFFD;
writew(misc, ctrl->hpc_reg + MISC);
@@ -1464,38 +1426,34 @@ static void __exit unload_cpqphpd(void)
}
}
- // Stop the notification mechanism
+ /* Stop the notification mechanism */
if (initialized)
cpqhp_event_stop_thread();
- //unmap the rom address
+ /* unmap the rom address */
if (cpqhp_rom_start)
iounmap(cpqhp_rom_start);
if (smbios_start)
iounmap(smbios_start);
}
-
-
static struct pci_device_id hpcd_pci_tbl[] = {
{
/* handle any PCI Hotplug controller */
.class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00),
.class_mask = ~0,
-
+
/* no matter who makes it */
.vendor = PCI_ANY_ID,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
-
+
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, hpcd_pci_tbl);
-
-
static struct pci_driver cpqhpc_driver = {
.name = "compaq_pci_hotplug",
.id_table = hpcd_pci_tbl,
@@ -1503,8 +1461,6 @@ static struct pci_driver cpqhpc_driver = {
/* remove: cpqhpc_remove_one, */
};
-
-
static int __init cpqhpc_init(void)
{
int result;
@@ -1518,7 +1474,6 @@ static int __init cpqhpc_init(void)
return result;
}
-
static void __exit cpqhpc_cleanup(void)
{
dbg("unload_cpqphpd()\n");
@@ -1529,8 +1484,5 @@ static void __exit cpqhpc_cleanup(void)
cpqhp_shutdown_debugfs();
}
-
module_init(cpqhpc_init);
module_exit(cpqhpc_cleanup);
-
-
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index cc227a8c4b1..2fa47af992a 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -81,14 +81,15 @@ static u8 handle_switch_change(u8 change, struct controller * ctrl)
for (hp_slot = 0; hp_slot < 6; hp_slot++) {
if (change & (0x1L << hp_slot)) {
- /**********************************
+ /*
* this one changed.
- **********************************/
+ */
func = cpqhp_slot_find(ctrl->bus,
(hp_slot + ctrl->slot_device_offset), 0);
/* this is the structure that tells the worker thread
- *what to do */
+ * what to do
+ */
taskInfo = &(ctrl->event_queue[ctrl->next_event]);
ctrl->next_event = (ctrl->next_event + 1) % 10;
taskInfo->hp_slot = hp_slot;
@@ -100,17 +101,17 @@ static u8 handle_switch_change(u8 change, struct controller * ctrl)
func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
- /**********************************
+ /*
* Switch opened
- **********************************/
+ */
func->switch_save = 0;
taskInfo->event_type = INT_SWITCH_OPEN;
} else {
- /**********************************
+ /*
* Switch closed
- **********************************/
+ */
func->switch_save = 0x10;
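
The interrupt-side handlers in these hunks hand work to the event thread through ctrl->event_queue, a fixed array of ten entries indexed by next_event and advanced modulo 10; nothing in the code shown here checks for the producer lapping the consumer. Below is a stripped-down, stand-alone version of that ring, not part of the patch, with made-up type and function names.

#include <stdio.h>

#define EVENT_QUEUE_SIZE 10

struct event {
	int hp_slot;
	int event_type;
};

struct event_ring {
	struct event queue[EVENT_QUEUE_SIZE];
	int next_event;
};

/* Hand back the next slot in the ring and advance the producer index. */
static struct event *ring_next(struct event_ring *ring)
{
	struct event *e = &ring->queue[ring->next_event];

	ring->next_event = (ring->next_event + 1) % EVENT_QUEUE_SIZE;
	return e;
}

int main(void)
{
	struct event_ring ring = { .next_event = 0 };
	struct event *e;
	int i;

	for (i = 0; i < 12; i++) {	/* 12 > 10: the last two wrap around */
		e = ring_next(&ring);
		e->hp_slot = i % 6;
		e->event_type = 1;
	}
	printf("next_event ended at %d\n", ring.next_event);
	return 0;
}
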
@@ -131,9 +132,8 @@ static struct slot *cpqhp_find_slot(struct controller *ctrl, u8 device)
{
struct slot *slot = ctrl->slot;
- while (slot && (slot->device != device)) {
+ while (slot && (slot->device != device))
slot = slot->next;
- }
return slot;
}
@@ -152,17 +152,17 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
if (!change)
return 0;
- /**********************************
+ /*
* Presence Change
- **********************************/
+ */
dbg("cpqsbd: Presence/Notify input change.\n");
dbg(" Changed bits are 0x%4.4x\n", change );
for (hp_slot = 0; hp_slot < 6; hp_slot++) {
if (change & (0x0101 << hp_slot)) {
- /**********************************
+ /*
* this one changed.
- **********************************/
+ */
func = cpqhp_slot_find(ctrl->bus,
(hp_slot + ctrl->slot_device_offset), 0);
@@ -177,22 +177,23 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
return 0;
/* If the switch closed, must be a button
- * If not in button mode, nevermind */
+		 * If not in button mode, never mind
+ */
if (func->switch_save && (ctrl->push_button == 1)) {
temp_word = ctrl->ctrl_int_comp >> 16;
temp_byte = (temp_word >> hp_slot) & 0x01;
temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02;
if (temp_byte != func->presence_save) {
- /**************************************
+ /*
* button Pressed (doesn't do anything)
- **************************************/
+ */
dbg("hp_slot %d button pressed\n", hp_slot);
taskInfo->event_type = INT_BUTTON_PRESS;
} else {
- /**********************************
+ /*
* button Released - TAKE ACTION!!!!
- **********************************/
+ */
dbg("hp_slot %d button released\n", hp_slot);
taskInfo->event_type = INT_BUTTON_RELEASE;
@@ -210,7 +211,8 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
}
} else {
/* Switch is open, assume a presence change
- * Save the presence state */
+ * Save the presence state
+ */
temp_word = ctrl->ctrl_int_comp >> 16;
func->presence_save = (temp_word >> hp_slot) & 0x01;
func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
@@ -241,17 +243,17 @@ static u8 handle_power_fault(u8 change, struct controller * ctrl)
if (!change)
return 0;
- /**********************************
+ /*
* power fault
- **********************************/
+ */
info("power fault interrupt\n");
for (hp_slot = 0; hp_slot < 6; hp_slot++) {
if (change & (0x01 << hp_slot)) {
- /**********************************
+ /*
* this one changed.
- **********************************/
+ */
func = cpqhp_slot_find(ctrl->bus,
(hp_slot + ctrl->slot_device_offset), 0);
@@ -262,16 +264,16 @@ static u8 handle_power_fault(u8 change, struct controller * ctrl)
rc++;
if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) {
- /**********************************
+ /*
* power fault Cleared
- **********************************/
+ */
func->status = 0x00;
taskInfo->event_type = INT_POWER_FAULT_CLEAR;
} else {
- /**********************************
+ /*
* power fault
- **********************************/
+ */
taskInfo->event_type = INT_POWER_FAULT;
if (ctrl->rev < 4) {
@@ -432,13 +434,15 @@ static struct pci_resource *do_pre_bridge_resource_split(struct pci_resource **h
	/* If we got here, then the bridge requires some of the resource, but
- * we may be able to split some off of the front */
+ * we may be able to split some off of the front
+ */
node = *head;
if (node->length & (alignment -1)) {
/* this one isn't an aligned length, so we'll make a new entry
- * and split it up. */
+ * and split it up.
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -544,10 +548,10 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
if (!(*head))
return NULL;
- if ( cpqhp_resource_sort_and_combine(head) )
+ if (cpqhp_resource_sort_and_combine(head))
return NULL;
- if ( sort_by_size(head) )
+ if (sort_by_size(head))
return NULL;
for (node = *head; node; node = node->next) {
@@ -556,7 +560,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
if (node->base & (size - 1)) {
/* this one isn't base aligned properly
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
temp_dword = (node->base | (size-1)) + 1;
/* Short circuit if adjusted size is too small */
@@ -581,7 +586,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
/* Don't need to check if too small since we already did */
if (node->length > size) {
/* this one is longer than we need
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -601,7 +607,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
continue;
/* If we got here, then it is the right size
- * Now take it out of the list and break */
+ * Now take it out of the list and break
+ */
if (*head == node) {
*head = node->next;
} else {
@@ -642,14 +649,16 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
return NULL;
for (max = *head; max; max = max->next) {
- /* If not big enough we could probably just bail,
- * instead we'll continue to the next. */
+ /* If not big enough we could probably just bail,
+ * instead we'll continue to the next.
+ */
if (max->length < size)
continue;
if (max->base & (size - 1)) {
/* this one isn't base aligned properly
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
temp_dword = (max->base | (size-1)) + 1;
/* Short circuit if adjusted size is too small */
@@ -672,7 +681,8 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
if ((max->base + max->length) & (size - 1)) {
/* this one isn't end aligned properly at the top
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -744,7 +754,8 @@ static struct pci_resource *get_resource(struct pci_resource **head, u32 size)
if (node->base & (size - 1)) {
dbg("%s: not aligned\n", __func__);
/* this one isn't base aligned properly
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
temp_dword = (node->base | (size-1)) + 1;
/* Short circuit if adjusted size is too small */
@@ -769,7 +780,8 @@ static struct pci_resource *get_resource(struct pci_resource **head, u32 size)
if (node->length > size) {
dbg("%s: too big\n", __func__);
/* this one is longer than we need
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -886,19 +898,19 @@ irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data)
u32 Diff;
u32 temp_dword;
-
+
misc = readw(ctrl->hpc_reg + MISC);
- /***************************************
+ /*
* Check to see if it was our interrupt
- ***************************************/
+ */
if (!(misc & 0x000C)) {
return IRQ_NONE;
}
if (misc & 0x0004) {
- /**********************************
+ /*
* Serial Output interrupt Pending
- **********************************/
+ */
/* Clear the interrupt */
misc |= 0x0004;
@@ -961,11 +973,8 @@ struct pci_func *cpqhp_slot_create(u8 busnumber)
struct pci_func *next;
new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL);
- if (new_slot == NULL) {
- /* I'm not dead yet!
- * You will be. */
+ if (new_slot == NULL)
return new_slot;
- }
new_slot->next = NULL;
new_slot->configured = 1;
@@ -996,10 +1005,8 @@ static int slot_remove(struct pci_func * old_slot)
return 1;
next = cpqhp_slot_list[old_slot->bus];
-
- if (next == NULL) {
+ if (next == NULL)
return 1;
- }
if (next == old_slot) {
cpqhp_slot_list[old_slot->bus] = old_slot->next;
@@ -1008,9 +1015,8 @@ static int slot_remove(struct pci_func * old_slot)
return 0;
}
- while ((next->next != old_slot) && (next->next != NULL)) {
+ while ((next->next != old_slot) && (next->next != NULL))
next = next->next;
- }
if (next->next == old_slot) {
next->next = old_slot->next;
@@ -1040,9 +1046,8 @@ static int bridge_slot_remove(struct pci_func *bridge)
for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) {
next = cpqhp_slot_list[tempBus];
- while (!slot_remove(next)) {
+ while (!slot_remove(next))
next = cpqhp_slot_list[tempBus];
- }
}
next = cpqhp_slot_list[bridge->bus];
@@ -1130,39 +1135,43 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER);
u16 reg16;
u32 leds = readl(ctrl->hpc_reg + LED_CONTROL);
-
+
if (ctrl->speed == adapter_speed)
return 0;
-
+
/* We don't allow freq/mode changes if we find another adapter running
- * in another slot on this controller */
+ * in another slot on this controller
+ */
for(slot = ctrl->slot; slot; slot = slot->next) {
- if (slot->device == (hp_slot + ctrl->slot_device_offset))
+ if (slot->device == (hp_slot + ctrl->slot_device_offset))
continue;
if (!slot->hotplug_slot || !slot->hotplug_slot->info)
continue;
- if (slot->hotplug_slot->info->adapter_status == 0)
+ if (slot->hotplug_slot->info->adapter_status == 0)
continue;
/* If another adapter is running on the same segment but at a
* lower speed/mode, we allow the new adapter to function at
- * this rate if supported */
- if (ctrl->speed < adapter_speed)
+ * this rate if supported
+ */
+ if (ctrl->speed < adapter_speed)
return 0;
return 1;
}
-
+
/* If the controller doesn't support freq/mode changes and the
- * controller is running at a higher mode, we bail */
+ * controller is running at a higher mode, we bail
+ */
if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability))
return 1;
-
+
/* But we allow the adapter to run at a lower rate if possible */
if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability))
return 0;
/* We try to set the max speed supported by both the adapter and
- * controller */
+ * controller
+ */
if (ctrl->speed_capability < adapter_speed) {
if (ctrl->speed == ctrl->speed_capability)
return 0;
@@ -1171,22 +1180,22 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
writel(0x0L, ctrl->hpc_reg + LED_CONTROL);
writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE);
-
- set_SOGO(ctrl);
+
+ set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
-
+
if (adapter_speed != PCI_SPEED_133MHz_PCIX)
reg = 0xF5;
else
- reg = 0xF4;
+ reg = 0xF4;
pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
-
+
reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ);
reg16 &= ~0x000F;
switch(adapter_speed) {
- case(PCI_SPEED_133MHz_PCIX):
+ case(PCI_SPEED_133MHz_PCIX):
reg = 0x75;
- reg16 |= 0xB;
+ reg16 |= 0xB;
break;
case(PCI_SPEED_100MHz_PCIX):
reg = 0x74;
@@ -1203,48 +1212,48 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
default: /* 33MHz PCI 2.2 */
reg = 0x71;
break;
-
+
}
reg16 |= 0xB << 12;
writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ);
-
- mdelay(5);
-
+
+ mdelay(5);
+
/* Reenable interrupts */
writel(0, ctrl->hpc_reg + INT_MASK);
- pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
-
+ pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
+
/* Restart state machine */
reg = ~0xF;
pci_read_config_byte(ctrl->pci_dev, 0x43, &reg);
pci_write_config_byte(ctrl->pci_dev, 0x43, reg);
-
+
/* Only if mode change...*/
if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
set_SOGO(ctrl);
-
+
wait_for_ctrl_irq(ctrl);
mdelay(1100);
-
+
/* Restore LED/Slot state */
writel(leds, ctrl->hpc_reg + LED_CONTROL);
writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE);
-
+
set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
ctrl->speed = adapter_speed;
slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- info("Successfully changed frequency/mode for adapter in slot %d\n",
+ info("Successfully changed frequency/mode for adapter in slot %d\n",
slot->number);
return 0;
}
-/* the following routines constitute the bulk of the
- hotplug controller logic
+/* the following routines constitute the bulk of the
+ * hotplug controller logic
*/
@@ -1268,17 +1277,17 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
hp_slot = func->device - ctrl->slot_device_offset;
- if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot)) {
- /**********************************
- * The switch is open.
- **********************************/
+ /*
+ * The switch is open.
+ */
+ if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot))
rc = INTERLOCK_OPEN;
- } else if (is_slot_enabled (ctrl, hp_slot)) {
- /**********************************
- * The board is already on
- **********************************/
+ /*
+ * The board is already on
+ */
+ else if (is_slot_enabled (ctrl, hp_slot))
rc = CARD_FUNCTIONING;
- } else {
+ else {
mutex_lock(&ctrl->crit_sect);
/* turn on board without attaching to the bus */
@@ -1299,7 +1308,7 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
/* Wait for SOBS to be unset */
wait_for_ctrl_irq (ctrl);
-
+
adapter_speed = get_adapter_speed(ctrl, hp_slot);
if (ctrl->speed != adapter_speed)
if (set_controller_speed(ctrl, adapter_speed, hp_slot))
@@ -1352,7 +1361,8 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
* Get slot won't work for devices behind
* bridges, but in this case it will always be
* called for the "base" bus/dev/func of an
- * adapter. */
+ * adapter.
+ */
mutex_lock(&ctrl->crit_sect);
@@ -1377,7 +1387,8 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
* Get slot won't work for devices behind bridges, but
* in this case it will always be called for the "base"
- * bus/dev/func of an adapter. */
+ * bus/dev/func of an adapter.
+ */
mutex_lock(&ctrl->crit_sect);
@@ -1434,7 +1445,8 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
wait_for_ctrl_irq (ctrl);
/* Change bits in slot power register to force another shift out
- * NOTE: this is to work around the timer bug */
+ * NOTE: this is to work around the timer bug
+ */
temp_byte = readb(ctrl->hpc_reg + SLOT_POWER);
writeb(0x00, ctrl->hpc_reg + SLOT_POWER);
writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER);
@@ -1443,12 +1455,12 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
/* Wait for SOBS to be unset */
wait_for_ctrl_irq (ctrl);
-
+
adapter_speed = get_adapter_speed(ctrl, hp_slot);
if (ctrl->speed != adapter_speed)
if (set_controller_speed(ctrl, adapter_speed, hp_slot))
rc = WRONG_BUS_FREQUENCY;
-
+
/* turn off board without attaching to the bus */
disable_slot_power (ctrl, hp_slot);
@@ -1461,7 +1473,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
if (rc)
return rc;
-
+
p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
/* turn on board and blink green LED */
@@ -1521,7 +1533,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
}
/* All F's is an empty slot or an invalid board */
- if (temp_register != 0xFFFFFFFF) { /* Check for a board in the slot */
+ if (temp_register != 0xFFFFFFFF) {
res_lists.io_head = ctrl->io_head;
res_lists.mem_head = ctrl->mem_head;
res_lists.p_mem_head = ctrl->p_mem_head;
@@ -1570,9 +1582,8 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
index = 0;
do {
new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++);
- if (new_slot && !new_slot->pci_dev) {
+ if (new_slot && !new_slot->pci_dev)
cpqhp_configure_device(ctrl, new_slot);
- }
} while (new_slot);
mutex_lock(&ctrl->crit_sect);
@@ -1859,12 +1870,12 @@ static void interrupt_event_handler(struct controller *ctrl)
info(msg_button_on, p_slot->number);
}
mutex_lock(&ctrl->crit_sect);
-
+
dbg("blink green LED and turn off amber\n");
-
+
amber_LED_off (ctrl, hp_slot);
green_LED_blink (ctrl, hp_slot);
-
+
set_SOGO(ctrl);
/* Wait for SOBS to be unset */
@@ -1958,7 +1969,7 @@ void cpqhp_pushbutton_thread(unsigned long slot)
if (cpqhp_process_SI(ctrl, func) != 0) {
amber_LED_on(ctrl, hp_slot);
green_LED_off(ctrl, hp_slot);
-
+
set_SOGO(ctrl);
/* Wait for SOBS to be unset */
@@ -2079,7 +2090,7 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
struct pci_bus *pci_bus = ctrl->pci_bus;
int physical_slot=0;
- device = func->device;
+ device = func->device;
func = cpqhp_slot_find(ctrl->bus, device, index++);
p_slot = cpqhp_find_slot(ctrl, device);
if (p_slot) {
@@ -2113,9 +2124,8 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
/* If the VGA Enable bit is set, remove isn't
* supported */
- if (BCR & PCI_BRIDGE_CTL_VGA) {
+ if (BCR & PCI_BRIDGE_CTL_VGA)
rc = REMOVE_NOT_SUPPORTED;
- }
}
}
@@ -2183,67 +2193,67 @@ int cpqhp_hardware_test(struct controller *ctrl, int test_num)
num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f;
switch (test_num) {
- case 1:
- /* Do stuff here! */
-
- /* Do that funky LED thing */
- /* so we can restore them later */
- save_LED = readl(ctrl->hpc_reg + LED_CONTROL);
- work_LED = 0x01010101;
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
-
- work_LED = 0x01010000;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
- work_LED = 0x00000101;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ case 1:
+ /* Do stuff here! */
+
+ /* Do that funky LED thing */
+ /* so we can restore them later */
+ save_LED = readl(ctrl->hpc_reg + LED_CONTROL);
+ work_LED = 0x01010101;
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+
+ work_LED = 0x01010000;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ work_LED = 0x00000101;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+
+ work_LED = 0x01010000;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ for (loop = 0; loop < num_of_slots; loop++) {
+ set_SOGO(ctrl);
- work_LED = 0x01010000;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- for (loop = 0; loop < num_of_slots; loop++) {
- set_SOGO(ctrl);
+ /* Wait for SOGO interrupt */
+ wait_for_ctrl_irq (ctrl);
- /* Wait for SOGO interrupt */
- wait_for_ctrl_irq (ctrl);
+ /* Get ready for next iteration */
+ long_delay((3*HZ)/10);
+ work_LED = work_LED >> 16;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- /* Get ready for next iteration */
- long_delay((3*HZ)/10);
- work_LED = work_LED >> 16;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
-
- set_SOGO(ctrl);
+ set_SOGO(ctrl);
- /* Wait for SOGO interrupt */
- wait_for_ctrl_irq (ctrl);
+ /* Wait for SOGO interrupt */
+ wait_for_ctrl_irq (ctrl);
- /* Get ready for next iteration */
- long_delay((3*HZ)/10);
- work_LED = work_LED << 16;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- work_LED = work_LED << 1;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- }
+ /* Get ready for next iteration */
+ long_delay((3*HZ)/10);
+ work_LED = work_LED << 16;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ work_LED = work_LED << 1;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ }
- /* put it back the way it was */
- writel(save_LED, ctrl->hpc_reg + LED_CONTROL);
+ /* put it back the way it was */
+ writel(save_LED, ctrl->hpc_reg + LED_CONTROL);
- set_SOGO(ctrl);
+ set_SOGO(ctrl);
- /* Wait for SOBS to be unset */
- wait_for_ctrl_irq (ctrl);
- break;
- case 2:
- /* Do other stuff here! */
- break;
- case 3:
- /* and more... */
- break;
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+ break;
+ case 2:
+ /* Do other stuff here! */
+ break;
+ case 3:
+ /* and more... */
+ break;
}
return 0;
}
@@ -2312,9 +2322,9 @@ static u32 configure_new_device(struct controller * ctrl, struct pci_func * func
while ((function < max_functions) && (!stop_it)) {
pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID);
- if (ID == 0xFFFFFFFF) { /* There's nothing there. */
+ if (ID == 0xFFFFFFFF) {
function++;
- } else { /* There's something there */
+ } else {
/* Setup slot structure. */
new_slot = cpqhp_slot_create(func->bus);
@@ -2339,8 +2349,8 @@ static u32 configure_new_device(struct controller * ctrl, struct pci_func * func
/*
- Configuration logic that involves the hotplug data structures and
- their bookkeeping
+ * Configuration logic that involves the hotplug data structures and
+ * their bookkeeping
*/
@@ -2393,7 +2403,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
if (rc)
return rc;
- if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* PCI-PCI Bridge */
+ if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
/* set Primary bus */
dbg("set Primary bus = %d\n", func->bus);
rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus);
@@ -2484,7 +2494,8 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
temp_resources.irqs = &irqs;
/* Make copies of the nodes we are going to pass down so that
- * if there is a problem,we can just use these to free resources */
+	 * if there is a problem, we can just use these to free resources
+ */
hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL);
hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL);
hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL);
@@ -2556,7 +2567,8 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16;
rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word);
- /* Adjust this to compensate for extra adjustment in first loop */
+	/* Adjust this to compensate for extra adjustment in first loop */
irqs.barber_pole--;
rc = 0;
@@ -2917,27 +2929,26 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
} /* End of base register loop */
if (cpqhp_legacy_mode) {
/* Figure out which interrupt pin this function uses */
- rc = pci_bus_read_config_byte (pci_bus, devfn,
+ rc = pci_bus_read_config_byte (pci_bus, devfn,
PCI_INTERRUPT_PIN, &temp_byte);
/* If this function needs an interrupt and we are behind
* a bridge and the pin is tied to something that's
		 * already mapped, set this one the same */
- if (temp_byte && resources->irqs &&
- (resources->irqs->valid_INT &
+ if (temp_byte && resources->irqs &&
+ (resources->irqs->valid_INT &
(0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) {
/* We have to share with something already set up */
- IRQ = resources->irqs->interrupt[(temp_byte +
+ IRQ = resources->irqs->interrupt[(temp_byte +
resources->irqs->barber_pole - 1) & 0x03];
} else {
/* Program IRQ based on card type */
rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code);
- if (class_code == PCI_BASE_CLASS_STORAGE) {
+ if (class_code == PCI_BASE_CLASS_STORAGE)
IRQ = cpqhp_disk_irq;
- } else {
+ else
IRQ = cpqhp_nic_irq;
- }
}
/* IRQ Line */
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index cb174888002..76ba8a1c774 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -94,12 +94,13 @@ static u8 evbuffer[1024];
static void __iomem *compaq_int15_entry_point;
-static spinlock_t int15_lock; /* lock for ordering int15_bios_call() */
+/* lock for ordering int15_bios_call() */
+static spinlock_t int15_lock;
/* This is a series of functions that deal with
- setting & getting the hotplug resource table in some environment variable.
-*/
+ * setting & getting the hotplug resource table in some environment variable.
+ */
/*
* We really shouldn't be doing this unless there is a _very_ good reason to!!!
@@ -113,7 +114,7 @@ static u32 add_byte( u32 **p_buffer, u8 value, u32 *used, u32 *avail)
if ((*used + 1) > *avail)
return(1);
-
+
*((u8*)*p_buffer) = value;
tByte = (u8**)p_buffer;
(*tByte)++;
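
add_byte() shown above, together with its add_dword() counterpart used by store_HRT() further down, appends to a flat buffer while comparing the running used count against the available space before advancing the fill pointer. The sketch below is only an illustrative stand-alone version of that bounds-checked append, not part of the patch; the signatures are simplified (avail is passed by value) and no particular byte order is implied.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int add_byte(uint8_t **fill, uint8_t value, uint32_t *used, uint32_t avail)
{
	if (*used + 1 > avail)
		return 1;		/* would overflow the buffer */
	*(*fill)++ = value;
	*used += 1;
	return 0;
}

static int add_dword(uint8_t **fill, uint32_t value, uint32_t *used, uint32_t avail)
{
	if (*used + 4 > avail)
		return 1;
	memcpy(*fill, &value, 4);	/* sketch only: host byte order */
	*fill += 4;
	*used += 4;
	return 0;
}

int main(void)
{
	uint8_t buffer[16], *fill = buffer;
	uint32_t used = 0;

	if (add_byte(&fill, 0x01, &used, sizeof(buffer)) ||
	    add_dword(&fill, 0xdeadbeef, &used, sizeof(buffer)))
		return 1;
	printf("used %u of %u bytes\n", (unsigned)used, (unsigned)sizeof(buffer));
	return 0;
}
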
@@ -170,10 +171,10 @@ static u32 access_EV (u16 operation, u8 *ev_name, u8 *buffer, u32 *buf_size)
unsigned long flags;
int op = operation;
int ret_val;
-
+
if (!compaq_int15_entry_point)
return -ENODEV;
-
+
spin_lock_irqsave(&int15_lock, flags);
__asm__ (
"xorl %%ebx,%%ebx\n" \
@@ -187,7 +188,7 @@ static u32 access_EV (u16 operation, u8 *ev_name, u8 *buffer, u32 *buf_size)
"D" (buffer), "m" (compaq_int15_entry_point)
: "%ebx", "%edx");
spin_unlock_irqrestore(&int15_lock, flags);
-
+
return((ret_val & 0xFF00) >> 8);
}
@@ -210,14 +211,16 @@ static int load_HRT (void __iomem *rom_start)
available = 1024;
- // Now load the EV
+ /* Now load the EV */
temp_dword = available;
rc = access_EV(READ_EV, "CQTHPS", evbuffer, &temp_dword);
evbuffer_length = temp_dword;
- // We're maintaining the resource lists so write FF to invalidate old info
+ /* We're maintaining the resource lists so write FF to invalidate old
+ * info
+ */
temp_dword = 1;
rc = access_EV(WRITE_EV, "CQTHPS", &temp_byte, &temp_dword);
@@ -263,13 +266,13 @@ static u32 store_HRT (void __iomem *rom_start)
p_EV_header = (struct ev_hrt_header *) pFill;
ctrl = cpqhp_ctrl_list;
-
- // The revision of this structure
+
+ /* The revision of this structure */
rc = add_byte( &pFill, 1 + ctrl->push_flag, &usedbytes, &available);
if (rc)
return(rc);
- // The number of controllers
+ /* The number of controllers */
rc = add_byte( &pFill, 1, &usedbytes, &available);
if (rc)
return(rc);
@@ -279,27 +282,27 @@ static u32 store_HRT (void __iomem *rom_start)
numCtrl++;
- // The bus number
+ /* The bus number */
rc = add_byte( &pFill, ctrl->bus, &usedbytes, &available);
if (rc)
return(rc);
- // The device Number
+ /* The device Number */
rc = add_byte( &pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available);
if (rc)
return(rc);
- // The function Number
+ /* The function Number */
rc = add_byte( &pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available);
if (rc)
return(rc);
- // Skip the number of available entries
+ /* Skip the number of available entries */
rc = add_dword( &pFill, 0, &usedbytes, &available);
if (rc)
return(rc);
- // Figure out memory Available
+ /* Figure out memory Available */
resNode = ctrl->mem_head;
@@ -308,12 +311,12 @@ static u32 store_HRT (void __iomem *rom_start)
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -321,10 +324,10 @@ static u32 store_HRT (void __iomem *rom_start)
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->mem_avail = loop;
- // Figure out prefetchable memory Available
+ /* Figure out prefetchable memory Available */
resNode = ctrl->p_mem_head;
@@ -333,12 +336,12 @@ static u32 store_HRT (void __iomem *rom_start)
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -346,10 +349,10 @@ static u32 store_HRT (void __iomem *rom_start)
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->p_mem_avail = loop;
- // Figure out IO Available
+ /* Figure out IO Available */
resNode = ctrl->io_head;
@@ -358,12 +361,12 @@ static u32 store_HRT (void __iomem *rom_start)
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -371,10 +374,10 @@ static u32 store_HRT (void __iomem *rom_start)
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->io_avail = loop;
- // Figure out bus Available
+ /* Figure out bus Available */
resNode = ctrl->bus_head;
@@ -383,12 +386,12 @@ static u32 store_HRT (void __iomem *rom_start)
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -396,15 +399,15 @@ static u32 store_HRT (void __iomem *rom_start)
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->bus_avail = loop;
ctrl = ctrl->next;
}
-
+
p_EV_header->num_of_ctrl = numCtrl;
- // Now store the EV
+ /* Now store the EV */
temp_dword = usedbytes;
@@ -449,20 +452,21 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
struct ev_hrt_header *p_EV_header;
if (!evbuffer_init) {
- // Read the resource list information in from NVRAM
+ /* Read the resource list information in from NVRAM */
if (load_HRT(rom_start))
memset (evbuffer, 0, 1024);
evbuffer_init = 1;
}
- // If we saved information in NVRAM, use it now
+ /* If we saved information in NVRAM, use it now */
p_EV_header = (struct ev_hrt_header *) evbuffer;
- // The following code is for systems where version 1.0 of this
- // driver has been loaded, but doesn't support the hardware.
- // In that case, the driver would incorrectly store something
- // in NVRAM.
+ /* The following code is for systems where version 1.0 of this
+ * driver has been loaded, but doesn't support the hardware.
+ * In that case, the driver would incorrectly store something
+ * in NVRAM.
+ */
if ((p_EV_header->Version == 2) ||
((p_EV_header->Version == 1) && !ctrl->push_flag)) {
p_byte = &(p_EV_header->next);
@@ -479,7 +483,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
function = p_ev_ctrl->function;
while ((bus != ctrl->bus) ||
- (device != PCI_SLOT(ctrl->pci_dev->devfn)) ||
+ (device != PCI_SLOT(ctrl->pci_dev->devfn)) ||
(function != PCI_FUNC(ctrl->pci_dev->devfn))) {
nummem = p_ev_ctrl->mem_avail;
numpmem = p_ev_ctrl->p_mem_avail;
@@ -491,7 +495,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
if (p_byte > ((u8*)p_EV_header + evbuffer_length))
return 2;
- // Skip forward to the next entry
+ /* Skip forward to the next entry */
p_byte += (nummem + numpmem + numio + numbus) * 8;
if (p_byte > ((u8*)p_EV_header + evbuffer_length))
@@ -629,8 +633,9 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
ctrl->bus_head = bus_node;
}
- // If all of the following fail, we don't have any resources for
- // hot plug add
+ /* If all of the following fail, we don't have any resources for
+ * hot plug add
+ */
rc = 1;
rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
@@ -640,14 +645,14 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
if (rc)
return(rc);
} else {
- if ((evbuffer[0] != 0) && (!ctrl->push_flag))
+ if ((evbuffer[0] != 0) && (!ctrl->push_flag))
return 1;
}
return 0;
}
-
+
int compaq_nvram_store (void __iomem *rom_start)
{
int rc = 1;
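
The add_byte()/add_dword() helpers touched above serialize the hot-plug resource table into the 1024-byte EV buffer one field at a time, refusing any write that would overrun it. A stand-alone sketch of that bounded-append pattern follows (illustration only, not part of the patch; append_bytes() and the buffer sizes are invented):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bounded append, in the spirit of add_byte()/add_dword() above: refuse the
 * write when it would overrun the buffer, otherwise copy the value and
 * advance both the cursor and the used-byte count. */
static int append_bytes(uint8_t **cursor, const void *val, uint32_t len,
                        uint32_t *used, uint32_t avail)
{
	if (*used + len > avail)
		return 1;	/* caller treats non-zero as "out of room" */
	memcpy(*cursor, val, len);
	*cursor += len;
	*used += len;
	return 0;
}

int main(void)
{
	uint8_t buf[16], *p = buf;
	uint32_t used = 0, dword = 0x12345678;
	uint8_t byte = 0x42;

	if (append_bytes(&p, &byte, sizeof(byte), &used, sizeof(buf)) ||
	    append_bytes(&p, &dword, sizeof(dword), &used, sizeof(buf)))
		return 1;

	printf("used %u of %u bytes\n", (unsigned)used, (unsigned)sizeof(buf));
	return 0;
}
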
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 6c0ed0fcb8e..6173b9a4544 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -37,7 +37,6 @@
#include "../pci.h"
#include "cpqphp.h"
#include "cpqphp_nvram.h"
-#include <asm/pci_x86.h>
u8 cpqhp_nic_irq;
@@ -82,14 +81,14 @@ static void __iomem *detect_HRT_floating_pointer(void __iomem *begin, void __iom
}
-int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
+int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
{
unsigned char bus;
struct pci_bus *child;
int num;
if (func->pci_dev == NULL)
- func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function));
+ func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function));
/* No pci device, we need to create it then */
if (func->pci_dev == NULL) {
@@ -99,7 +98,7 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
if (num)
pci_bus_add_devices(ctrl->pci_dev->bus);
- func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function));
+ func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function));
if (func->pci_dev == NULL) {
dbg("ERROR: pci_dev still null\n");
return 0;
@@ -112,20 +111,24 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
pci_do_scan_bus(child);
}
+ pci_dev_put(func->pci_dev);
+
return 0;
}
-int cpqhp_unconfigure_device(struct pci_func* func)
+int cpqhp_unconfigure_device(struct pci_func* func)
{
int j;
-
+
dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function);
for (j=0; j<8 ; j++) {
- struct pci_dev* temp = pci_find_slot(func->bus, PCI_DEVFN(func->device, j));
- if (temp)
+ struct pci_dev* temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j));
+ if (temp) {
+ pci_dev_put(temp);
pci_remove_bus_device(temp);
+ }
}
return 0;
}
@@ -178,32 +181,22 @@ int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num)
if (!rc)
return !rc;
- // set the Edge Level Control Register (ELCR)
+ /* set the Edge Level Control Register (ELCR) */
temp_word = inb(0x4d0);
temp_word |= inb(0x4d1) << 8;
temp_word |= 0x01 << irq_num;
- // This should only be for x86 as it sets the Edge Level Control Register
- outb((u8) (temp_word & 0xFF), 0x4d0);
- outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1);
- rc = 0;
- }
+ /* This should only be for x86 as it sets the Edge Level
+ * Control Register
+ */
+ outb((u8) (temp_word & 0xFF), 0x4d0);
+ outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1);
+ rc = 0;
+ }
return rc;
}
-/*
- * WTF??? This function isn't in the code, yet a function calls it, but the
- * compiler optimizes it away? strange. Here as a placeholder to keep the
- * compiler happy.
- */
-static int PCI_ScanBusNonBridge (u8 bus, u8 device)
-{
- return 0;
-}
-
static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev_num)
{
u16 tdevice;
@@ -213,11 +206,11 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
ctrl->pci_bus->number = bus_num;
for (tdevice = 0; tdevice < 0xFF; tdevice++) {
- //Scan for access first
+ /* Scan for access first */
if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
continue;
dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice);
- //Yep we got one. Not a bridge ?
+ /* Yep we got one. Not a bridge ? */
if ((work >> 8) != PCI_TO_PCI_BRIDGE_CLASS) {
*dev_num = tdevice;
dbg("found it !\n");
@@ -225,16 +218,16 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
}
}
for (tdevice = 0; tdevice < 0xFF; tdevice++) {
- //Scan for access first
+ /* Scan for access first */
if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
continue;
dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice);
- //Yep we got one. bridge ?
+ /* Yep we got one. bridge ? */
if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus);
+ /* XXX: no recursion, wtf? */
dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice);
- if (PCI_ScanBusNonBridge(tbus, tdevice) == 0)
- return 0;
+ return 0;
}
}
@@ -244,39 +237,23 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot, u8 nobridge)
{
- struct irq_routing_table *PCIIRQRoutingInfoLength;
- long len;
- long loop;
+ int loop, len;
u32 work;
-
u8 tbus, tdevice, tslot;
- PCIIRQRoutingInfoLength = pcibios_get_irq_routing_table();
- if (!PCIIRQRoutingInfoLength)
- return -1;
-
- len = (PCIIRQRoutingInfoLength->size -
- sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
- // Make sure I got at least one entry
- if (len == 0) {
- kfree(PCIIRQRoutingInfoLength );
- return -1;
- }
-
+ len = cpqhp_routing_table_length();
for (loop = 0; loop < len; ++loop) {
- tbus = PCIIRQRoutingInfoLength->slots[loop].bus;
- tdevice = PCIIRQRoutingInfoLength->slots[loop].devfn;
- tslot = PCIIRQRoutingInfoLength->slots[loop].slot;
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn;
+ tslot = cpqhp_routing_table->slots[loop].slot;
if (tslot == slot) {
*bus_num = tbus;
*dev_num = tdevice;
ctrl->pci_bus->number = tbus;
pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work);
- if (!nobridge || (work == 0xffffffff)) {
- kfree(PCIIRQRoutingInfoLength );
+ if (!nobridge || (work == 0xffffffff))
return 0;
- }
dbg("bus_num %d devfn %d\n", *bus_num, *dev_num);
pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_CLASS_REVISION, &work);
@@ -287,28 +264,26 @@ static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num
dbg("Scan bus for Non Bridge: bus %d\n", tbus);
if (PCI_ScanBusForNonBridge(ctrl, tbus, dev_num) == 0) {
*bus_num = tbus;
- kfree(PCIIRQRoutingInfoLength );
return 0;
}
- } else {
- kfree(PCIIRQRoutingInfoLength );
+ } else
return 0;
- }
-
}
}
- kfree(PCIIRQRoutingInfoLength );
return -1;
}
int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 slot)
{
- return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0); //plain (bridges allowed)
+ /* plain (bridges allowed) */
+ return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0);
}
-/* More PCI configuration routines; this time centered around hotplug controller */
+/* More PCI configuration routines; this time centered around hotplug
+ * controller
+ */
/*
@@ -339,12 +314,12 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
int stop_it;
int index;
- // Decide which slots are supported
+ /* Decide which slots are supported */
if (is_hot_plug) {
- //*********************************
- // is_hot_plug is the slot mask
- //*********************************
+ /*
+ * is_hot_plug is the slot mask
+ */
FirstSupported = is_hot_plug >> 4;
LastSupported = FirstSupported + (is_hot_plug & 0x0F) - 1;
} else {
@@ -352,123 +327,127 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
LastSupported = 0x1F;
}
- // Save PCI configuration space for all devices in supported slots
+ /* Save PCI configuration space for all devices in supported slots */
ctrl->pci_bus->number = busnumber;
for (device = FirstSupported; device <= LastSupported; device++) {
ID = 0xFFFFFFFF;
- rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID);
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID);
+
+ if (ID == 0xFFFFFFFF) {
+ if (is_hot_plug) {
+ /* Setup slot structure with entry for empty
+ * slot
+ */
+ new_slot = cpqhp_slot_create(busnumber);
+ if (new_slot == NULL)
+ return 1;
- if (ID != 0xFFFFFFFF) { // device in slot
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code);
- if (rc)
- return rc;
+ new_slot->bus = (u8) busnumber;
+ new_slot->device = (u8) device;
+ new_slot->function = 0;
+ new_slot->is_a_board = 0;
+ new_slot->presence_save = 0;
+ new_slot->switch_save = 0;
+ }
+ continue;
+ }
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type);
- if (rc)
- return rc;
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code);
+ if (rc)
+ return rc;
- // If multi-function device, set max_functions to 8
- if (header_type & 0x80)
- max_functions = 8;
- else
- max_functions = 1;
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type);
+ if (rc)
+ return rc;
- function = 0;
+ /* If multi-function device, set max_functions to 8 */
+ if (header_type & 0x80)
+ max_functions = 8;
+ else
+ max_functions = 1;
- do {
- DevError = 0;
+ function = 0;
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // P-P Bridge
- // Recurse the subordinate bus
- // get the subordinate bus number
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus);
- if (rc) {
+ do {
+ DevError = 0;
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Recurse the subordinate bus
+ * get the subordinate bus number
+ */
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus);
+ if (rc) {
+ return rc;
+ } else {
+ sub_bus = (int) secondary_bus;
+
+ /* Save secondary bus cfg spc
+ * with this recursive call.
+ */
+ rc = cpqhp_save_config(ctrl, sub_bus, 0);
+ if (rc)
return rc;
- } else {
- sub_bus = (int) secondary_bus;
-
- // Save secondary bus cfg spc
- // with this recursive call.
- rc = cpqhp_save_config(ctrl, sub_bus, 0);
- if (rc)
- return rc;
- ctrl->pci_bus->number = busnumber;
- }
+ ctrl->pci_bus->number = busnumber;
}
+ }
- index = 0;
+ index = 0;
+ new_slot = cpqhp_slot_find(busnumber, device, index++);
+ while (new_slot &&
+ (new_slot->function != (u8) function))
new_slot = cpqhp_slot_find(busnumber, device, index++);
- while (new_slot &&
- (new_slot->function != (u8) function))
- new_slot = cpqhp_slot_find(busnumber, device, index++);
- if (!new_slot) {
- // Setup slot structure.
- new_slot = cpqhp_slot_create(busnumber);
-
- if (new_slot == NULL)
- return(1);
- }
-
- new_slot->bus = (u8) busnumber;
- new_slot->device = (u8) device;
- new_slot->function = (u8) function;
- new_slot->is_a_board = 1;
- new_slot->switch_save = 0x10;
- // In case of unsupported board
- new_slot->status = DevError;
- new_slot->pci_dev = pci_find_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
-
- for (cloop = 0; cloop < 0x20; cloop++) {
- rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
- if (rc)
- return rc;
- }
+ if (!new_slot) {
+ /* Setup slot structure. */
+ new_slot = cpqhp_slot_create(busnumber);
+ if (new_slot == NULL)
+ return 1;
+ }
- function++;
+ new_slot->bus = (u8) busnumber;
+ new_slot->device = (u8) device;
+ new_slot->function = (u8) function;
+ new_slot->is_a_board = 1;
+ new_slot->switch_save = 0x10;
+ /* In case of unsupported board */
+ new_slot->status = DevError;
+ new_slot->pci_dev = pci_get_bus_and_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
- stop_it = 0;
+ for (cloop = 0; cloop < 0x20; cloop++) {
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
+ if (rc)
+ return rc;
+ }
- // this loop skips to the next present function
- // reading in Class Code and Header type.
+ pci_dev_put(new_slot->pci_dev);
- while ((function < max_functions)&&(!stop_it)) {
- rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
- if (ID == 0xFFFFFFFF) { // nothing there.
- function++;
- } else { // Something there
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
- if (rc)
- return rc;
+ function++;
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type);
- if (rc)
- return rc;
+ stop_it = 0;
- stop_it++;
- }
+ /* this loop skips to the next present function
+ * reading in Class Code and Header type.
+ */
+ while ((function < max_functions) && (!stop_it)) {
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
+ if (ID == 0xFFFFFFFF) {
+ function++;
+ continue;
}
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
+ if (rc)
+ return rc;
- } while (function < max_functions);
- } // End of IF (device in slot?)
- else if (is_hot_plug) {
- // Setup slot structure with entry for empty slot
- new_slot = cpqhp_slot_create(busnumber);
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type);
+ if (rc)
+ return rc;
- if (new_slot == NULL) {
- return(1);
+ stop_it++;
}
- new_slot->bus = (u8) busnumber;
- new_slot->device = (u8) device;
- new_slot->function = 0;
- new_slot->is_a_board = 0;
- new_slot->presence_save = 0;
- new_slot->switch_save = 0;
- }
- } // End of FOR loop
+ } while (function < max_functions);
+ } /* End of FOR loop */
- return(0);
+ return 0;
}
@@ -489,7 +468,7 @@ int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot)
u8 secondary_bus;
int sub_bus;
int max_functions;
- int function;
+ int function = 0;
int cloop = 0;
int stop_it;
@@ -498,63 +477,58 @@ int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot)
ctrl->pci_bus->number = new_slot->bus;
pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_VENDOR_ID, &ID);
- if (ID != 0xFFFFFFFF) { // device in slot
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code);
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
-
- if (header_type & 0x80) // Multi-function device
- max_functions = 8;
- else
- max_functions = 1;
-
- function = 0;
-
- do {
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
- // Recurse the subordinate bus
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
+ if (ID == 0xFFFFFFFF)
+ return 2;
- sub_bus = (int) secondary_bus;
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code);
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
- // Save the config headers for the secondary bus.
- rc = cpqhp_save_config(ctrl, sub_bus, 0);
- if (rc)
- return(rc);
- ctrl->pci_bus->number = new_slot->bus;
+ if (header_type & 0x80) /* Multi-function device */
+ max_functions = 8;
+ else
+ max_functions = 1;
- } // End of IF
+ while (function < max_functions) {
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Recurse the subordinate bus */
+ pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
- new_slot->status = 0;
+ sub_bus = (int) secondary_bus;
- for (cloop = 0; cloop < 0x20; cloop++) {
- pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
- }
+ /* Save the config headers for the secondary
+ * bus.
+ */
+ rc = cpqhp_save_config(ctrl, sub_bus, 0);
+ if (rc)
+ return(rc);
+ ctrl->pci_bus->number = new_slot->bus;
- function++;
+ }
- stop_it = 0;
+ new_slot->status = 0;
- // this loop skips to the next present function
- // reading in the Class Code and the Header type.
+ for (cloop = 0; cloop < 0x20; cloop++)
+ pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
- while ((function < max_functions) && (!stop_it)) {
- pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
+ function++;
- if (ID == 0xFFFFFFFF) { // nothing there.
- function++;
- } else { // Something there
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
+ stop_it = 0;
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type);
+ /* this loop skips to the next present function
+ * reading in the Class Code and the Header type.
+ */
+ while ((function < max_functions) && (!stop_it)) {
+ pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
- stop_it++;
- }
+ if (ID == 0xFFFFFFFF)
+ function++;
+ else {
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type);
+ stop_it++;
}
+ }
- } while (function < max_functions);
- } // End of IF (device in slot?)
- else {
- return 2;
}
return 0;
@@ -590,11 +564,10 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
- // Check for Bridge
+ /* Check for Bridge */
pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
- // PCI-PCI Bridge
pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
sub_bus = (int) secondary_bus;
@@ -610,23 +583,27 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
}
pci_bus->number = func->bus;
- //FIXME: this loop is duplicated in the non-bridge case. The two could be rolled together
- // Figure out IO and memory base lengths
+ /* FIXME: this loop is duplicated in the non-bridge
+ * case. The two could be rolled together. Figure out
+ * IO and memory base lengths.
+ */
for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
temp_register = 0xFFFFFFFF;
pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
-
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (base & 0x01L) {
- // IO base
- // set base = amount of IO space requested
+ /* IO base
+ * set base = amount of IO space
+ * requested
+ */
base = base & 0xFFFFFFFE;
base = (~base) + 1;
type = 1;
} else {
- // memory base
+ /* memory base */
base = base & 0xFFFFFFF0;
base = (~base) + 1;
@@ -637,32 +614,36 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
type = 0;
}
- // Save information in slot structure
+ /* Save information in slot structure */
func->base_length[(cloop - 0x10) >> 2] =
base;
func->base_type[(cloop - 0x10) >> 2] = type;
- } // End of base register loop
+ } /* End of base register loop */
-
- } else if ((header_type & 0x7F) == 0x00) { // PCI-PCI Bridge
- // Figure out IO and memory base lengths
+ } else if ((header_type & 0x7F) == 0x00) {
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
temp_register = 0xFFFFFFFF;
pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (base & 0x01L) {
- // IO base
- // base = amount of IO space requested
+ /* IO base
+ * base = amount of IO space
+ * requested
+ */
base = base & 0xFFFFFFFE;
base = (~base) + 1;
type = 1;
} else {
- // memory base
- // base = amount of memory space requested
+ /* memory base
+ * base = amount of memory
+ * space requested
+ */
base = base & 0xFFFFFFF0;
base = (~base) + 1;
@@ -673,16 +654,16 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
type = 0;
}
- // Save information in slot structure
+ /* Save information in slot structure */
func->base_length[(cloop - 0x10) >> 2] = base;
func->base_type[(cloop - 0x10) >> 2] = type;
- } // End of base register loop
+ } /* End of base register loop */
- } else { // Some other unknown header type
+ } else { /* Some other unknown header type */
}
- // find the next device in this slot
+ /* find the next device in this slot */
func = cpqhp_slot_find(func->bus, func->device, index++);
}
@@ -728,18 +709,18 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
- // Save the command register
+ /* Save the command register */
pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &save_command);
- // disable card
+ /* disable card */
command = 0x00;
pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command);
- // Check for Bridge
+ /* Check for Bridge */
pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
- // Clear Bridge Control Register
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Clear Bridge Control Register */
command = 0x00;
pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command);
pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
@@ -755,7 +736,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
bus_node->next = func->bus_head;
func->bus_head = bus_node;
- // Save IO base and Limit registers
+ /* Save IO base and Limit registers */
pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_BASE, &b_base);
pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_LIMIT, &b_length);
@@ -771,7 +752,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
func->io_head = io_node;
}
- // Save memory base and Limit registers
+ /* Save memory base and Limit registers */
pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_BASE, &w_base);
pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, &w_length);
@@ -787,7 +768,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
func->mem_head = mem_node;
}
- // Save prefetchable memory base and Limit registers
+ /* Save prefetchable memory base and Limit registers */
pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, &w_base);
pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length);
@@ -802,7 +783,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
p_mem_node->next = func->p_mem_head;
func->p_mem_head = p_mem_node;
}
- // Figure out IO and memory base lengths
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
pci_bus_read_config_dword (pci_bus, devfn, cloop, &save_base);
@@ -812,11 +793,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
temp_register = base;
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (((base & 0x03L) == 0x01)
&& (save_command & 0x01)) {
- // IO base
- // set temp_register = amount of IO space requested
+ /* IO base
+ * set temp_register = amount
+ * of IO space requested
+ */
temp_register = base & 0xFFFFFFFE;
temp_register = (~temp_register) + 1;
@@ -834,7 +818,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
if (((base & 0x0BL) == 0x08)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -851,7 +835,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
if (((base & 0x0BL) == 0x00)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -868,9 +852,10 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
return(1);
}
- } // End of base register loop
- } else if ((header_type & 0x7F) == 0x00) { // Standard header
- // Figure out IO and memory base lengths
+ } /* End of base register loop */
+ /* Standard header */
+ } else if ((header_type & 0x7F) == 0x00) {
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base);
@@ -880,11 +865,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
temp_register = base;
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (((base & 0x03L) == 0x01)
&& (save_command & 0x01)) {
- // IO base
- // set temp_register = amount of IO space requested
+ /* IO base
+ * set temp_register = amount
+ * of IO space requested
+ */
temp_register = base & 0xFFFFFFFE;
temp_register = (~temp_register) + 1;
@@ -901,7 +889,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
if (((base & 0x0BL) == 0x08)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -918,7 +906,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
if (((base & 0x0BL) == 0x00)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -935,15 +923,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
return(1);
}
- } // End of base register loop
- } else { // Some other unknown header type
+ } /* End of base register loop */
}
- // find the next device in this slot
+ /* find the next device in this slot */
func = cpqhp_slot_find(func->bus, func->device, index++);
}
- return(0);
+ return 0;
}
@@ -975,16 +962,16 @@ int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
- // Start at the top of config space so that the control
- // registers are programmed last
- for (cloop = 0x3C; cloop > 0; cloop -= 4) {
+ /* Start at the top of config space so that the control
+ * registers are programmed last
+ */
+ for (cloop = 0x3C; cloop > 0; cloop -= 4)
pci_bus_write_config_dword (pci_bus, devfn, cloop, func->config_space[cloop >> 2]);
- }
pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- // If this is a bridge device, restore subordinate devices
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
+ /* If this is a bridge device, restore subordinate devices */
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
sub_bus = (int) secondary_bus;
@@ -1000,8 +987,9 @@ int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
}
} else {
- // Check all the base Address Registers to make sure
- // they are the same. If not, the board is different.
+ /* Check all the base Address Registers to make sure
+ * they are the same. If not, the board is different.
+ */
for (cloop = 16; cloop < 40; cloop += 4) {
pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp);
@@ -1058,27 +1046,28 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
pci_bus_read_config_dword (pci_bus, devfn, PCI_VENDOR_ID, &temp_register);
- // No adapter present
+ /* No adapter present */
if (temp_register == 0xFFFFFFFF)
return(NO_ADAPTER_PRESENT);
if (temp_register != func->config_space[0])
return(ADAPTER_NOT_SAME);
- // Check for same revision number and class code
+ /* Check for same revision number and class code */
pci_bus_read_config_dword (pci_bus, devfn, PCI_CLASS_REVISION, &temp_register);
- // Adapter not the same
+ /* Adapter not the same */
if (temp_register != func->config_space[0x08 >> 2])
return(ADAPTER_NOT_SAME);
- // Check for Bridge
+ /* Check for Bridge */
pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
- // In order to continue checking, we must program the
- // bus registers in the bridge to respond to accesses
- // for it's subordinate bus(es)
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* In order to continue checking, we must program the
+ * bus registers in the bridge to respond to accesses
+ * for its subordinate bus(es)
+ */
temp_register = func->config_space[0x18 >> 2];
pci_bus_write_config_dword (pci_bus, devfn, PCI_PRIMARY_BUS, temp_register);
@@ -1096,35 +1085,39 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
}
}
- // Check to see if it is a standard config header
+ /* Check to see if it is a standard config header */
else if ((header_type & 0x7F) == PCI_HEADER_TYPE_NORMAL) {
- // Check subsystem vendor and ID
+ /* Check subsystem vendor and ID */
pci_bus_read_config_dword (pci_bus, devfn, PCI_SUBSYSTEM_VENDOR_ID, &temp_register);
if (temp_register != func->config_space[0x2C >> 2]) {
- // If it's a SMART-2 and the register isn't filled
- // in, ignore the difference because
- // they just have an old rev of the firmware
-
+ /* If it's a SMART-2 and the register isn't
+ * filled in, ignore the difference because
+ * they just have an old rev of the firmware
+ */
if (!((func->config_space[0] == 0xAE100E11)
&& (temp_register == 0x00L)))
return(ADAPTER_NOT_SAME);
}
- // Figure out IO and memory base lengths
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
temp_register = 0xFFFFFFFF;
pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
- if (base) { // If this register is implemented
+
+ /* If this register is implemented */
+ if (base) {
if (base & 0x01L) {
- // IO base
- // set base = amount of IO space requested
+ /* IO base
+ * set base = amount of IO
+ * space requested
+ */
base = base & 0xFFFFFFFE;
base = (~base) + 1;
type = 1;
} else {
- // memory base
+ /* memory base */
base = base & 0xFFFFFFF0;
base = (~base) + 1;
@@ -1135,23 +1128,24 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
type = 0;
}
- // Check information in slot structure
+ /* Check information in slot structure */
if (func->base_length[(cloop - 0x10) >> 2] != base)
return(ADAPTER_NOT_SAME);
if (func->base_type[(cloop - 0x10) >> 2] != type)
return(ADAPTER_NOT_SAME);
- } // End of base register loop
+ } /* End of base register loop */
- } // End of (type 0 config space) else
+ } /* End of (type 0 config space) else */
else {
- // this is not a type 0 or 1 config space header so
- // we don't know how to do it
+ /* this is not a type 0 or 1 config space header so
+ * we don't know how to do it
+ */
return(DEVICE_TYPE_NOT_SUPPORTED);
}
- // Get the next function
+ /* Get the next function */
func = cpqhp_slot_find(func->bus, func->device, index++);
}
@@ -1168,7 +1162,7 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
* this function is for hot plug ADD!
*
* returns 0 if success
- */
+ */
int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_start)
{
u8 temp;
@@ -1187,10 +1181,10 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
rom_resource_table = detect_HRT_floating_pointer(rom_start, rom_start+0xffff);
dbg("rom_resource_table = %p\n", rom_resource_table);
- if (rom_resource_table == NULL) {
+ if (rom_resource_table == NULL)
return -ENODEV;
- }
- // Sum all resources and setup resource maps
+
+ /* Sum all resources and setup resource maps */
unused_IRQ = readl(rom_resource_table + UNUSED_IRQ);
dbg("unused_IRQ = %x\n", unused_IRQ);
@@ -1222,13 +1216,11 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
temp = 0;
- if (!cpqhp_nic_irq) {
+ if (!cpqhp_nic_irq)
cpqhp_nic_irq = ctrl->cfgspc_irq;
- }
- if (!cpqhp_disk_irq) {
+ if (!cpqhp_disk_irq)
cpqhp_disk_irq = ctrl->cfgspc_irq;
- }
dbg("cpqhp_disk_irq, cpqhp_nic_irq= %d, %d\n", cpqhp_disk_irq, cpqhp_nic_irq);
@@ -1262,13 +1254,13 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
dev_func, io_base, io_length, mem_base, mem_length, pre_mem_base, pre_mem_length,
primary_bus, secondary_bus, max_bus);
- // If this entry isn't for our controller's bus, ignore it
+ /* If this entry isn't for our controller's bus, ignore it */
if (primary_bus != ctrl->bus) {
i--;
one_slot += sizeof (struct slot_rt);
continue;
}
- // find out if this entry is for an occupied slot
+ /* find out if this entry is for an occupied slot */
ctrl->pci_bus->number = primary_bus;
pci_bus_read_config_dword (ctrl->pci_bus, dev_func, PCI_VENDOR_ID, &temp_dword);
dbg("temp_D_word = %x\n", temp_dword);
@@ -1282,13 +1274,13 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
func = cpqhp_slot_find(primary_bus, dev_func >> 3, index++);
}
- // If we can't find a match, skip this table entry
+ /* If we can't find a match, skip this table entry */
if (!func) {
i--;
one_slot += sizeof (struct slot_rt);
continue;
}
- // this may not work and shouldn't be used
+ /* this may not work and shouldn't be used */
if (secondary_bus != primary_bus)
bridged_slot = 1;
else
@@ -1301,7 +1293,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
}
- // If we've got a valid IO base, use it
+ /* If we've got a valid IO base, use it */
temp_dword = io_base + io_length;
@@ -1325,7 +1317,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
}
}
- // If we've got a valid memory base, use it
+ /* If we've got a valid memory base, use it */
temp_dword = mem_base + mem_length;
if ((mem_base) && (temp_dword < 0x10000)) {
mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL);
@@ -1348,8 +1340,9 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
}
}
- // If we've got a valid prefetchable memory base, and
- // the base + length isn't greater than 0xFFFF
+ /* If we've got a valid prefetchable memory base, and
+ * the base + length isn't greater than 0xFFFF
+ */
temp_dword = pre_mem_base + pre_mem_length;
if ((pre_mem_base) && (temp_dword < 0x10000)) {
p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL);
@@ -1372,9 +1365,10 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
}
}
- // If we've got a valid bus number, use it
- // The second condition is to ignore bus numbers on
- // populated slots that don't have PCI-PCI bridges
+ /* If we've got a valid bus number, use it
+ * The second condition is to ignore bus numbers on
+ * populated slots that don't have PCI-PCI bridges
+ */
if (secondary_bus && (secondary_bus != primary_bus)) {
bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL);
if (!bus_node)
@@ -1398,8 +1392,9 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
one_slot += sizeof (struct slot_rt);
}
- // If all of the following fail, we don't have any resources for
- // hot plug add
+ /* If all of the following fail, we don't have any resources for
+ * hot plug add
+ */
rc = 1;
rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
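
The hunks above replace pci_find_slot(), which returns an unreferenced pointer, with pci_get_bus_and_slot(), which takes a reference that must later be dropped with pci_dev_put(). A minimal sketch of that lookup pattern as it would look in a 2.6.3x-era tree (illustration only, not code from the patch; example_peek_device() is an invented name):

#include <linux/errno.h>
#include <linux/pci.h>

/* Look the device up, use it, then drop the reference that
 * pci_get_bus_and_slot() took. */
static int example_peek_device(u8 bus, u8 device, u8 function)
{
	struct pci_dev *dev;

	dev = pci_get_bus_and_slot(bus, PCI_DEVFN(device, function));
	if (!dev)
		return -ENODEV;

	/* ... inspect dev->vendor, dev->device, config space, etc. ... */

	pci_dev_put(dev);	/* balance the reference from the lookup */
	return 0;
}

cpqhp_configure_device() and cpqhp_save_config() above follow this same acquire/use/release shape, which is why pci_dev_put() calls appear alongside the converted lookups.
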
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c
index a13abf55d78..8450f4a6568 100644
--- a/drivers/pci/hotplug/cpqphp_sysfs.c
+++ b/drivers/pci/hotplug/cpqphp_sysfs.c
@@ -225,7 +225,8 @@ void cpqhp_shutdown_debugfs(void)
void cpqhp_create_debugfs_files(struct controller *ctrl)
{
- ctrl->dentry = debugfs_create_file(ctrl->pci_dev->dev.bus_id, S_IRUGO, root, ctrl, &debug_ops);
+ ctrl->dentry = debugfs_create_file(dev_name(&ctrl->pci_dev->dev),
+ S_IRUGO, root, ctrl, &debug_ops);
}
void cpqhp_remove_debugfs_files(struct controller *ctrl)
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
index d8649e12729..6151389fd90 100644
--- a/drivers/pci/hotplug/fakephp.c
+++ b/drivers/pci/hotplug/fakephp.c
@@ -1,395 +1,163 @@
-/*
- * Fake PCI Hot Plug Controller Driver
+/* Works like the fakephp driver used to, except a little better.
*
- * Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>
- * Copyright (C) 2003 IBM Corp.
- * Copyright (C) 2003 Rolf Eike Beer <eike-kernel@sf-tec.de>
+ * - It's possible to remove devices with subordinate busses.
+ * - New PCI devices that appear via any method, not just a fakephp triggered
+ * rescan, will be noticed.
+ * - Devices that are removed via any method, not just a fakephp triggered
+ * removal, will also be noticed.
*
- * Based on ideas and code from:
- * Vladimir Kondratiev <vladimir.kondratiev@intel.com>
- * Rolf Eike Beer <eike-kernel@sf-tec.de>
+ * Uses nothing from the pci-hotplug subsystem.
*
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
- *
- * Send feedback to <greg@kroah.com>
*/
-/*
- *
- * This driver will "emulate" removing PCI devices from the system. If
- * the "power" file is written to with "0" then the specified PCI device
- * will be completely removed from the kernel.
- *
- * WARNING, this does NOT turn off the power to the PCI device. This is
- * a "logical" removal, not a physical or electrical removal.
- *
- * Use this module at your own risk, you have been warned!
- *
- * Enabling PCI devices is left as an exercise for the reader...
- *
- */
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/pci_hotplug.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/device.h>
#include "../pci.h"
-#if !defined(MODULE)
- #define MY_NAME "fakephp"
-#else
- #define MY_NAME THIS_MODULE->name
-#endif
-
-#define dbg(format, arg...) \
- do { \
- if (debug) \
- printk(KERN_DEBUG "%s: " format, \
- MY_NAME , ## arg); \
- } while (0)
-#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
-#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
-
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>"
-#define DRIVER_DESC "Fake PCI Hot Plug Controller Driver"
-
-struct dummy_slot {
- struct list_head node;
- struct hotplug_slot *slot;
- struct pci_dev *dev;
- struct work_struct remove_work;
- unsigned long removed;
+struct legacy_slot {
+ struct kobject kobj;
+ struct pci_dev *dev;
+ struct list_head list;
};
-static int debug;
-static int dup_slots;
-static LIST_HEAD(slot_list);
-static struct workqueue_struct *dummyphp_wq;
-
-static void pci_rescan_worker(struct work_struct *work);
-static DECLARE_WORK(pci_rescan_work, pci_rescan_worker);
-
-static int enable_slot (struct hotplug_slot *slot);
-static int disable_slot (struct hotplug_slot *slot);
+static LIST_HEAD(legacy_list);
-static struct hotplug_slot_ops dummy_hotplug_slot_ops = {
- .owner = THIS_MODULE,
- .enable_slot = enable_slot,
- .disable_slot = disable_slot,
-};
-
-static void dummy_release(struct hotplug_slot *slot)
+static ssize_t legacy_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
{
- struct dummy_slot *dslot = slot->private;
-
- list_del(&dslot->node);
- kfree(dslot->slot->info);
- kfree(dslot->slot);
- pci_dev_put(dslot->dev);
- kfree(dslot);
+ struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
+ strcpy(buf, "1\n");
+ return 2;
}
-#define SLOT_NAME_SIZE 8
-
-static int add_slot(struct pci_dev *dev)
+static void remove_callback(void *data)
{
- struct dummy_slot *dslot;
- struct hotplug_slot *slot;
- char name[SLOT_NAME_SIZE];
- int retval = -ENOMEM;
- static int count = 1;
-
- slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
- if (!slot)
- goto error;
-
- slot->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
- if (!slot->info)
- goto error_slot;
-
- slot->info->power_status = 1;
- slot->info->max_bus_speed = PCI_SPEED_UNKNOWN;
- slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
-
- dslot = kzalloc(sizeof(struct dummy_slot), GFP_KERNEL);
- if (!dslot)
- goto error_info;
-
- if (dup_slots)
- snprintf(name, SLOT_NAME_SIZE, "fake");
- else
- snprintf(name, SLOT_NAME_SIZE, "fake%d", count++);
- dbg("slot->name = %s\n", name);
- slot->ops = &dummy_hotplug_slot_ops;
- slot->release = &dummy_release;
- slot->private = dslot;
-
- retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn), name);
- if (retval) {
- err("pci_hp_register failed with error %d\n", retval);
- goto error_dslot;
- }
-
- dbg("slot->name = %s\n", hotplug_slot_name(slot));
- dslot->slot = slot;
- dslot->dev = pci_dev_get(dev);
- list_add (&dslot->node, &slot_list);
- return retval;
-
-error_dslot:
- kfree(dslot);
-error_info:
- kfree(slot->info);
-error_slot:
- kfree(slot);
-error:
- return retval;
+ pci_remove_bus_device((struct pci_dev *)data);
}
-static int __init pci_scan_buses(void)
+static ssize_t legacy_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t len)
{
- struct pci_dev *dev = NULL;
- int lastslot = 0;
+ struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
+ unsigned long val;
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- if (PCI_FUNC(dev->devfn) > 0 &&
- lastslot == PCI_SLOT(dev->devfn))
- continue;
- lastslot = PCI_SLOT(dev->devfn);
- add_slot(dev);
- }
+ if (strict_strtoul(buf, 0, &val) < 0)
+ return -EINVAL;
- return 0;
+ if (val)
+ pci_rescan_bus(slot->dev->bus);
+ else
+ sysfs_schedule_callback(&slot->dev->dev.kobj, remove_callback,
+ slot->dev, THIS_MODULE);
+ return len;
}
-static void remove_slot(struct dummy_slot *dslot)
-{
- int retval;
-
- dbg("removing slot %s\n", hotplug_slot_name(dslot->slot));
- retval = pci_hp_deregister(dslot->slot);
- if (retval)
- err("Problem unregistering a slot %s\n",
- hotplug_slot_name(dslot->slot));
-}
+static struct attribute *legacy_attrs[] = {
+ &(struct attribute){ .name = "power", .mode = 0644 },
+ NULL,
+};
-/* called from the single-threaded workqueue handler to remove a slot */
-static void remove_slot_worker(struct work_struct *work)
+static void legacy_release(struct kobject *kobj)
{
- struct dummy_slot *dslot =
- container_of(work, struct dummy_slot, remove_work);
- remove_slot(dslot);
-}
+ struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
-/**
- * pci_rescan_slot - Rescan slot
- * @temp: Device template. Should be set: bus and devfn.
- *
- * Tries hard not to re-enable already existing devices;
- * also handles scanning of subfunctions.
- */
-static int pci_rescan_slot(struct pci_dev *temp)
-{
- struct pci_bus *bus = temp->bus;
- struct pci_dev *dev;
- int func;
- u8 hdr_type;
- int count = 0;
-
- if (!pci_read_config_byte(temp, PCI_HEADER_TYPE, &hdr_type)) {
- temp->hdr_type = hdr_type & 0x7f;
- if ((dev = pci_get_slot(bus, temp->devfn)) != NULL)
- pci_dev_put(dev);
- else {
- dev = pci_scan_single_device(bus, temp->devfn);
- if (dev) {
- dbg("New device on %s function %x:%x\n",
- bus->name, temp->devfn >> 3,
- temp->devfn & 7);
- count++;
- }
- }
- /* multifunction device? */
- if (!(hdr_type & 0x80))
- return count;
-
- /* continue scanning for other functions */
- for (func = 1, temp->devfn++; func < 8; func++, temp->devfn++) {
- if (pci_read_config_byte(temp, PCI_HEADER_TYPE, &hdr_type))
- continue;
- temp->hdr_type = hdr_type & 0x7f;
-
- if ((dev = pci_get_slot(bus, temp->devfn)) != NULL)
- pci_dev_put(dev);
- else {
- dev = pci_scan_single_device(bus, temp->devfn);
- if (dev) {
- dbg("New device on %s function %x:%x\n",
- bus->name, temp->devfn >> 3,
- temp->devfn & 7);
- count++;
- }
- }
- }
- }
-
- return count;
+ pci_dev_put(slot->dev);
+ kfree(slot);
}
+static struct kobj_type legacy_ktype = {
+ .sysfs_ops = &(struct sysfs_ops){
+ .store = legacy_store, .show = legacy_show
+ },
+ .release = &legacy_release,
+ .default_attrs = legacy_attrs,
+};
-/**
- * pci_rescan_bus - Rescan PCI bus
- * @bus: the PCI bus to rescan
- *
- * Call pci_rescan_slot for each possible function of the bus.
- */
-static void pci_rescan_bus(const struct pci_bus *bus)
+static int legacy_add_slot(struct pci_dev *pdev)
{
- unsigned int devfn;
- struct pci_dev *dev;
- int retval;
- int found = 0;
- dev = alloc_pci_dev();
- if (!dev)
- return;
+ struct legacy_slot *slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- dev->bus = (struct pci_bus*)bus;
- dev->sysdata = bus->sysdata;
- for (devfn = 0; devfn < 0x100; devfn += 8) {
- dev->devfn = devfn;
- found += pci_rescan_slot(dev);
- }
-
- if (found) {
- pci_bus_assign_resources(bus);
- list_for_each_entry(dev, &bus->devices, bus_list) {
- /* Skip already-added devices */
- if (dev->is_added)
- continue;
- retval = pci_bus_add_device(dev);
- if (retval)
- dev_err(&dev->dev,
- "Error adding device, continuing\n");
- else
- add_slot(dev);
- }
- pci_bus_add_devices(bus);
- }
- kfree(dev);
-}
+ if (!slot)
+ return -ENOMEM;
-/* recursively scan all buses */
-static void pci_rescan_buses(const struct list_head *list)
-{
- const struct list_head *l;
- list_for_each(l,list) {
- const struct pci_bus *b = pci_bus_b(l);
- pci_rescan_bus(b);
- pci_rescan_buses(&b->children);
+ if (kobject_init_and_add(&slot->kobj, &legacy_ktype,
+ &pci_slots_kset->kobj, "%s",
+ dev_name(&pdev->dev))) {
+ dev_warn(&pdev->dev, "Failed to create legacy fake slot\n");
+ return -EINVAL;
}
-}
+ slot->dev = pci_dev_get(pdev);
-/* initiate rescan of all pci buses */
-static inline void pci_rescan(void) {
- pci_rescan_buses(&pci_root_buses);
-}
-
-/* called from the single-threaded workqueue handler to rescan all pci buses */
-static void pci_rescan_worker(struct work_struct *work)
-{
- pci_rescan();
-}
+ list_add(&slot->list, &legacy_list);
-static int enable_slot(struct hotplug_slot *hotplug_slot)
-{
- /* mis-use enable_slot for rescanning of the pci bus */
- cancel_work_sync(&pci_rescan_work);
- queue_work(dummyphp_wq, &pci_rescan_work);
return 0;
}
-static int disable_slot(struct hotplug_slot *slot)
+static int legacy_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- struct dummy_slot *dslot;
- struct pci_dev *dev;
- int func;
-
- if (!slot)
- return -ENODEV;
- dslot = slot->private;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(slot));
+ struct pci_dev *pdev = to_pci_dev(data);
- for (func = 7; func >= 0; func--) {
- dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func);
- if (!dev)
- continue;
+ if (action == BUS_NOTIFY_ADD_DEVICE) {
+ legacy_add_slot(pdev);
+ } else if (action == BUS_NOTIFY_DEL_DEVICE) {
+ struct legacy_slot *slot;
- if (test_and_set_bit(0, &dslot->removed)) {
- dbg("Slot already scheduled for removal\n");
- pci_dev_put(dev);
- return -ENODEV;
- }
+ list_for_each_entry(slot, &legacy_list, list)
+ if (slot->dev == pdev)
+ goto found;
- /* remove the device from the pci core */
- pci_remove_bus_device(dev);
-
- /* queue work item to blow away this sysfs entry and other
- * parts.
- */
- INIT_WORK(&dslot->remove_work, remove_slot_worker);
- queue_work(dummyphp_wq, &dslot->remove_work);
-
- pci_dev_put(dev);
+ dev_warn(&pdev->dev, "Missing legacy fake slot?");
+ return -ENODEV;
+found:
+ kobject_del(&slot->kobj);
+ list_del(&slot->list);
+ kobject_put(&slot->kobj);
}
+
return 0;
}
-static void cleanup_slots (void)
-{
- struct list_head *tmp;
- struct list_head *next;
- struct dummy_slot *dslot;
-
- destroy_workqueue(dummyphp_wq);
- list_for_each_safe (tmp, next, &slot_list) {
- dslot = list_entry (tmp, struct dummy_slot, node);
- remove_slot(dslot);
- }
-
-}
+static struct notifier_block legacy_notifier = {
+ .notifier_call = legacy_notify
+};
-static int __init dummyphp_init(void)
+static int __init init_legacy(void)
{
- info(DRIVER_DESC "\n");
+ struct pci_dev *pdev = NULL;
- dummyphp_wq = create_singlethread_workqueue(MY_NAME);
- if (!dummyphp_wq)
- return -ENOMEM;
+ /* Add existing devices */
+ while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)))
+ legacy_add_slot(pdev);
- return pci_scan_buses();
+ /* Be alerted of any new ones */
+ bus_register_notifier(&pci_bus_type, &legacy_notifier);
+ return 0;
}
+module_init(init_legacy);
-
-static void __exit dummyphp_exit(void)
+static void __exit remove_legacy(void)
{
- cleanup_slots();
+ struct legacy_slot *slot, *tmp;
+
+ bus_unregister_notifier(&pci_bus_type, &legacy_notifier);
+
+ list_for_each_entry_safe(slot, tmp, &legacy_list, list) {
+ list_del(&slot->list);
+ kobject_del(&slot->kobj);
+ kobject_put(&slot->kobj);
+ }
}
+module_exit(remove_legacy);
-module_init(dummyphp_init);
-module_exit(dummyphp_exit);
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Trent Piepho <xyzzy@speakeasy.org>");
+MODULE_DESCRIPTION("Legacy version of the fakephp interface");
MODULE_LICENSE("GPL");
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-module_param(dup_slots, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(dup_slots, "Force duplicate slot names for debugging");
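
Rather than registering hotplug slots, the rewritten driver above watches the PCI bus directly through a bus notifier. The following is a minimal, self-contained sketch of that notifier mechanism (illustrative module only, not part of the patch; names prefixed demo_ are invented):

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/pci.h>

/* Log every PCI device arrival and departure seen on pci_bus_type. */
static int demo_notify(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);

	if (action == BUS_NOTIFY_ADD_DEVICE)
		dev_info(&pdev->dev, "device added\n");
	else if (action == BUS_NOTIFY_DEL_DEVICE)
		dev_info(&pdev->dev, "device about to be removed\n");

	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_notify,
};

static int __init demo_init(void)
{
	return bus_register_notifier(&pci_bus_type, &demo_nb);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	bus_unregister_notifier(&pci_bus_type, &demo_nb);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");

The driver builds on this mechanism by keeping one legacy_slot kobject per device and exposing the old "power" attribute through sysfs.
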
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index dd18f857dfb..7485ffda950 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -153,45 +153,47 @@ int ibmphp_init_devno(struct slot **cur_slot)
return -1;
}
for (loop = 0; loop < len; loop++) {
- if ((*cur_slot)->number == rtable->slots[loop].slot) {
- if ((*cur_slot)->bus == rtable->slots[loop].bus) {
+ if ((*cur_slot)->number == rtable->slots[loop].slot &&
+ (*cur_slot)->bus == rtable->slots[loop].bus) {
+ struct io_apic_irq_attr irq_attr;
+
(*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn);
for (i = 0; i < 4; i++)
(*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus,
- (int) (*cur_slot)->device, i);
-
- debug("(*cur_slot)->irq[0] = %x\n",
- (*cur_slot)->irq[0]);
- debug("(*cur_slot)->irq[1] = %x\n",
- (*cur_slot)->irq[1]);
- debug("(*cur_slot)->irq[2] = %x\n",
- (*cur_slot)->irq[2]);
- debug("(*cur_slot)->irq[3] = %x\n",
- (*cur_slot)->irq[3]);
-
- debug("rtable->exlusive_irqs = %x\n",
+ (int) (*cur_slot)->device, i,
+ &irq_attr);
+
+ debug("(*cur_slot)->irq[0] = %x\n",
+ (*cur_slot)->irq[0]);
+ debug("(*cur_slot)->irq[1] = %x\n",
+ (*cur_slot)->irq[1]);
+ debug("(*cur_slot)->irq[2] = %x\n",
+ (*cur_slot)->irq[2]);
+ debug("(*cur_slot)->irq[3] = %x\n",
+ (*cur_slot)->irq[3]);
+
+ debug("rtable->exlusive_irqs = %x\n",
rtable->exclusive_irqs);
- debug("rtable->slots[loop].irq[0].bitmap = %x\n",
+ debug("rtable->slots[loop].irq[0].bitmap = %x\n",
rtable->slots[loop].irq[0].bitmap);
- debug("rtable->slots[loop].irq[1].bitmap = %x\n",
+ debug("rtable->slots[loop].irq[1].bitmap = %x\n",
rtable->slots[loop].irq[1].bitmap);
- debug("rtable->slots[loop].irq[2].bitmap = %x\n",
+ debug("rtable->slots[loop].irq[2].bitmap = %x\n",
rtable->slots[loop].irq[2].bitmap);
- debug("rtable->slots[loop].irq[3].bitmap = %x\n",
+ debug("rtable->slots[loop].irq[3].bitmap = %x\n",
rtable->slots[loop].irq[3].bitmap);
- debug("rtable->slots[loop].irq[0].link = %x\n",
+ debug("rtable->slots[loop].irq[0].link = %x\n",
rtable->slots[loop].irq[0].link);
- debug("rtable->slots[loop].irq[1].link = %x\n",
+ debug("rtable->slots[loop].irq[1].link = %x\n",
rtable->slots[loop].irq[1].link);
- debug("rtable->slots[loop].irq[2].link = %x\n",
+ debug("rtable->slots[loop].irq[2].link = %x\n",
rtable->slots[loop].irq[2].link);
- debug("rtable->slots[loop].irq[3].link = %x\n",
+ debug("rtable->slots[loop].irq[3].link = %x\n",
rtable->slots[loop].irq[3].link);
- debug("end of init_devno\n");
- kfree(rtable);
- return 0;
- }
+ debug("end of init_devno\n");
+ kfree(rtable);
+ return 0;
}
}
@@ -1316,7 +1318,6 @@ error:
}
struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = ibmphp_disable_slot,
@@ -1419,3 +1420,4 @@ static void __exit ibmphp_exit(void)
}
module_init(ibmphp_init);
+module_exit(ibmphp_exit);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 535fce0f07f..844580489d4 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -347,125 +347,129 @@ static struct pci_slot_attribute hotplug_slot_attr_test = {
.store = test_write_file
};
-static int has_power_file(struct pci_slot *pci_slot)
+static bool has_power_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if ((slot->ops->enable_slot) ||
(slot->ops->disable_slot) ||
(slot->ops->get_power_status))
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_attention_file(struct pci_slot *pci_slot)
+static bool has_attention_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if ((slot->ops->set_attention_status) ||
(slot->ops->get_attention_status))
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_latch_file(struct pci_slot *pci_slot)
+static bool has_latch_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_latch_status)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_adapter_file(struct pci_slot *pci_slot)
+static bool has_adapter_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_adapter_status)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_max_bus_speed_file(struct pci_slot *pci_slot)
+static bool has_max_bus_speed_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_max_bus_speed)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_cur_bus_speed_file(struct pci_slot *pci_slot)
+static bool has_cur_bus_speed_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_cur_bus_speed)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_test_file(struct pci_slot *pci_slot)
+static bool has_test_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->hardware_test)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
static int fs_add_slot(struct pci_slot *slot)
{
int retval = 0;
- if (has_power_file(slot) == 0) {
- retval = sysfs_create_file(&slot->kobj, &hotplug_slot_attr_power.attr);
+ /* Create symbolic link to the hotplug driver module */
+ pci_hp_create_module_link(slot);
+
+ if (has_power_file(slot)) {
+ retval = sysfs_create_file(&slot->kobj,
+ &hotplug_slot_attr_power.attr);
if (retval)
goto exit_power;
}
- if (has_attention_file(slot) == 0) {
+ if (has_attention_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_attention.attr);
if (retval)
goto exit_attention;
}
- if (has_latch_file(slot) == 0) {
+ if (has_latch_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_latch.attr);
if (retval)
goto exit_latch;
}
- if (has_adapter_file(slot) == 0) {
+ if (has_adapter_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_presence.attr);
if (retval)
goto exit_adapter;
}
- if (has_max_bus_speed_file(slot) == 0) {
+ if (has_max_bus_speed_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
- &hotplug_slot_attr_max_bus_speed.attr);
+ &hotplug_slot_attr_max_bus_speed.attr);
if (retval)
goto exit_max_speed;
}
- if (has_cur_bus_speed_file(slot) == 0) {
+ if (has_cur_bus_speed_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
- &hotplug_slot_attr_cur_bus_speed.attr);
+ &hotplug_slot_attr_cur_bus_speed.attr);
if (retval)
goto exit_cur_speed;
}
- if (has_test_file(slot) == 0) {
+ if (has_test_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_test.attr);
if (retval)
@@ -475,55 +479,61 @@ static int fs_add_slot(struct pci_slot *slot)
goto exit;
exit_test:
- if (has_cur_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_cur_bus_speed.attr);
-
+ if (has_cur_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_cur_bus_speed.attr);
exit_cur_speed:
- if (has_max_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr);
-
+ if (has_max_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_max_bus_speed.attr);
exit_max_speed:
- if (has_adapter_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr);
-
+ if (has_adapter_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_presence.attr);
exit_adapter:
- if (has_latch_file(slot) == 0)
+ if (has_latch_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
-
exit_latch:
- if (has_attention_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_attention.attr);
-
+ if (has_attention_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_attention.attr);
exit_attention:
- if (has_power_file(slot) == 0)
+ if (has_power_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
exit_power:
+ pci_hp_remove_module_link(slot);
exit:
return retval;
}
static void fs_remove_slot(struct pci_slot *slot)
{
- if (has_power_file(slot) == 0)
+ if (has_power_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
- if (has_attention_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_attention.attr);
+ if (has_attention_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_attention.attr);
- if (has_latch_file(slot) == 0)
+ if (has_latch_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
- if (has_adapter_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr);
+ if (has_adapter_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_presence.attr);
- if (has_max_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr);
+ if (has_max_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_max_bus_speed.attr);
- if (has_cur_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_cur_bus_speed.attr);
+ if (has_cur_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_cur_bus_speed.attr);
- if (has_test_file(slot) == 0)
+ if (has_test_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr);
+
+ pci_hp_remove_module_link(slot);
}
static struct hotplug_slot *get_slot_from_name (const char *name)
@@ -540,10 +550,10 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
}
/**
- * pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
+ * __pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
* @bus: bus this slot is on
* @slot: pointer to the &struct hotplug_slot to register
- * @slot_nr: slot number
+ * @devnr: device number
* @name: name registered with kobject core
*
* Registers a hotplug slot with the pci hotplug subsystem, which will allow
@@ -551,8 +561,9 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
*
* Returns 0 if successful, anything else for an error.
*/
-int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr,
- const char *name)
+int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
+ int devnr, const char *name,
+ struct module *owner, const char *mod_name)
{
int result;
struct pci_slot *pci_slot;
@@ -567,14 +578,16 @@ int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr,
return -EINVAL;
}
- mutex_lock(&pci_hp_mutex);
+ slot->ops->owner = owner;
+ slot->ops->mod_name = mod_name;
+ mutex_lock(&pci_hp_mutex);
/*
* No problems if we call this interface from both ACPI_PCI_SLOT
* driver and call it here again. If we've already created the
* pci_slot, the interface will simply bump the refcount.
*/
- pci_slot = pci_create_slot(bus, slot_nr, name, slot);
+ pci_slot = pci_create_slot(bus, devnr, name, slot);
if (IS_ERR(pci_slot)) {
result = PTR_ERR(pci_slot);
goto out;
@@ -684,6 +697,6 @@ MODULE_LICENSE("GPL");
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-EXPORT_SYMBOL_GPL(pci_hp_register);
+EXPORT_SYMBOL_GPL(__pci_hp_register);
EXPORT_SYMBOL_GPL(pci_hp_deregister);
EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
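Note on the __pci_hp_register() change above, and on the `.owner = THIS_MODULE` removals in the hotplug drivers further down: the owning module is now recorded at registration time instead of in each driver's hotplug_slot_ops. A plausible convenience wrapper in the public header would look like the sketch below (the macro body is an assumption based on the new function's signature, not quoted from the patch):

#include <linux/module.h>

/* assumed wrapper; callers keep the old four-argument form */
#define pci_hp_register(slot, bus, devnr, name) \
	__pci_hp_register(slot, bus, devnr, name, THIS_MODULE, KBUILD_MODNAME)

With the module recorded this way, the sysfs code can create a per-slot module symlink (pci_hp_create_module_link() above) and individual drivers no longer set .owner by hand.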
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 39ae37589fd..e6cf096498b 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -46,10 +46,10 @@ extern int pciehp_force;
extern struct workqueue_struct *pciehp_wq;
#define dbg(format, arg...) \
- do { \
- if (pciehp_debug) \
- printk("%s: " format, MY_NAME , ## arg); \
- } while (0)
+do { \
+ if (pciehp_debug) \
+ printk(KERN_DEBUG "%s: " format, MY_NAME , ## arg); \
+} while (0)
#define err(format, arg...) \
printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
#define info(format, arg...) \
@@ -60,7 +60,7 @@ extern struct workqueue_struct *pciehp_wq;
#define ctrl_dbg(ctrl, format, arg...) \
do { \
if (pciehp_debug) \
- dev_printk(, &ctrl->pcie->device, \
+ dev_printk(KERN_DEBUG, &ctrl->pcie->device, \
format, ## arg); \
} while (0)
#define ctrl_err(ctrl, format, arg...) \
@@ -81,7 +81,6 @@ struct slot {
struct hpc_ops *hpc_ops;
struct hotplug_slot *hotplug_slot;
struct list_head slot_list;
- unsigned long last_emi_toggle;
struct delayed_work work; /* work for button event */
struct mutex lock;
};
@@ -108,10 +107,11 @@ struct controller {
u32 slot_cap;
u8 cap_base;
struct timer_list poll_timer;
- int cmd_busy;
+ unsigned int cmd_busy:1;
unsigned int no_cmd_complete:1;
unsigned int link_active_reporting:1;
unsigned int notification_enabled:1;
+ unsigned int power_fault_detected;
};
#define INT_BUTTON_IGNORE 0
@@ -202,8 +202,6 @@ struct hpc_ops {
int (*set_attention_status)(struct slot *slot, u8 status);
int (*get_latch_status)(struct slot *slot, u8 *status);
int (*get_adapter_status)(struct slot *slot, u8 *status);
- int (*get_emi_status)(struct slot *slot, u8 *status);
- int (*toggle_emi)(struct slot *slot);
int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val);
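The dbg()/ctrl_dbg() edits above add an explicit KERN_DEBUG level; a printk() without a level string is emitted at the kernel's default console level, so debug chatter could previously surface as ordinary messages. A generic sketch of the convention (names are illustrative):

#include <linux/kernel.h>
#include <linux/module.h>

static int example_debug;
module_param(example_debug, int, 0644);

#define ex_dbg(fmt, arg...)					\
do {								\
	if (example_debug)					\
		printk(KERN_DEBUG "example: " fmt, ## arg);	\
} while (0)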
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 438d795f9fe..96048010e7d 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -67,37 +67,27 @@ static int __init parse_detect_mode(void)
return PCIEHP_DETECT_DEFAULT;
}
-static struct pcie_port_service_id __initdata port_pci_ids[] = {
- {
- .vendor = PCI_ANY_ID,
- .device = PCI_ANY_ID,
- .port_type = PCIE_ANY_PORT,
- .service_type = PCIE_PORT_SERVICE_HP,
- .driver_data = 0,
- }, { /* end: all zeroes */ }
-};
-
static int __initdata dup_slot_id;
static int __initdata acpi_slot_detected;
static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);
/* Dummy driver for duplicate name detection */
-static int __init dummy_probe(struct pcie_device *dev,
- const struct pcie_port_service_id *id)
+static int __init dummy_probe(struct pcie_device *dev)
{
int pos;
u32 slot_cap;
struct slot *slot, *tmp;
struct pci_dev *pdev = dev->port;
struct pci_bus *pbus = pdev->subordinate;
- if (!(slot = kzalloc(sizeof(*slot), GFP_KERNEL)))
- return -ENOMEM;
/* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
if (pciehp_get_hp_hw_control_from_firmware(pdev))
return -ENODEV;
if (!(pos = pci_find_capability(pdev, PCI_CAP_ID_EXP)))
return -ENODEV;
pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ return -ENOMEM;
slot->number = slot_cap >> 19;
list_for_each_entry(tmp, &dummy_slots, slot_list) {
if (tmp->number == slot->number)
@@ -111,7 +101,8 @@ static int __init dummy_probe(struct pcie_device *dev,
static struct pcie_port_service_driver __initdata dummy_driver = {
.name = "pciehp_dummy",
- .id_table = port_pci_ids,
+ .port_type = PCIE_ANY_PORT,
+ .service = PCIE_PORT_SERVICE_HP,
.probe = dummy_probe,
};
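The dummy_driver change above reflects the reworked port service API used throughout this series: a pcie_port_service_driver now declares its port type and service bit directly instead of carrying a pcie_port_service_id table, and probe() loses the id argument. A hedged, minimal registration sketch (driver name and callbacks are made up):

#include <linux/pcieport_if.h>

static int example_hp_probe(struct pcie_device *dev)
{
	return 0;		/* claim the hotplug service on this port */
}

static void example_hp_remove(struct pcie_device *dev)
{
}

static struct pcie_port_service_driver example_hp_driver = {
	.name		= "example_hp",
	.port_type	= PCIE_ANY_PORT,
	.service	= PCIE_PORT_SERVICE_HP,
	.probe		= example_hp_probe,
	.remove		= example_hp_remove,
};

/* registered with pcie_port_service_register(&example_hp_driver) */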
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 681e3912b82..2317557fdee 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -73,7 +73,6 @@ static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *val
static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
@@ -85,99 +84,6 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
.get_cur_bus_speed = get_cur_bus_speed,
};
-/*
- * Check the status of the Electro Mechanical Interlock (EMI)
- */
-static int get_lock_status(struct hotplug_slot *hotplug_slot, u8 *value)
-{
- struct slot *slot = hotplug_slot->private;
- return (slot->hpc_ops->get_emi_status(slot, value));
-}
-
-/*
- * sysfs interface for the Electro Mechanical Interlock (EMI)
- * 1 == locked, 0 == unlocked
- */
-static ssize_t lock_read_file(struct hotplug_slot *slot, char *buf)
-{
- int retval;
- u8 value;
-
- retval = get_lock_status(slot, &value);
- if (retval)
- goto lock_read_exit;
- retval = sprintf (buf, "%d\n", value);
-
-lock_read_exit:
- return retval;
-}
-
-/*
- * Change the status of the Electro Mechanical Interlock (EMI)
- * This is a toggle - in addition there must be at least 1 second
- * in between toggles.
- */
-static int set_lock_status(struct hotplug_slot *hotplug_slot, u8 status)
-{
- struct slot *slot = hotplug_slot->private;
- int retval;
- u8 value;
-
- mutex_lock(&slot->ctrl->crit_sect);
-
- /* has it been >1 sec since our last toggle? */
- if ((get_seconds() - slot->last_emi_toggle) < 1) {
- mutex_unlock(&slot->ctrl->crit_sect);
- return -EINVAL;
- }
-
- /* see what our current state is */
- retval = get_lock_status(hotplug_slot, &value);
- if (retval || (value == status))
- goto set_lock_exit;
-
- slot->hpc_ops->toggle_emi(slot);
-set_lock_exit:
- mutex_unlock(&slot->ctrl->crit_sect);
- return 0;
-}
-
-/*
- * sysfs interface which allows the user to toggle the Electro Mechanical
- * Interlock. Valid values are either 0 or 1. 0 == unlock, 1 == lock
- */
-static ssize_t lock_write_file(struct hotplug_slot *hotplug_slot,
- const char *buf, size_t count)
-{
- struct slot *slot = hotplug_slot->private;
- unsigned long llock;
- u8 lock;
- int retval = 0;
-
- llock = simple_strtoul(buf, NULL, 10);
- lock = (u8)(llock & 0xff);
-
- switch (lock) {
- case 0:
- case 1:
- retval = set_lock_status(hotplug_slot, lock);
- break;
- default:
- ctrl_err(slot->ctrl, "%d is an invalid lock value\n",
- lock);
- retval = -EINVAL;
- }
- if (retval)
- return retval;
- return count;
-}
-
-static struct hotplug_slot_attribute hotplug_slot_attr_lock = {
- .attr = {.name = "lock", .mode = S_IFREG | S_IRUGO | S_IWUSR},
- .show = lock_read_file,
- .store = lock_write_file
-};
-
/**
* release_slot - free up the memory used by a slot
* @hotplug_slot: slot to free
@@ -236,17 +142,6 @@ static int init_slots(struct controller *ctrl)
get_attention_status(hotplug_slot, &info->attention_status);
get_latch_status(hotplug_slot, &info->latch_status);
get_adapter_status(hotplug_slot, &info->adapter_status);
- /* create additional sysfs entries */
- if (EMI(ctrl)) {
- retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
- &hotplug_slot_attr_lock.attr);
- if (retval) {
- pci_hp_deregister(hotplug_slot);
- ctrl_err(ctrl, "Cannot create additional sysfs "
- "entries\n");
- goto error_info;
- }
- }
}
return 0;
@@ -261,13 +156,8 @@ error:
static void cleanup_slots(struct controller *ctrl)
{
struct slot *slot;
-
- list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
- if (EMI(ctrl))
- sysfs_remove_file(&slot->hotplug_slot->pci_slot->kobj,
- &hotplug_slot_attr_lock.attr);
+ list_for_each_entry(slot, &ctrl->slot_list, slot_list)
pci_hp_deregister(slot->hotplug_slot);
- }
}
/*
@@ -401,7 +291,7 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
return 0;
}
-static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_id *id)
+static int pciehp_probe(struct pcie_device *dev)
{
int rc;
struct controller *ctrl;
@@ -475,7 +365,7 @@ static void pciehp_remove (struct pcie_device *dev)
}
#ifdef CONFIG_PM
-static int pciehp_suspend (struct pcie_device *dev, pm_message_t state)
+static int pciehp_suspend (struct pcie_device *dev)
{
dev_info(&dev->device, "%s ENTRY\n", __func__);
return 0;
@@ -503,20 +393,12 @@ static int pciehp_resume (struct pcie_device *dev)
}
return 0;
}
-#endif
-
-static struct pcie_port_service_id port_pci_ids[] = { {
- .vendor = PCI_ANY_ID,
- .device = PCI_ANY_ID,
- .port_type = PCIE_ANY_PORT,
- .service_type = PCIE_PORT_SERVICE_HP,
- .driver_data = 0,
- }, { /* end: all zeroes */ }
-};
+#endif /* PM */
static struct pcie_port_service_driver hpdriver_portdrv = {
.name = PCIE_MODULE_NAME,
- .id_table = &port_pci_ids[0],
+ .port_type = PCIE_ANY_PORT,
+ .service = PCIE_PORT_SERVICE_HP,
.probe = pciehp_probe,
.remove = pciehp_remove,
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 7a16c6897bb..52813257e5b 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -422,35 +422,6 @@ static int hpc_query_power_fault(struct slot *slot)
return !!(slot_status & PCI_EXP_SLTSTA_PFD);
}
-static int hpc_get_emi_status(struct slot *slot, u8 *status)
-{
- struct controller *ctrl = slot->ctrl;
- u16 slot_status;
- int retval;
-
- retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (retval) {
- ctrl_err(ctrl, "Cannot check EMI status\n");
- return retval;
- }
- *status = !!(slot_status & PCI_EXP_SLTSTA_EIS);
- return retval;
-}
-
-static int hpc_toggle_emi(struct slot *slot)
-{
- u16 slot_cmd;
- u16 cmd_mask;
- int rc;
-
- slot_cmd = PCI_EXP_SLTCTL_EIC;
- cmd_mask = PCI_EXP_SLTCTL_EIC;
- rc = pcie_write_cmd(slot->ctrl, slot_cmd, cmd_mask);
- slot->last_emi_toggle = get_seconds();
-
- return rc;
-}
-
static int hpc_set_attention_status(struct slot *slot, u8 value)
{
struct controller *ctrl = slot->ctrl;
@@ -548,23 +519,21 @@ static int hpc_power_on_slot(struct slot * slot)
slot_cmd = POWER_ON;
cmd_mask = PCI_EXP_SLTCTL_PCC;
- /* Enable detection that we turned off at slot power-off time */
if (!pciehp_poll_mode) {
- slot_cmd |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
- PCI_EXP_SLTCTL_PDCE);
- cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
- PCI_EXP_SLTCTL_PDCE);
+ /* Enable power fault detection turned off at power off time */
+ slot_cmd |= PCI_EXP_SLTCTL_PFDE;
+ cmd_mask |= PCI_EXP_SLTCTL_PFDE;
}
retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
-
if (retval) {
ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd);
- return -1;
+ return retval;
}
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
__func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
+ ctrl->power_fault_detected = 0;
return retval;
}
@@ -621,18 +590,10 @@ static int hpc_power_off_slot(struct slot * slot)
slot_cmd = POWER_OFF;
cmd_mask = PCI_EXP_SLTCTL_PCC;
- /*
- * If we get MRL or presence detect interrupts now, the isr
- * will notice the sticky power-fault bit too and issue power
- * indicator change commands. This will lead to an endless loop
- * of command completions, since the power-fault bit remains on
- * till the slot is powered on again.
- */
if (!pciehp_poll_mode) {
- slot_cmd &= ~(PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
- PCI_EXP_SLTCTL_PDCE);
- cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
- PCI_EXP_SLTCTL_PDCE);
+ /* Disable power fault detection */
+ slot_cmd &= ~PCI_EXP_SLTCTL_PFDE;
+ cmd_mask |= PCI_EXP_SLTCTL_PFDE;
}
retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
@@ -672,10 +633,11 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
PCI_EXP_SLTSTA_CC);
+ detected &= ~intr_loc;
intr_loc |= detected;
if (!intr_loc)
return IRQ_NONE;
- if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, detected)) {
+ if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, intr_loc)) {
ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n",
__func__);
return IRQ_NONE;
@@ -709,9 +671,10 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
pciehp_handle_presence_change(p_slot);
/* Check Power Fault Detected */
- if (intr_loc & PCI_EXP_SLTSTA_PFD)
+ if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
+ ctrl->power_fault_detected = 1;
pciehp_handle_power_fault(p_slot);
-
+ }
return IRQ_HANDLED;
}
@@ -882,8 +845,6 @@ static struct hpc_ops pciehp_hpc_ops = {
.get_attention_status = hpc_get_attention_status,
.get_latch_status = hpc_get_latch_status,
.get_adapter_status = hpc_get_adapter_status,
- .get_emi_status = hpc_get_emi_status,
- .toggle_emi = hpc_toggle_emi,
.get_max_bus_speed = hpc_get_max_lnk_speed,
.get_cur_bus_speed = hpc_get_cur_lnk_speed,
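On the ISR change above: Power Fault Detected is reported from a condition that stays asserted until slot power is cycled, so the handler now latches the first occurrence in ctrl->power_fault_detected and ignores the bit afterwards (the flag is cleared again in hpc_power_on_slot()). A stripped-down sketch of that software edge-detect, using hypothetical names:

#include <linux/pci_regs.h>
#include <linux/types.h>

struct example_ctrl {
	unsigned int power_fault_detected:1;
};

static void example_check_power_fault(struct example_ctrl *ctrl, u16 slot_status)
{
	if ((slot_status & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
		ctrl->power_fault_detected = 1;
		/* report the fault once; further PFD interrupts are ignored */
	}
}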
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
index e3dd6cf9e89..5175d9b26f0 100644
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -82,7 +82,6 @@ static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops skel_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 95d02a08fdc..c159223389e 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -423,7 +423,6 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
}
struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 3eee70928d4..a4494d78e7c 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -83,7 +83,6 @@ static int disable_slot(struct hotplug_slot *slot);
static inline int get_power_status(struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops sn_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.get_power_status = get_power_status,
@@ -679,7 +678,7 @@ alloc_err:
return rc;
}
-static int sn_pci_hotplug_init(void)
+static int __init sn_pci_hotplug_init(void)
{
struct pci_bus *pci_bus = NULL;
int rc;
@@ -716,7 +715,7 @@ static int sn_pci_hotplug_init(void)
return registered == 1 ? 0 : -ENODEV;
}
-static void sn_pci_hotplug_exit(void)
+static void __exit sn_pci_hotplug_exit(void)
{
struct hotplug_slot *bss_hotplug_slot;
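The sgi_hotplug annotations above are a routine cleanup: marking the entry points __init/__exit places them in discardable sections, so the init code is freed after boot and the exit code is dropped entirely when the driver cannot be unloaded. Generic form, for reference:

#include <linux/init.h>
#include <linux/module.h>

static int __init example_init(void)
{
	return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");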
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 6aba0b6cf2e..974e924ca96 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -48,10 +48,10 @@ extern int shpchp_debug;
extern struct workqueue_struct *shpchp_wq;
#define dbg(format, arg...) \
- do { \
- if (shpchp_debug) \
- printk("%s: " format, MY_NAME , ## arg); \
- } while (0)
+do { \
+ if (shpchp_debug) \
+ printk(KERN_DEBUG "%s: " format, MY_NAME , ## arg); \
+} while (0)
#define err(format, arg...) \
printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
#define info(format, arg...) \
@@ -62,7 +62,7 @@ extern struct workqueue_struct *shpchp_wq;
#define ctrl_dbg(ctrl, format, arg...) \
do { \
if (shpchp_debug) \
- dev_printk(, &ctrl->pci_dev->dev, \
+ dev_printk(KERN_DEBUG, &ctrl->pci_dev->dev, \
format, ## arg); \
} while (0)
#define ctrl_err(ctrl, format, arg...) \
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index fe8d149c229..8a520a3d0f5 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -69,7 +69,6 @@ static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *val
static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 138f161becc..aa315e52529 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -137,7 +137,7 @@ int __ref shpchp_configure_device(struct slot *p_slot)
busnr))
break;
}
- if (busnr >= end) {
+ if (busnr > end) {
ctrl_err(ctrl,
"No free bus for hot-added bridge\n");
pci_dev_put(dev);
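The `busnr >= end` to `busnr > end` change above fixes an off-by-one: candidate bus numbers are scanned inclusively up to `end`, so only walking past `end` means no free bus was found. In isolation (hypothetical helper):

static int bus_range_exhausted(int busnr, int end)
{
	return busnr > end;	/* busnr == end is still a valid candidate */
}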
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index bf7d6ce9bbb..737a1c44b07 100644
--- a/drivers/pci/htirq.c
+++ b/drivers/pci/htirq.c
@@ -98,6 +98,7 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
int max_irq;
int pos;
int irq;
+ int node;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
if (!pos)
@@ -125,7 +126,8 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
cfg->msg.address_lo = 0xffffffff;
cfg->msg.address_hi = 0xffffffff;
- irq = create_irq();
+ node = dev_to_node(&dev->dev);
+ irq = create_irq_nr(0, node);
if (irq <= 0) {
kfree(cfg);
@@ -158,6 +160,7 @@ int ht_create_irq(struct pci_dev *dev, int idx)
/**
* ht_destroy_irq - destroy an irq created with ht_create_irq
+ * @irq: irq to be destroyed
*
* This reverses ht_create_irq removing the specified irq from
* existence. The irq should be free before this happens.
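The htirq.c hunk above makes the IRQ descriptor allocation NUMA-aware: the descriptor is created on the node the device lives on rather than wherever create_irq() happened to run. The core of it, assuming the create_irq_nr(irq_want, node) interface introduced alongside this series:

#include <linux/device.h>
#include <linux/irq.h>

static int example_alloc_irq(struct device *dev)
{
	int node = dev_to_node(dev);

	return create_irq_nr(0, node);	/* 0: no preferred irq number */
}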
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f3f686581a9..cd389162735 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -36,6 +36,7 @@
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
+#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"
@@ -55,8 +56,12 @@
#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
-#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
+#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
+#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
+
+#ifndef PHYSICAL_PAGE_MASK
+#define PHYSICAL_PAGE_MASK PAGE_MASK
+#endif
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;
@@ -164,7 +169,8 @@ static inline void context_clear_entry(struct context_entry *context)
* 1: writable
* 2-6: reserved
* 7: super page
- * 8-11: available
+ * 8-10: available
+ * 11: snoop behavior
* 12-63: Host physical address
*/
struct dma_pte {
@@ -186,6 +192,11 @@ static inline void dma_set_pte_writable(struct dma_pte *pte)
pte->val |= DMA_PTE_WRITE;
}
+static inline void dma_set_pte_snp(struct dma_pte *pte)
+{
+ pte->val |= DMA_PTE_SNP;
+}
+
static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
pte->val = (pte->val & ~3) | (prot & 3);
@@ -231,6 +242,7 @@ struct dmar_domain {
int flags; /* flags to find out type of domain */
int iommu_coherency;/* indicate coherency of iommu access */
+ int iommu_snooping; /* indicate snooping control feature*/
int iommu_count; /* reference count of iommu */
spinlock_t iommu_lock; /* protect iommu set in domain */
u64 max_addr; /* maximum mapped address */
@@ -240,7 +252,8 @@ struct dmar_domain {
struct device_domain_info {
struct list_head link; /* link to domain siblings */
struct list_head global; /* link to global list */
- u8 bus; /* PCI bus numer */
+ int segment; /* PCI domain */
+ u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
struct dmar_domain *domain; /* pointer to domain */
@@ -421,7 +434,6 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
return g_iommus[iommu_id];
}
-/* "Coherency" capability may be different across iommus */
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
int i;
@@ -438,7 +450,30 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
}
}
-static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
+static void domain_update_iommu_snooping(struct dmar_domain *domain)
+{
+ int i;
+
+ domain->iommu_snooping = 1;
+
+ i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+ for (; i < g_num_of_iommus; ) {
+ if (!ecap_sc_support(g_iommus[i]->ecap)) {
+ domain->iommu_snooping = 0;
+ break;
+ }
+ i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
+ }
+}
+
+/* Some capabilities may be different across iommus */
+static void domain_update_iommu_cap(struct dmar_domain *domain)
+{
+ domain_update_iommu_coherency(domain);
+ domain_update_iommu_snooping(domain);
+}
+
+static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
struct dmar_drhd_unit *drhd = NULL;
int i;
@@ -446,12 +481,20 @@ static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;
+ if (segment != drhd->segment)
+ continue;
- for (i = 0; i < drhd->devices_cnt; i++)
+ for (i = 0; i < drhd->devices_cnt; i++) {
if (drhd->devices[i] &&
drhd->devices[i]->bus->number == bus &&
drhd->devices[i]->devfn == devfn)
return drhd->iommu;
+ if (drhd->devices[i] &&
+ drhd->devices[i]->subordinate &&
+ drhd->devices[i]->subordinate->number <= bus &&
+ drhd->devices[i]->subordinate->subordinate >= bus)
+ return drhd->iommu;
+ }
if (drhd->include_all)
return drhd->iommu;
@@ -689,15 +732,17 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
{
int addr_width = agaw_to_width(domain->agaw);
+ int npages;
start &= (((u64)1) << addr_width) - 1;
end &= (((u64)1) << addr_width) - 1;
/* in case it's partial page */
- start = PAGE_ALIGN(start);
- end &= PAGE_MASK;
+ start &= PAGE_MASK;
+ end = PAGE_ALIGN(end);
+ npages = (end - start) / VTD_PAGE_SIZE;
/* we don't need lock here, nobody else touches the iova range */
- while (start < end) {
+ while (npages--) {
dma_pte_clear_one(domain, start);
start += VTD_PAGE_SIZE;
}
@@ -1004,194 +1049,6 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
return 0;
}
-/* iommu interrupt handling. Most stuff are MSI-like. */
-
-static const char *fault_reason_strings[] =
-{
- "Software",
- "Present bit in root entry is clear",
- "Present bit in context entry is clear",
- "Invalid context entry",
- "Access beyond MGAW",
- "PTE Write access is not set",
- "PTE Read access is not set",
- "Next page table ptr is invalid",
- "Root table address invalid",
- "Context table ptr is invalid",
- "non-zero reserved fields in RTP",
- "non-zero reserved fields in CTP",
- "non-zero reserved fields in PTE",
-};
-#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
-
-const char *dmar_get_fault_reason(u8 fault_reason)
-{
- if (fault_reason > MAX_FAULT_REASON_IDX)
- return "Unknown";
- else
- return fault_reason_strings[fault_reason];
-}
-
-void dmar_msi_unmask(unsigned int irq)
-{
- struct intel_iommu *iommu = get_irq_data(irq);
- unsigned long flag;
-
- /* unmask it */
- spin_lock_irqsave(&iommu->register_lock, flag);
- writel(0, iommu->reg + DMAR_FECTL_REG);
- /* Read a reg to force flush the post write */
- readl(iommu->reg + DMAR_FECTL_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_mask(unsigned int irq)
-{
- unsigned long flag;
- struct intel_iommu *iommu = get_irq_data(irq);
-
- /* mask it */
- spin_lock_irqsave(&iommu->register_lock, flag);
- writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
- /* Read a reg to force flush the post write */
- readl(iommu->reg + DMAR_FECTL_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_write(int irq, struct msi_msg *msg)
-{
- struct intel_iommu *iommu = get_irq_data(irq);
- unsigned long flag;
-
- spin_lock_irqsave(&iommu->register_lock, flag);
- writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
- writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
- writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_read(int irq, struct msi_msg *msg)
-{
- struct intel_iommu *iommu = get_irq_data(irq);
- unsigned long flag;
-
- spin_lock_irqsave(&iommu->register_lock, flag);
- msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
- msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
- msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
- u8 fault_reason, u16 source_id, unsigned long long addr)
-{
- const char *reason;
-
- reason = dmar_get_fault_reason(fault_reason);
-
- printk(KERN_ERR
- "DMAR:[%s] Request device [%02x:%02x.%d] "
- "fault addr %llx \n"
- "DMAR:[fault reason %02d] %s\n",
- (type ? "DMA Read" : "DMA Write"),
- (source_id >> 8), PCI_SLOT(source_id & 0xFF),
- PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
- return 0;
-}
-
-#define PRIMARY_FAULT_REG_LEN (16)
-static irqreturn_t iommu_page_fault(int irq, void *dev_id)
-{
- struct intel_iommu *iommu = dev_id;
- int reg, fault_index;
- u32 fault_status;
- unsigned long flag;
-
- spin_lock_irqsave(&iommu->register_lock, flag);
- fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-
- /* TBD: ignore advanced fault log currently */
- if (!(fault_status & DMA_FSTS_PPF))
- goto clear_overflow;
-
- fault_index = dma_fsts_fault_record_index(fault_status);
- reg = cap_fault_reg_offset(iommu->cap);
- while (1) {
- u8 fault_reason;
- u16 source_id;
- u64 guest_addr;
- int type;
- u32 data;
-
- /* highest 32 bits */
- data = readl(iommu->reg + reg +
- fault_index * PRIMARY_FAULT_REG_LEN + 12);
- if (!(data & DMA_FRCD_F))
- break;
-
- fault_reason = dma_frcd_fault_reason(data);
- type = dma_frcd_type(data);
-
- data = readl(iommu->reg + reg +
- fault_index * PRIMARY_FAULT_REG_LEN + 8);
- source_id = dma_frcd_source_id(data);
-
- guest_addr = dmar_readq(iommu->reg + reg +
- fault_index * PRIMARY_FAULT_REG_LEN);
- guest_addr = dma_frcd_page_addr(guest_addr);
- /* clear the fault */
- writel(DMA_FRCD_F, iommu->reg + reg +
- fault_index * PRIMARY_FAULT_REG_LEN + 12);
-
- spin_unlock_irqrestore(&iommu->register_lock, flag);
-
- iommu_page_fault_do_one(iommu, type, fault_reason,
- source_id, guest_addr);
-
- fault_index++;
- if (fault_index > cap_num_fault_regs(iommu->cap))
- fault_index = 0;
- spin_lock_irqsave(&iommu->register_lock, flag);
- }
-clear_overflow:
- /* clear primary fault overflow */
- fault_status = readl(iommu->reg + DMAR_FSTS_REG);
- if (fault_status & DMA_FSTS_PFO)
- writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG);
-
- spin_unlock_irqrestore(&iommu->register_lock, flag);
- return IRQ_HANDLED;
-}
-
-int dmar_set_interrupt(struct intel_iommu *iommu)
-{
- int irq, ret;
-
- irq = create_irq();
- if (!irq) {
- printk(KERN_ERR "IOMMU: no free vectors\n");
- return -EINVAL;
- }
-
- set_irq_data(irq, iommu);
- iommu->irq = irq;
-
- ret = arch_setup_dmar_msi(irq);
- if (ret) {
- set_irq_data(irq, NULL);
- iommu->irq = 0;
- destroy_irq(irq);
- return 0;
- }
-
- /* Force fault register is cleared */
- iommu_page_fault(irq, iommu);
-
- ret = request_irq(irq, iommu_page_fault, 0, iommu->name, iommu);
- if (ret)
- printk(KERN_ERR "IOMMU: can't request irq\n");
- return ret;
-}
static int iommu_init_domains(struct intel_iommu *iommu)
{
@@ -1363,7 +1220,7 @@ static void dmar_init_reserved_ranges(void)
if (!r->flags || !(r->flags & IORESOURCE_MEM))
continue;
addr = r->start;
- addr &= PAGE_MASK;
+ addr &= PHYSICAL_PAGE_MASK;
size = r->end - addr;
size = PAGE_ALIGN(size);
iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
@@ -1429,6 +1286,11 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
else
domain->iommu_coherency = 0;
+ if (ecap_sc_support(iommu->ecap))
+ domain->iommu_snooping = 1;
+ else
+ domain->iommu_snooping = 0;
+
domain->iommu_count = 1;
/* always allocate the top pgd */
@@ -1464,7 +1326,7 @@ static void domain_exit(struct dmar_domain *domain)
}
static int domain_context_mapping_one(struct dmar_domain *domain,
- u8 bus, u8 devfn)
+ int segment, u8 bus, u8 devfn)
{
struct context_entry *context;
unsigned long flags;
@@ -1479,7 +1341,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
BUG_ON(!domain->pgd);
- iommu = device_to_iommu(bus, devfn);
+ iommu = device_to_iommu(segment, bus, devfn);
if (!iommu)
return -ENODEV;
@@ -1557,7 +1419,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
spin_lock_irqsave(&domain->iommu_lock, flags);
if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
domain->iommu_count++;
- domain_update_iommu_coherency(domain);
+ domain_update_iommu_cap(domain);
}
spin_unlock_irqrestore(&domain->iommu_lock, flags);
return 0;
@@ -1569,8 +1431,8 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
int ret;
struct pci_dev *tmp, *parent;
- ret = domain_context_mapping_one(domain, pdev->bus->number,
- pdev->devfn);
+ ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
+ pdev->bus->number, pdev->devfn);
if (ret)
return ret;
@@ -1581,18 +1443,23 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
/* Secondary interface's bus number and devfn 0 */
parent = pdev->bus->self;
while (parent != tmp) {
- ret = domain_context_mapping_one(domain, parent->bus->number,
- parent->devfn);
+ ret = domain_context_mapping_one(domain,
+ pci_domain_nr(parent->bus),
+ parent->bus->number,
+ parent->devfn);
if (ret)
return ret;
parent = parent->bus->self;
}
if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
return domain_context_mapping_one(domain,
- tmp->subordinate->number, 0);
+ pci_domain_nr(tmp->subordinate),
+ tmp->subordinate->number, 0);
else /* this is a legacy PCI bridge */
return domain_context_mapping_one(domain,
- tmp->bus->number, tmp->devfn);
+ pci_domain_nr(tmp->bus),
+ tmp->bus->number,
+ tmp->devfn);
}
static int domain_context_mapped(struct pci_dev *pdev)
@@ -1601,12 +1468,12 @@ static int domain_context_mapped(struct pci_dev *pdev)
struct pci_dev *tmp, *parent;
struct intel_iommu *iommu;
- iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+ iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
+ pdev->devfn);
if (!iommu)
return -ENODEV;
- ret = device_context_mapped(iommu,
- pdev->bus->number, pdev->devfn);
+ ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
if (!ret)
return ret;
/* dependent device mapping */
@@ -1617,17 +1484,17 @@ static int domain_context_mapped(struct pci_dev *pdev)
parent = pdev->bus->self;
while (parent != tmp) {
ret = device_context_mapped(iommu, parent->bus->number,
- parent->devfn);
+ parent->devfn);
if (!ret)
return ret;
parent = parent->bus->self;
}
if (tmp->is_pcie)
- return device_context_mapped(iommu,
- tmp->subordinate->number, 0);
+ return device_context_mapped(iommu, tmp->subordinate->number,
+ 0);
else
- return device_context_mapped(iommu,
- tmp->bus->number, tmp->devfn);
+ return device_context_mapped(iommu, tmp->bus->number,
+ tmp->devfn);
}
static int
@@ -1657,6 +1524,8 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
BUG_ON(dma_pte_addr(pte));
dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
dma_set_pte_prot(pte, prot);
+ if (prot & DMA_PTE_SNP)
+ dma_set_pte_snp(pte);
domain_flush_cache(domain, pte, sizeof(*pte));
start_pfn++;
index++;
@@ -1692,7 +1561,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
info->dev->dev.archdata.iommu = NULL;
spin_unlock_irqrestore(&device_domain_lock, flags);
- iommu = device_to_iommu(info->bus, info->devfn);
+ iommu = device_to_iommu(info->segment, info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);
free_devinfo_mem(info);
@@ -1727,11 +1596,14 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
struct pci_dev *dev_tmp;
unsigned long flags;
int bus = 0, devfn = 0;
+ int segment;
domain = find_domain(pdev);
if (domain)
return domain;
+ segment = pci_domain_nr(pdev->bus);
+
dev_tmp = pci_find_upstream_pcie_bridge(pdev);
if (dev_tmp) {
if (dev_tmp->is_pcie) {
@@ -1743,7 +1615,8 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
}
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry(info, &device_domain_list, global) {
- if (info->bus == bus && info->devfn == devfn) {
+ if (info->segment == segment &&
+ info->bus == bus && info->devfn == devfn) {
found = info->domain;
break;
}
@@ -1781,6 +1654,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
domain_exit(domain);
goto error;
}
+ info->segment = segment;
info->bus = bus;
info->devfn = devfn;
info->dev = NULL;
@@ -1792,7 +1666,8 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
found = NULL;
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry(tmp, &device_domain_list, global) {
- if (tmp->bus == bus && tmp->devfn == devfn) {
+ if (tmp->segment == segment &&
+ tmp->bus == bus && tmp->devfn == devfn) {
found = tmp->domain;
break;
}
@@ -1812,6 +1687,7 @@ found_domain:
info = alloc_devinfo_mem();
if (!info)
goto error;
+ info->segment = segment;
info->bus = pdev->bus->number;
info->devfn = pdev->devfn;
info->dev = pdev;
@@ -1970,7 +1846,7 @@ static inline void iommu_prepare_isa(void)
ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
if (ret)
- printk("IOMMU: Failed to create 0-64M identity map, "
+ printk(KERN_ERR "IOMMU: Failed to create 0-64M identity map, "
"floppy might not work\n");
}
@@ -1987,7 +1863,7 @@ static int __init init_dmars(void)
struct dmar_rmrr_unit *rmrr;
struct pci_dev *pdev;
struct intel_iommu *iommu;
- int i, ret, unit = 0;
+ int i, ret;
/*
* for each drhd
@@ -2043,11 +1919,40 @@ static int __init init_dmars(void)
}
}
+ /*
+ * Start from the sane iommu hardware state.
+ */
for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;
iommu = drhd->iommu;
+
+ /*
+ * If the queued invalidation is already initialized by us
+ * (for example, while enabling interrupt-remapping) then
+ * we got the things already rolling from a sane state.
+ */
+ if (iommu->qi)
+ continue;
+
+ /*
+ * Clear any previous faults.
+ */
+ dmar_fault(-1, iommu);
+ /*
+ * Disable queued invalidation if supported and already enabled
+ * before OS handover.
+ */
+ dmar_disable_qi(iommu);
+ }
+
+ for_each_drhd_unit(drhd) {
+ if (drhd->ignored)
+ continue;
+
+ iommu = drhd->iommu;
+
if (dmar_enable_qi(iommu)) {
/*
* Queued Invalidate not enabled, use Register Based
@@ -2109,7 +2014,6 @@ static int __init init_dmars(void)
if (drhd->ignored)
continue;
iommu = drhd->iommu;
- sprintf (iommu->name, "dmar%d", unit++);
iommu_flush_write_buffer(iommu);
@@ -2171,15 +2075,15 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
struct pci_dev *pdev = to_pci_dev(dev);
struct iova *iova = NULL;
- if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
+ if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
iova = iommu_alloc_iova(domain, size, dma_mask);
else {
/*
* First try to allocate an io virtual address in
- * DMA_32BIT_MASK and if that fails then try allocating
+ * DMA_BIT_MASK(32) and if that fails then try allocating
* from higher range
*/
- iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
+ iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
if (!iova)
iova = iommu_alloc_iova(domain, size, dma_mask);
}
@@ -2264,7 +2168,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
* is not a big problem
*/
ret = domain_page_mapping(domain, start_paddr,
- ((u64)paddr) & PAGE_MASK, size, prot);
+ ((u64)paddr) & PHYSICAL_PAGE_MASK,
+ size, prot);
if (ret)
goto error;
@@ -2279,16 +2184,18 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
error:
if (iova)
__free_iova(&domain->iovad, iova);
- printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
+ printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
pci_name(pdev), size, (unsigned long long)paddr, dir);
return 0;
}
-dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
- size_t size, int dir)
+static dma_addr_t intel_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
- return __intel_map_single(hwdev, paddr, size, dir,
- to_pci_dev(hwdev)->dma_mask);
+ return __intel_map_single(dev, page_to_phys(page) + offset, size,
+ dir, to_pci_dev(dev)->dma_mask);
}
static void flush_unmaps(void)
@@ -2352,8 +2259,9 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
-void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
- int dir)
+static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct dmar_domain *domain;
@@ -2375,7 +2283,7 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
start_addr = iova->pfn_lo << PAGE_SHIFT;
size = aligned_size((u64)dev_addr, size);
- pr_debug("Device %s unmapping: %lx@%llx\n",
+ pr_debug("Device %s unmapping: %zx@%llx\n",
pci_name(pdev), size, (unsigned long long)start_addr);
/* clear the whole page */
@@ -2397,8 +2305,14 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
}
}
-void *intel_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags)
+static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+ int dir)
+{
+ intel_unmap_page(dev, dev_addr, size, dir, NULL);
+}
+
+static void *intel_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags)
{
void *vaddr;
int order;
@@ -2421,8 +2335,8 @@ void *intel_alloc_coherent(struct device *hwdev, size_t size,
return NULL;
}
-void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
+static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
{
int order;
@@ -2433,10 +2347,9 @@ void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
free_pages((unsigned long)vaddr, order);
}
-#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
-
-void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
- int nelems, int dir)
+static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+ int nelems, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
int i;
struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2444,7 +2357,7 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
unsigned long start_addr;
struct iova *iova;
size_t size = 0;
- void *addr;
+ phys_addr_t addr;
struct scatterlist *sg;
struct intel_iommu *iommu;
@@ -2460,7 +2373,7 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
if (!iova)
return;
for_each_sg(sglist, sg, nelems, i) {
- addr = SG_ENT_VIRT_ADDRESS(sg);
+ addr = page_to_phys(sg_page(sg)) + sg->offset;
size += aligned_size((u64)addr, sg->length);
}
@@ -2487,16 +2400,16 @@ static int intel_nontranslate_map_sg(struct device *hddev,
for_each_sg(sglist, sg, nelems, i) {
BUG_ON(!sg_page(sg));
- sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
+ sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
sg->dma_length = sg->length;
}
return nelems;
}
-int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
- int dir)
+static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
- void *addr;
+ phys_addr_t addr;
int i;
struct pci_dev *pdev = to_pci_dev(hwdev);
struct dmar_domain *domain;
@@ -2520,8 +2433,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
iommu = domain_get_iommu(domain);
for_each_sg(sglist, sg, nelems, i) {
- addr = SG_ENT_VIRT_ADDRESS(sg);
- addr = (void *)virt_to_phys(addr);
+ addr = page_to_phys(sg_page(sg)) + sg->offset;
size += aligned_size((u64)addr, sg->length);
}
@@ -2544,12 +2456,11 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
start_addr = iova->pfn_lo << PAGE_SHIFT;
offset = 0;
for_each_sg(sglist, sg, nelems, i) {
- addr = SG_ENT_VIRT_ADDRESS(sg);
- addr = (void *)virt_to_phys(addr);
+ addr = page_to_phys(sg_page(sg)) + sg->offset;
size = aligned_size((u64)addr, sg->length);
ret = domain_page_mapping(domain, start_addr + offset,
- ((u64)addr) & PAGE_MASK,
- size, prot);
+ ((u64)addr) & PHYSICAL_PAGE_MASK,
+ size, prot);
if (ret) {
/* clear the page */
dma_pte_clear_range(domain, start_addr,
@@ -2574,13 +2485,19 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
return nelems;
}
-static struct dma_mapping_ops intel_dma_ops = {
+static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return !dma_addr;
+}
+
+struct dma_map_ops intel_dma_ops = {
.alloc_coherent = intel_alloc_coherent,
.free_coherent = intel_free_coherent,
- .map_single = intel_map_single,
- .unmap_single = intel_unmap_single,
.map_sg = intel_map_sg,
.unmap_sg = intel_unmap_sg,
+ .map_page = intel_map_page,
+ .unmap_page = intel_unmap_page,
+ .mapping_error = intel_mapping_error,
};
static inline int iommu_domain_cache_init(void)
@@ -2707,6 +2624,150 @@ static void __init init_no_remapping_devices(void)
}
}
+#ifdef CONFIG_SUSPEND
+static int init_iommu_hw(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu = NULL;
+
+ for_each_active_iommu(iommu, drhd)
+ if (iommu->qi)
+ dmar_reenable_qi(iommu);
+
+ for_each_active_iommu(iommu, drhd) {
+ iommu_flush_write_buffer(iommu);
+
+ iommu_set_root_entry(iommu);
+
+ iommu->flush.flush_context(iommu, 0, 0, 0,
+ DMA_CCMD_GLOBAL_INVL, 0);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+ DMA_TLB_GLOBAL_FLUSH, 0);
+ iommu_disable_protect_mem_regions(iommu);
+ iommu_enable_translation(iommu);
+ }
+
+ return 0;
+}
+
+static void iommu_flush_all(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+
+ for_each_active_iommu(iommu, drhd) {
+ iommu->flush.flush_context(iommu, 0, 0, 0,
+ DMA_CCMD_GLOBAL_INVL, 0);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+ DMA_TLB_GLOBAL_FLUSH, 0);
+ }
+}
+
+static int iommu_suspend(struct sys_device *dev, pm_message_t state)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu = NULL;
+ unsigned long flag;
+
+ for_each_active_iommu(iommu, drhd) {
+ iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
+ GFP_ATOMIC);
+ if (!iommu->iommu_state)
+ goto nomem;
+ }
+
+ iommu_flush_all();
+
+ for_each_active_iommu(iommu, drhd) {
+ iommu_disable_translation(iommu);
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+
+ iommu->iommu_state[SR_DMAR_FECTL_REG] =
+ readl(iommu->reg + DMAR_FECTL_REG);
+ iommu->iommu_state[SR_DMAR_FEDATA_REG] =
+ readl(iommu->reg + DMAR_FEDATA_REG);
+ iommu->iommu_state[SR_DMAR_FEADDR_REG] =
+ readl(iommu->reg + DMAR_FEADDR_REG);
+ iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
+ readl(iommu->reg + DMAR_FEUADDR_REG);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+ }
+ return 0;
+
+nomem:
+ for_each_active_iommu(iommu, drhd)
+ kfree(iommu->iommu_state);
+
+ return -ENOMEM;
+}
+
+static int iommu_resume(struct sys_device *dev)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu = NULL;
+ unsigned long flag;
+
+ if (init_iommu_hw()) {
+ WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
+ return -EIO;
+ }
+
+ for_each_active_iommu(iommu, drhd) {
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+
+ writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
+ iommu->reg + DMAR_FECTL_REG);
+ writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
+ iommu->reg + DMAR_FEDATA_REG);
+ writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
+ iommu->reg + DMAR_FEADDR_REG);
+ writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
+ iommu->reg + DMAR_FEUADDR_REG);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+ }
+
+ for_each_active_iommu(iommu, drhd)
+ kfree(iommu->iommu_state);
+
+ return 0;
+}
+
+static struct sysdev_class iommu_sysclass = {
+ .name = "iommu",
+ .resume = iommu_resume,
+ .suspend = iommu_suspend,
+};
+
+static struct sys_device device_iommu = {
+ .cls = &iommu_sysclass,
+};
+
+static int __init init_iommu_sysfs(void)
+{
+ int error;
+
+ error = sysdev_class_register(&iommu_sysclass);
+ if (error)
+ return error;
+
+ error = sysdev_register(&device_iommu);
+ if (error)
+ sysdev_class_unregister(&iommu_sysclass);
+
+ return error;
+}
+
+#else
+static int __init init_iommu_sysfs(void)
+{
+ return 0;
+}
+#endif /* CONFIG_PM */
+
int __init intel_iommu_init(void)
{
int ret = 0;
@@ -2742,6 +2803,7 @@ int __init intel_iommu_init(void)
init_timer(&unmap_timer);
force_iommu = 1;
dma_ops = &intel_dma_ops;
+ init_iommu_sysfs();
register_iommu(&intel_iommu_ops);
@@ -2758,6 +2820,7 @@ static int vm_domain_add_dev_info(struct dmar_domain *domain,
if (!info)
return -ENOMEM;
+ info->segment = pci_domain_nr(pdev->bus);
info->bus = pdev->bus->number;
info->devfn = pdev->devfn;
info->dev = pdev;
@@ -2772,6 +2835,33 @@ static int vm_domain_add_dev_info(struct dmar_domain *domain,
return 0;
}
+static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
+ struct pci_dev *pdev)
+{
+ struct pci_dev *tmp, *parent;
+
+ if (!iommu || !pdev)
+ return;
+
+ /* dependent device detach */
+ tmp = pci_find_upstream_pcie_bridge(pdev);
+ /* Secondary interface's bus number and devfn 0 */
+ if (tmp) {
+ parent = pdev->bus->self;
+ while (parent != tmp) {
+ iommu_detach_dev(iommu, parent->bus->number,
+ parent->devfn);
+ parent = parent->bus->self;
+ }
+ if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
+ iommu_detach_dev(iommu,
+ tmp->subordinate->number, 0);
+ else /* this is a legacy PCI bridge */
+ iommu_detach_dev(iommu, tmp->bus->number,
+ tmp->devfn);
+ }
+}
+
static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
struct pci_dev *pdev)
{
@@ -2781,13 +2871,15 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
int found = 0;
struct list_head *entry, *tmp;
- iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+ iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
+ pdev->devfn);
if (!iommu)
return;
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_safe(entry, tmp, &domain->devices) {
info = list_entry(entry, struct device_domain_info, link);
+ /* No need to compare PCI domain; it has to be the same */
if (info->bus == pdev->bus->number &&
info->devfn == pdev->devfn) {
list_del(&info->link);
@@ -2797,6 +2889,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
spin_unlock_irqrestore(&device_domain_lock, flags);
iommu_detach_dev(iommu, info->bus, info->devfn);
+ iommu_detach_dependent_devices(iommu, pdev);
free_devinfo_mem(info);
spin_lock_irqsave(&device_domain_lock, flags);
@@ -2811,7 +2904,8 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
* owned by this domain, clear this iommu in iommu_bmp
* update iommu count and coherency
*/
- if (device_to_iommu(info->bus, info->devfn) == iommu)
+ if (iommu == device_to_iommu(info->segment, info->bus,
+ info->devfn))
found = 1;
}
@@ -2820,7 +2914,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
clear_bit(iommu->seq_id, &domain->iommu_bmp);
domain->iommu_count--;
- domain_update_iommu_coherency(domain);
+ domain_update_iommu_cap(domain);
spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
}
@@ -2844,17 +2938,18 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
spin_unlock_irqrestore(&device_domain_lock, flags1);
- iommu = device_to_iommu(info->bus, info->devfn);
+ iommu = device_to_iommu(info->segment, info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);
+ iommu_detach_dependent_devices(iommu, info->dev);
/* clear this iommu in iommu_bmp, update iommu count
- * and coherency
+ * and capabilities
*/
spin_lock_irqsave(&domain->iommu_lock, flags2);
if (test_and_clear_bit(iommu->seq_id,
&domain->iommu_bmp)) {
domain->iommu_count--;
- domain_update_iommu_coherency(domain);
+ domain_update_iommu_cap(domain);
}
spin_unlock_irqrestore(&domain->iommu_lock, flags2);
@@ -3031,7 +3126,8 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
}
}
- iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+ iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
+ pdev->devfn);
if (!iommu)
return -ENODEV;
@@ -3077,6 +3173,8 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
prot |= DMA_PTE_READ;
if (iommu_prot & IOMMU_WRITE)
prot |= DMA_PTE_WRITE;
+ if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
+ prot |= DMA_PTE_SNP;
max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
if (dmar_domain->max_addr < max_addr) {
@@ -3130,6 +3228,17 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
return phys;
}
+static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
+ unsigned long cap)
+{
+ struct dmar_domain *dmar_domain = domain->priv;
+
+ if (cap == IOMMU_CAP_CACHE_COHERENCY)
+ return dmar_domain->iommu_snooping;
+
+ return 0;
+}
+
static struct iommu_ops intel_iommu_ops = {
.domain_init = intel_iommu_domain_init,
.domain_destroy = intel_iommu_domain_destroy,
@@ -3138,6 +3247,7 @@ static struct iommu_ops intel_iommu_ops = {
.map = intel_iommu_map_range,
.unmap = intel_iommu_unmap_range,
.iova_to_phys = intel_iommu_iova_to_phys,
+ .domain_has_cap = intel_iommu_domain_has_cap,
};
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
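A minimal client-side sketch (not part of this patch): the new domain_has_cap hook exposes snoop control through the generic IOMMU API, and the IOMMU_CACHE flag in intel_iommu_map_range() only takes effect when the domain supports snooping. The sketch assumes the iommu_domain_has_cap()/iommu_map_range() wrappers of this kernel generation; the function name is hypothetical.

#include <linux/iommu.h>

/*
 * Hypothetical IOMMU API client: only request a snooped (cache-coherent)
 * mapping when the domain advertises IOMMU_CAP_CACHE_COHERENCY, which
 * intel_iommu_domain_has_cap() backs with dmar_domain->iommu_snooping.
 */
static int example_map_coherent(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr,
				size_t size)
{
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
		prot |= IOMMU_CACHE;	/* becomes DMA_PTE_SNP in the page table */

	return iommu_map_range(domain, iova, paddr, size, prot);
}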
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 45effc5726c..3a0cb0bb059 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -6,13 +6,23 @@
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
+#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
+#include <acpi/acpi.h>
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;
+static int disable_intremap;
+static __init int setup_nointremap(char *str)
+{
+ disable_intremap = 1;
+ return 0;
+}
+early_param("nointremap", setup_nointremap);
+
struct irq_2_iommu {
struct intel_iommu *iommu;
u16 irte_index;
@@ -20,16 +30,13 @@ struct irq_2_iommu {
u8 irte_mask;
};
-#ifdef CONFIG_SPARSE_IRQ
-static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
+#ifdef CONFIG_GENERIC_HARDIRQS
+static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
{
struct irq_2_iommu *iommu;
- int node;
-
- node = cpu_to_node(cpu);
iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
- printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);
+ printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);
return iommu;
}
@@ -46,7 +53,7 @@ static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
return desc->irq_2_iommu;
}
-static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
+static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
{
struct irq_desc *desc;
struct irq_2_iommu *irq_iommu;
@@ -54,7 +61,7 @@ static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
/*
* alloc irq desc if not allocated already.
*/
- desc = irq_to_desc_alloc_cpu(irq, cpu);
+ desc = irq_to_desc_alloc_node(irq, node);
if (!desc) {
printk(KERN_INFO "can not get irq_desc for %d\n", irq);
return NULL;
@@ -63,14 +70,14 @@ static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
irq_iommu = desc->irq_2_iommu;
if (!irq_iommu)
- desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu);
+ desc->irq_2_iommu = get_one_free_irq_2_iommu(node);
return desc->irq_2_iommu;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
- return irq_2_iommu_alloc_cpu(irq, boot_cpu_id);
+ return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id));
}
#else /* !CONFIG_SPARSE_IRQ */
@@ -116,21 +123,22 @@ int get_irte(int irq, struct irte *entry)
{
int index;
struct irq_2_iommu *irq_iommu;
+ unsigned long flags;
if (!entry)
return -1;
- spin_lock(&irq_2_ir_lock);
+ spin_lock_irqsave(&irq_2_ir_lock, flags);
irq_iommu = valid_irq_2_iommu(irq);
if (!irq_iommu) {
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1;
}
index = irq_iommu->irte_index + irq_iommu->sub_handle;
*entry = *(irq_iommu->iommu->ir_table->base + index);
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return 0;
}
@@ -140,6 +148,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
struct irq_2_iommu *irq_iommu;
u16 index, start_index;
unsigned int mask = 0;
+ unsigned long flags;
int i;
if (!count)
@@ -169,7 +178,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
return -1;
}
- spin_lock(&irq_2_ir_lock);
+ spin_lock_irqsave(&irq_2_ir_lock, flags);
do {
for (i = index; i < index + count; i++)
if (table->base[i].present)
@@ -181,7 +190,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
if (index == start_index) {
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
printk(KERN_ERR "can't allocate an IRTE\n");
return -1;
}
@@ -192,7 +201,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
irq_iommu = irq_2_iommu_alloc(irq);
if (!irq_iommu) {
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
printk(KERN_ERR "can't allocate irq_2_iommu\n");
return -1;
}
@@ -202,7 +211,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
irq_iommu->sub_handle = 0;
irq_iommu->irte_mask = mask;
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return index;
}
@@ -222,30 +231,32 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
int index;
struct irq_2_iommu *irq_iommu;
+ unsigned long flags;
- spin_lock(&irq_2_ir_lock);
+ spin_lock_irqsave(&irq_2_ir_lock, flags);
irq_iommu = valid_irq_2_iommu(irq);
if (!irq_iommu) {
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1;
}
*sub_handle = irq_iommu->sub_handle;
index = irq_iommu->irte_index;
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return index;
}
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
struct irq_2_iommu *irq_iommu;
+ unsigned long flags;
- spin_lock(&irq_2_ir_lock);
+ spin_lock_irqsave(&irq_2_ir_lock, flags);
irq_iommu = irq_2_iommu_alloc(irq);
if (!irq_iommu) {
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
printk(KERN_ERR "can't allocate irq_2_iommu\n");
return -1;
}
@@ -255,7 +266,7 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
irq_iommu->sub_handle = subhandle;
irq_iommu->irte_mask = 0;
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return 0;
}
@@ -263,11 +274,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
struct irq_2_iommu *irq_iommu;
+ unsigned long flags;
- spin_lock(&irq_2_ir_lock);
+ spin_lock_irqsave(&irq_2_ir_lock, flags);
irq_iommu = valid_irq_2_iommu(irq);
if (!irq_iommu) {
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1;
}
@@ -276,7 +288,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
irq_iommu->sub_handle = 0;
irq_2_iommu(irq)->irte_mask = 0;
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return 0;
}
@@ -288,11 +300,12 @@ int modify_irte(int irq, struct irte *irte_modified)
struct irte *irte;
struct intel_iommu *iommu;
struct irq_2_iommu *irq_iommu;
+ unsigned long flags;
- spin_lock(&irq_2_ir_lock);
+ spin_lock_irqsave(&irq_2_ir_lock, flags);
irq_iommu = valid_irq_2_iommu(irq);
if (!irq_iommu) {
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1;
}
@@ -301,11 +314,11 @@ int modify_irte(int irq, struct irte *irte_modified)
index = irq_iommu->irte_index + irq_iommu->sub_handle;
irte = &iommu->ir_table->base[index];
- set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
+ set_64bit((unsigned long *)irte, irte_modified->low);
__iommu_flush_cache(iommu, irte, sizeof(*irte));
rc = qi_flush_iec(iommu, index, 0);
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return rc;
}
@@ -316,11 +329,12 @@ int flush_irte(int irq)
int index;
struct intel_iommu *iommu;
struct irq_2_iommu *irq_iommu;
+ unsigned long flags;
- spin_lock(&irq_2_ir_lock);
+ spin_lock_irqsave(&irq_2_ir_lock, flags);
irq_iommu = valid_irq_2_iommu(irq);
if (!irq_iommu) {
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1;
}
@@ -329,7 +343,7 @@ int flush_irte(int irq)
index = irq_iommu->irte_index + irq_iommu->sub_handle;
rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return rc;
}
@@ -362,11 +376,12 @@ int free_irte(int irq)
struct irte *irte;
struct intel_iommu *iommu;
struct irq_2_iommu *irq_iommu;
+ unsigned long flags;
- spin_lock(&irq_2_ir_lock);
+ spin_lock_irqsave(&irq_2_ir_lock, flags);
irq_iommu = valid_irq_2_iommu(irq);
if (!irq_iommu) {
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1;
}
@@ -377,7 +392,7 @@ int free_irte(int irq)
if (!irq_iommu->sub_handle) {
for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
- set_64bit((unsigned long *)irte, 0);
+ set_64bit((unsigned long *)(irte + i), 0);
rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
@@ -386,7 +401,7 @@ int free_irte(int irq)
irq_iommu->sub_handle = 0;
irq_iommu->irte_mask = 0;
- spin_unlock(&irq_2_ir_lock);
+ spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return rc;
}
@@ -406,6 +421,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
/* Set interrupt-remapping table pointer */
cmd = iommu->gcmd | DMA_GCMD_SIRTP;
+ iommu->gcmd |= DMA_GCMD_SIRTP;
writel(cmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
@@ -438,12 +454,12 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
struct page *pages;
ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!iommu->ir_table)
return -ENOMEM;
- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
+ pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
if (!pages) {
printk(KERN_ERR "failed to allocate pages of order %d\n",
@@ -458,11 +474,85 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
return 0;
}
+/*
+ * Disable Interrupt Remapping.
+ */
+static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
+{
+ unsigned long flags;
+ u32 sts;
+
+ if (!ecap_ir_support(iommu->ecap))
+ return;
+
+ /*
+ * global invalidation of interrupt entry cache before disabling
+ * interrupt-remapping.
+ */
+ qi_global_iec(iommu);
+
+ spin_lock_irqsave(&iommu->register_lock, flags);
+
+ sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+ if (!(sts & DMA_GSTS_IRES))
+ goto end;
+
+ iommu->gcmd &= ~DMA_GCMD_IRE;
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, !(sts & DMA_GSTS_IRES), sts);
+
+end:
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+int __init intr_remapping_supported(void)
+{
+ struct dmar_drhd_unit *drhd;
+
+ if (disable_intremap)
+ return 0;
+
+ for_each_drhd_unit(drhd) {
+ struct intel_iommu *iommu = drhd->iommu;
+
+ if (!ecap_ir_support(iommu->ecap))
+ return 0;
+ }
+
+ return 1;
+}
+
int __init enable_intr_remapping(int eim)
{
struct dmar_drhd_unit *drhd;
int setup = 0;
+ for_each_drhd_unit(drhd) {
+ struct intel_iommu *iommu = drhd->iommu;
+
+ /*
+		 * If queued invalidation is already initialized, don't
+		 * disable it here.
+ */
+ if (iommu->qi)
+ continue;
+
+ /*
+ * Clear previous faults.
+ */
+ dmar_fault(-1, iommu);
+
+ /*
+ * Disable intr remapping and queued invalidation, if already
+ * enabled prior to OS handover.
+ */
+ iommu_disable_intr_remapping(iommu);
+
+ dmar_disable_qi(iommu);
+ }
+
/*
* check for the Interrupt-remapping support
*/
@@ -586,3 +676,54 @@ int __init parse_ioapics_under_ir(void)
return ir_supported;
}
+
+void disable_intr_remapping(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu = NULL;
+
+ /*
+ * Disable Interrupt-remapping for all the DRHD's now.
+ */
+ for_each_iommu(iommu, drhd) {
+ if (!ecap_ir_support(iommu->ecap))
+ continue;
+
+ iommu_disable_intr_remapping(iommu);
+ }
+}
+
+int reenable_intr_remapping(int eim)
+{
+ struct dmar_drhd_unit *drhd;
+ int setup = 0;
+ struct intel_iommu *iommu = NULL;
+
+ for_each_iommu(iommu, drhd)
+ if (iommu->qi)
+ dmar_reenable_qi(iommu);
+
+ /*
+ * Setup Interrupt-remapping for all the DRHD's now.
+ */
+ for_each_iommu(iommu, drhd) {
+ if (!ecap_ir_support(iommu->ecap))
+ continue;
+
+		/* Set up interrupt remapping for iommu. */
+ iommu_set_intr_remapping(iommu, eim);
+ setup = 1;
+ }
+
+ if (!setup)
+ goto error;
+
+ return 0;
+
+error:
+ /*
+ * handle error condition gracefully here!
+ */
+ return -1;
+}
+
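disable_intr_remapping() and reenable_intr_remapping() are intended for the architecture's suspend/resume path, while the new "nointremap" early parameter simply makes intr_remapping_supported() report no support. A rough ordering sketch, assuming hypothetical arch-level hooks rather than the real x86 callers:

/* Hypothetical suspend/resume glue; only the two exported calls are real. */
static int example_iommu_suspend(void)
{
	if (intr_remapping_enabled)
		disable_intr_remapping();	/* quiesce every DRHD */
	return 0;
}

static void example_iommu_resume(int eim)
{
	if (intr_remapping_enabled && reenable_intr_remapping(eim))
		printk(KERN_WARNING "interrupt remapping was not re-enabled\n");
}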
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
new file mode 100644
index 00000000000..03c7706c0a0
--- /dev/null
+++ b/drivers/pci/iov.c
@@ -0,0 +1,683 @@
+/*
+ * drivers/pci/iov.c
+ *
+ * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
+ *
+ * PCI Express I/O Virtualization (IOV) support.
+ * Single Root IOV 1.0
+ */
+
+#include <linux/pci.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include "pci.h"
+
+#define VIRTFN_ID_LEN 16
+
+static inline u8 virtfn_bus(struct pci_dev *dev, int id)
+{
+ return dev->bus->number + ((dev->devfn + dev->sriov->offset +
+ dev->sriov->stride * id) >> 8);
+}
+
+static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
+{
+ return (dev->devfn + dev->sriov->offset +
+ dev->sriov->stride * id) & 0xff;
+}
+
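virtfn_bus() and virtfn_devfn() implement the SR-IOV routing-ID arithmetic: VF id sits at the PF's routing ID plus First VF Offset plus id times VF Stride, with any carry out of the low byte moving the VF onto a higher bus. With made-up numbers, a PF at bus 0x05, devfn 0x00, offset 0x80 and stride 2 places VF 3 at devfn (0x00 + 0x80 + 2 * 3) & 0xff = 0x86 on bus 0x05; an offset of 0x100 instead yields devfn 0x06 on bus 0x06, because the sum 0x106 carries into the bus number.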
+static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
+{
+ int rc;
+ struct pci_bus *child;
+
+ if (bus->number == busnr)
+ return bus;
+
+ child = pci_find_bus(pci_domain_nr(bus), busnr);
+ if (child)
+ return child;
+
+ child = pci_add_new_bus(bus, NULL, busnr);
+ if (!child)
+ return NULL;
+
+ child->subordinate = busnr;
+ child->dev.parent = bus->bridge;
+ rc = pci_bus_add_child(child);
+ if (rc) {
+ pci_remove_bus(child);
+ return NULL;
+ }
+
+ return child;
+}
+
+static void virtfn_remove_bus(struct pci_bus *bus, int busnr)
+{
+ struct pci_bus *child;
+
+ if (bus->number == busnr)
+ return;
+
+ child = pci_find_bus(pci_domain_nr(bus), busnr);
+ BUG_ON(!child);
+
+ if (list_empty(&child->devices))
+ pci_remove_bus(child);
+}
+
+static int virtfn_add(struct pci_dev *dev, int id, int reset)
+{
+ int i;
+ int rc;
+ u64 size;
+ char buf[VIRTFN_ID_LEN];
+ struct pci_dev *virtfn;
+ struct resource *res;
+ struct pci_sriov *iov = dev->sriov;
+
+ virtfn = alloc_pci_dev();
+ if (!virtfn)
+ return -ENOMEM;
+
+ mutex_lock(&iov->dev->sriov->lock);
+ virtfn->bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
+ if (!virtfn->bus) {
+ kfree(virtfn);
+ mutex_unlock(&iov->dev->sriov->lock);
+ return -ENOMEM;
+ }
+ virtfn->devfn = virtfn_devfn(dev, id);
+ virtfn->vendor = dev->vendor;
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
+ pci_setup_device(virtfn);
+ virtfn->dev.parent = dev->dev.parent;
+
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = dev->resource + PCI_IOV_RESOURCES + i;
+ if (!res->parent)
+ continue;
+ virtfn->resource[i].name = pci_name(virtfn);
+ virtfn->resource[i].flags = res->flags;
+ size = resource_size(res);
+ do_div(size, iov->total);
+ virtfn->resource[i].start = res->start + size * id;
+ virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
+ rc = request_resource(res, &virtfn->resource[i]);
+ BUG_ON(rc);
+ }
+
+ if (reset)
+ __pci_reset_function(virtfn);
+
+ pci_device_add(virtfn, virtfn->bus);
+ mutex_unlock(&iov->dev->sriov->lock);
+
+ virtfn->physfn = pci_dev_get(dev);
+ virtfn->is_virtfn = 1;
+
+ rc = pci_bus_add_device(virtfn);
+ if (rc)
+ goto failed1;
+ sprintf(buf, "virtfn%u", id);
+ rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
+ if (rc)
+ goto failed1;
+ rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
+ if (rc)
+ goto failed2;
+
+ kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
+
+ return 0;
+
+failed2:
+ sysfs_remove_link(&dev->dev.kobj, buf);
+failed1:
+ pci_dev_put(dev);
+ mutex_lock(&iov->dev->sriov->lock);
+ pci_remove_bus_device(virtfn);
+ virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
+ mutex_unlock(&iov->dev->sriov->lock);
+
+ return rc;
+}
+
+static void virtfn_remove(struct pci_dev *dev, int id, int reset)
+{
+ char buf[VIRTFN_ID_LEN];
+ struct pci_bus *bus;
+ struct pci_dev *virtfn;
+ struct pci_sriov *iov = dev->sriov;
+
+ bus = pci_find_bus(pci_domain_nr(dev->bus), virtfn_bus(dev, id));
+ if (!bus)
+ return;
+
+ virtfn = pci_get_slot(bus, virtfn_devfn(dev, id));
+ if (!virtfn)
+ return;
+
+ pci_dev_put(virtfn);
+
+ if (reset) {
+ device_release_driver(&virtfn->dev);
+ __pci_reset_function(virtfn);
+ }
+
+ sprintf(buf, "virtfn%u", id);
+ sysfs_remove_link(&dev->dev.kobj, buf);
+ sysfs_remove_link(&virtfn->dev.kobj, "physfn");
+
+ mutex_lock(&iov->dev->sriov->lock);
+ pci_remove_bus_device(virtfn);
+ virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
+ mutex_unlock(&iov->dev->sriov->lock);
+
+ pci_dev_put(dev);
+}
+
+static int sriov_migration(struct pci_dev *dev)
+{
+ u16 status;
+ struct pci_sriov *iov = dev->sriov;
+
+ if (!iov->nr_virtfn)
+ return 0;
+
+ if (!(iov->cap & PCI_SRIOV_CAP_VFM))
+ return 0;
+
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
+ if (!(status & PCI_SRIOV_STATUS_VFM))
+ return 0;
+
+ schedule_work(&iov->mtask);
+
+ return 1;
+}
+
+static void sriov_migration_task(struct work_struct *work)
+{
+ int i;
+ u8 state;
+ u16 status;
+ struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);
+
+ for (i = iov->initial; i < iov->nr_virtfn; i++) {
+ state = readb(iov->mstate + i);
+ if (state == PCI_SRIOV_VFM_MI) {
+ writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
+ state = readb(iov->mstate + i);
+ if (state == PCI_SRIOV_VFM_AV)
+ virtfn_add(iov->self, i, 1);
+ } else if (state == PCI_SRIOV_VFM_MO) {
+ virtfn_remove(iov->self, i, 1);
+ writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
+ state = readb(iov->mstate + i);
+ if (state == PCI_SRIOV_VFM_AV)
+ virtfn_add(iov->self, i, 0);
+ }
+ }
+
+ pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
+ status &= ~PCI_SRIOV_STATUS_VFM;
+ pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
+}
+
+static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
+{
+ int bir;
+ u32 table;
+ resource_size_t pa;
+ struct pci_sriov *iov = dev->sriov;
+
+ if (nr_virtfn <= iov->initial)
+ return 0;
+
+ pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
+ bir = PCI_SRIOV_VFM_BIR(table);
+ if (bir > PCI_STD_RESOURCE_END)
+ return -EIO;
+
+ table = PCI_SRIOV_VFM_OFFSET(table);
+ if (table + nr_virtfn > pci_resource_len(dev, bir))
+ return -EIO;
+
+ pa = pci_resource_start(dev, bir) + table;
+ iov->mstate = ioremap(pa, nr_virtfn);
+ if (!iov->mstate)
+ return -ENOMEM;
+
+ INIT_WORK(&iov->mtask, sriov_migration_task);
+
+ iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+
+ return 0;
+}
+
+static void sriov_disable_migration(struct pci_dev *dev)
+{
+ struct pci_sriov *iov = dev->sriov;
+
+ iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+
+ cancel_work_sync(&iov->mtask);
+ iounmap(iov->mstate);
+}
+
+static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
+{
+ int rc;
+ int i, j;
+ int nres;
+ u16 offset, stride, initial;
+ struct resource *res;
+ struct pci_dev *pdev;
+ struct pci_sriov *iov = dev->sriov;
+
+ if (!nr_virtfn)
+ return 0;
+
+ if (iov->nr_virtfn)
+ return -EINVAL;
+
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
+ if (initial > iov->total ||
+ (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total)))
+ return -EIO;
+
+ if (nr_virtfn < 0 || nr_virtfn > iov->total ||
+ (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
+ return -EINVAL;
+
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
+ if (!offset || (nr_virtfn > 1 && !stride))
+ return -EIO;
+
+ nres = 0;
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = dev->resource + PCI_IOV_RESOURCES + i;
+ if (res->parent)
+ nres++;
+ }
+ if (nres != iov->nres) {
+ dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
+ return -ENOMEM;
+ }
+
+ iov->offset = offset;
+ iov->stride = stride;
+
+ if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->subordinate) {
+ dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
+ return -ENOMEM;
+ }
+
+ if (iov->link != dev->devfn) {
+ pdev = pci_get_slot(dev->bus, iov->link);
+ if (!pdev)
+ return -ENODEV;
+
+ pci_dev_put(pdev);
+
+ if (!pdev->is_physfn)
+ return -ENODEV;
+
+ rc = sysfs_create_link(&dev->dev.kobj,
+ &pdev->dev.kobj, "dep_link");
+ if (rc)
+ return rc;
+ }
+
+ iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
+ pci_block_user_cfg_access(dev);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+ msleep(100);
+ pci_unblock_user_cfg_access(dev);
+
+ iov->initial = initial;
+ if (nr_virtfn < initial)
+ initial = nr_virtfn;
+
+ for (i = 0; i < initial; i++) {
+ rc = virtfn_add(dev, i, 0);
+ if (rc)
+ goto failed;
+ }
+
+ if (iov->cap & PCI_SRIOV_CAP_VFM) {
+ rc = sriov_enable_migration(dev, nr_virtfn);
+ if (rc)
+ goto failed;
+ }
+
+ kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
+ iov->nr_virtfn = nr_virtfn;
+
+ return 0;
+
+failed:
+ for (j = 0; j < i; j++)
+ virtfn_remove(dev, j, 0);
+
+ iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
+ pci_block_user_cfg_access(dev);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+ ssleep(1);
+ pci_unblock_user_cfg_access(dev);
+
+ if (iov->link != dev->devfn)
+ sysfs_remove_link(&dev->dev.kobj, "dep_link");
+
+ return rc;
+}
+
+static void sriov_disable(struct pci_dev *dev)
+{
+ int i;
+ struct pci_sriov *iov = dev->sriov;
+
+ if (!iov->nr_virtfn)
+ return;
+
+ if (iov->cap & PCI_SRIOV_CAP_VFM)
+ sriov_disable_migration(dev);
+
+ for (i = 0; i < iov->nr_virtfn; i++)
+ virtfn_remove(dev, i, 0);
+
+ iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
+ pci_block_user_cfg_access(dev);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+ ssleep(1);
+ pci_unblock_user_cfg_access(dev);
+
+ if (iov->link != dev->devfn)
+ sysfs_remove_link(&dev->dev.kobj, "dep_link");
+
+ iov->nr_virtfn = 0;
+}
+
+static int sriov_init(struct pci_dev *dev, int pos)
+{
+ int i;
+ int rc;
+ int nres;
+ u32 pgsz;
+ u16 ctrl, total, offset, stride;
+ struct pci_sriov *iov;
+ struct resource *res;
+ struct pci_dev *pdev;
+
+ if (dev->pcie_type != PCI_EXP_TYPE_RC_END &&
+ dev->pcie_type != PCI_EXP_TYPE_ENDPOINT)
+ return -ENODEV;
+
+ pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
+ if (ctrl & PCI_SRIOV_CTRL_VFE) {
+ pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
+ ssleep(1);
+ }
+
+ pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
+ if (!total)
+ return 0;
+
+ ctrl = 0;
+ list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+ if (pdev->is_physfn)
+ goto found;
+
+ pdev = NULL;
+ if (pci_ari_enabled(dev->bus))
+ ctrl |= PCI_SRIOV_CTRL_ARI;
+
+found:
+ pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
+ pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, total);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
+ if (!offset || (total > 1 && !stride))
+ return -EIO;
+
+ pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
+ i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
+ pgsz &= ~((1 << i) - 1);
+ if (!pgsz)
+ return -EIO;
+
+ pgsz &= ~(pgsz - 1);
+ pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);
+
+ nres = 0;
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = dev->resource + PCI_IOV_RESOURCES + i;
+ i += __pci_read_base(dev, pci_bar_unknown, res,
+ pos + PCI_SRIOV_BAR + i * 4);
+ if (!res->flags)
+ continue;
+ if (resource_size(res) & (PAGE_SIZE - 1)) {
+ rc = -EIO;
+ goto failed;
+ }
+ res->end = res->start + resource_size(res) * total - 1;
+ nres++;
+ }
+
+ iov = kzalloc(sizeof(*iov), GFP_KERNEL);
+ if (!iov) {
+ rc = -ENOMEM;
+ goto failed;
+ }
+
+ iov->pos = pos;
+ iov->nres = nres;
+ iov->ctrl = ctrl;
+ iov->total = total;
+ iov->offset = offset;
+ iov->stride = stride;
+ iov->pgsz = pgsz;
+ iov->self = dev;
+ pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
+ pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+ if (dev->pcie_type == PCI_EXP_TYPE_RC_END)
+ iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
+
+ if (pdev)
+ iov->dev = pci_dev_get(pdev);
+ else {
+ iov->dev = dev;
+ mutex_init(&iov->lock);
+ }
+
+ dev->sriov = iov;
+ dev->is_physfn = 1;
+
+ return 0;
+
+failed:
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = dev->resource + PCI_IOV_RESOURCES + i;
+ res->flags = 0;
+ }
+
+ return rc;
+}
+
+static void sriov_release(struct pci_dev *dev)
+{
+ BUG_ON(dev->sriov->nr_virtfn);
+
+ if (dev == dev->sriov->dev)
+ mutex_destroy(&dev->sriov->lock);
+ else
+ pci_dev_put(dev->sriov->dev);
+
+ kfree(dev->sriov);
+ dev->sriov = NULL;
+}
+
+static void sriov_restore_state(struct pci_dev *dev)
+{
+ int i;
+ u16 ctrl;
+ struct pci_sriov *iov = dev->sriov;
+
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
+ if (ctrl & PCI_SRIOV_CTRL_VFE)
+ return;
+
+ for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
+ pci_update_resource(dev, i);
+
+ pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->nr_virtfn);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+ if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
+ msleep(100);
+}
+
+/**
+ * pci_iov_init - initialize the IOV capability
+ * @dev: the PCI device
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int pci_iov_init(struct pci_dev *dev)
+{
+ int pos;
+
+ if (!dev->is_pcie)
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos)
+ return sriov_init(dev, pos);
+
+ return -ENODEV;
+}
+
+/**
+ * pci_iov_release - release resources used by the IOV capability
+ * @dev: the PCI device
+ */
+void pci_iov_release(struct pci_dev *dev)
+{
+ if (dev->is_physfn)
+ sriov_release(dev);
+}
+
+/**
+ * pci_iov_resource_bar - get position of the SR-IOV BAR
+ * @dev: the PCI device
+ * @resno: the resource number
+ * @type: the BAR type to be filled in
+ *
+ * Returns position of the BAR encapsulated in the SR-IOV capability.
+ */
+int pci_iov_resource_bar(struct pci_dev *dev, int resno,
+ enum pci_bar_type *type)
+{
+ if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
+ return 0;
+
+ BUG_ON(!dev->is_physfn);
+
+ *type = pci_bar_unknown;
+
+ return dev->sriov->pos + PCI_SRIOV_BAR +
+ 4 * (resno - PCI_IOV_RESOURCES);
+}
+
+/**
+ * pci_restore_iov_state - restore the state of the IOV capability
+ * @dev: the PCI device
+ */
+void pci_restore_iov_state(struct pci_dev *dev)
+{
+ if (dev->is_physfn)
+ sriov_restore_state(dev);
+}
+
+/**
+ * pci_iov_bus_range - find bus range used by Virtual Functions
+ * @bus: the PCI bus
+ *
+ * Returns the maximum number of buses (excluding the current one) used by
+ * Virtual Functions.
+ */
+int pci_iov_bus_range(struct pci_bus *bus)
+{
+ int max = 0;
+ u8 busnr;
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (!dev->is_physfn)
+ continue;
+ busnr = virtfn_bus(dev, dev->sriov->total - 1);
+ if (busnr > max)
+ max = busnr;
+ }
+
+ return max ? max - bus->number : 0;
+}
+
+/**
+ * pci_enable_sriov - enable the SR-IOV capability
+ * @dev: the PCI device
+ * @nr_virtfn: number of virtual functions to enable
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
+{
+ might_sleep();
+
+ if (!dev->is_physfn)
+ return -ENODEV;
+
+ return sriov_enable(dev, nr_virtfn);
+}
+EXPORT_SYMBOL_GPL(pci_enable_sriov);
+
+/**
+ * pci_disable_sriov - disable the SR-IOV capability
+ * @dev: the PCI device
+ */
+void pci_disable_sriov(struct pci_dev *dev)
+{
+ might_sleep();
+
+ if (!dev->is_physfn)
+ return;
+
+ sriov_disable(dev);
+}
+EXPORT_SYMBOL_GPL(pci_disable_sriov);
+
+/**
+ * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
+ * @dev: the PCI device
+ *
+ * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
+ *
+ * The Physical Function driver is responsible for registering an IRQ
+ * handler on the VF Migration Interrupt Message Number and for calling
+ * this function when the hardware generates that interrupt.
+ */
+irqreturn_t pci_sriov_migration(struct pci_dev *dev)
+{
+ if (!dev->is_physfn)
+ return IRQ_NONE;
+
+ return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
+}
+EXPORT_SYMBOL_GPL(pci_sriov_migration);
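A Physical Function driver built on these exports would typically enable its VFs from probe, route the VF Migration interrupt to pci_sriov_migration(), and disable SR-IOV on remove. A minimal sketch under those assumptions; the driver names, the choice of 8 VFs, and the interrupt wiring are hypothetical:

#include <linux/pci.h>
#include <linux/interrupt.h>

/* Hypothetical handler registered on the VF Migration Interrupt vector. */
static irqreturn_t example_vfm_handler(int irq, void *data)
{
	return pci_sriov_migration(data);
}

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;

	rc = pci_enable_sriov(pdev, 8);		/* request 8 Virtual Functions */
	if (rc)
		dev_warn(&pdev->dev, "SR-IOV not enabled: %d\n", rc);

	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	pci_disable_sriov(pdev);		/* no-op unless VFs were enabled */
	pci_disable_device(pdev);
}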
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index baba2eb5367..d9f06fbfa0b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -27,65 +27,65 @@ static int pci_msi_enable = 1;
/* Arch hooks */
-int __attribute__ ((weak))
-arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
+#ifndef arch_msi_check_device
+int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
return 0;
}
+#endif
-int __attribute__ ((weak))
-arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry)
-{
- return 0;
-}
-
-int __attribute__ ((weak))
-arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+#ifndef arch_setup_msi_irqs
+int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
struct msi_desc *entry;
int ret;
+ /*
+ * If an architecture wants to support multiple MSI, it needs to
+ * override arch_setup_msi_irqs()
+ */
+ if (type == PCI_CAP_ID_MSI && nvec > 1)
+ return 1;
+
list_for_each_entry(entry, &dev->msi_list, list) {
ret = arch_setup_msi_irq(dev, entry);
- if (ret)
+ if (ret < 0)
return ret;
+ if (ret > 0)
+ return -ENOSPC;
}
return 0;
}
+#endif
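Because these fallbacks are now guarded by #ifndef instead of being declared __weak, an architecture that supports multiple MSI opts out of the default by defining the symbol before msi.c sees it, usually as a self-referential macro in one of its headers, and supplying its own allocator. A sketch of the pattern; the header placement and function body are illustrative only:

/* In an architecture header (placement illustrative): */
#define arch_setup_msi_irqs arch_setup_msi_irqs
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);

/* In architecture code: allocate nvec consecutive vectors, or return a
 * positive count so the driver can retry with fewer interrupts. */
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	/* ... arch-specific vector/IRQ allocation ... */
	return 0;
}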
-void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq)
-{
- return;
-}
-
-void __attribute__ ((weak))
-arch_teardown_msi_irqs(struct pci_dev *dev)
+#ifndef arch_teardown_msi_irqs
+void arch_teardown_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry;
list_for_each_entry(entry, &dev->msi_list, list) {
- if (entry->irq != 0)
- arch_teardown_msi_irq(entry->irq);
+ int i, nvec;
+ if (entry->irq == 0)
+ continue;
+ nvec = 1 << entry->msi_attrib.multiple;
+ for (i = 0; i < nvec; i++)
+ arch_teardown_msi_irq(entry->irq + i);
}
}
+#endif
-static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
+static void msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
u16 control;
- if (pos) {
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
- control &= ~PCI_MSI_FLAGS_ENABLE;
- if (enable)
- control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
- }
-}
+ BUG_ON(!pos);
-static void msi_set_enable(struct pci_dev *dev, int enable)
-{
- __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+ if (enable)
+ control |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}
static void msix_set_enable(struct pci_dev *dev, int enable)
@@ -111,27 +111,14 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
return (1 << (1 << x)) - 1;
}
-static void msix_flush_writes(struct irq_desc *desc)
+static inline __attribute_const__ u32 msi_capable_mask(u16 control)
{
- struct msi_desc *entry;
+ return msi_mask((control >> 1) & 7);
+}
- entry = get_irq_desc_msi(desc);
- BUG_ON(!entry || !entry->dev);
- switch (entry->msi_attrib.type) {
- case PCI_CAP_ID_MSI:
- /* nothing to do */
- break;
- case PCI_CAP_ID_MSIX:
- {
- int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
- readl(entry->mask_base + offset);
- break;
- }
- default:
- BUG();
- break;
- }
+static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
+{
+ return msi_mask((control >> 4) & 7);
}
/*
@@ -139,53 +126,72 @@ static void msix_flush_writes(struct irq_desc *desc)
* mask all MSI interrupts by clearing the MSI enable bit does not work
* reliably as devices without an INTx disable bit will then generate a
* level IRQ which will never be cleared.
- *
- * Returns 1 if it succeeded in masking the interrupt and 0 if the device
- * doesn't support MSI masking.
*/
-static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag)
+static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
- struct msi_desc *entry;
+ u32 mask_bits = desc->masked;
- entry = get_irq_desc_msi(desc);
- BUG_ON(!entry || !entry->dev);
- switch (entry->msi_attrib.type) {
- case PCI_CAP_ID_MSI:
- if (entry->msi_attrib.maskbit) {
- int pos;
- u32 mask_bits;
-
- pos = (long)entry->mask_base;
- pci_read_config_dword(entry->dev, pos, &mask_bits);
- mask_bits &= ~(mask);
- mask_bits |= flag & mask;
- pci_write_config_dword(entry->dev, pos, mask_bits);
- } else {
- return 0;
- }
- break;
- case PCI_CAP_ID_MSIX:
- {
- int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
- writel(flag, entry->mask_base + offset);
- readl(entry->mask_base + offset);
- break;
- }
- default:
- BUG();
- break;
+ if (!desc->msi_attrib.maskbit)
+ return;
+
+ mask_bits &= ~mask;
+ mask_bits |= flag;
+ pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
+ desc->masked = mask_bits;
+}
+
+/*
+ * This internal function does not flush PCI writes to the device.
+ * All users must ensure that they read from the device before either
+ * assuming that the device state is up to date, or returning out of this
+ * file. This saves a few milliseconds when initialising devices with lots
+ * of MSI-X interrupts.
+ */
+static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+{
+ u32 mask_bits = desc->masked;
+ unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
+ mask_bits &= ~1;
+ mask_bits |= flag;
+ writel(mask_bits, desc->mask_base + offset);
+ desc->masked = mask_bits;
+}
+
+static void msi_set_mask_bit(unsigned irq, u32 flag)
+{
+ struct msi_desc *desc = get_irq_msi(irq);
+
+ if (desc->msi_attrib.is_msix) {
+ msix_mask_irq(desc, flag);
+ readl(desc->mask_base); /* Flush write to device */
+ } else {
+ unsigned offset = irq - desc->dev->irq;
+ msi_mask_irq(desc, 1 << offset, flag << offset);
}
- entry->msi_attrib.masked = !!flag;
- return 1;
+}
+
+void mask_msi_irq(unsigned int irq)
+{
+ msi_set_mask_bit(irq, 1);
+}
+
+void unmask_msi_irq(unsigned int irq)
+{
+ msi_set_mask_bit(irq, 0);
}
void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
struct msi_desc *entry = get_irq_desc_msi(desc);
- switch(entry->msi_attrib.type) {
- case PCI_CAP_ID_MSI:
- {
+ if (entry->msi_attrib.is_msix) {
+ void __iomem *base = entry->mask_base +
+ entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+
+ msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
+ msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
+ msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
+ } else {
struct pci_dev *dev = entry->dev;
int pos = entry->msi_attrib.pos;
u16 data;
@@ -201,21 +207,6 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
}
msg->data = data;
- break;
- }
- case PCI_CAP_ID_MSIX:
- {
- void __iomem *base;
- base = entry->mask_base +
- entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
-
- msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
- break;
- }
- default:
- BUG();
}
}
@@ -229,11 +220,25 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg)
void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
struct msi_desc *entry = get_irq_desc_msi(desc);
- switch (entry->msi_attrib.type) {
- case PCI_CAP_ID_MSI:
- {
+ if (entry->msi_attrib.is_msix) {
+ void __iomem *base;
+ base = entry->mask_base +
+ entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+
+ writel(msg->address_lo,
+ base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
+ writel(msg->address_hi,
+ base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
+ writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
+ } else {
struct pci_dev *dev = entry->dev;
int pos = entry->msi_attrib.pos;
+ u16 msgctl;
+
+ pci_read_config_word(dev, msi_control_reg(pos), &msgctl);
+ msgctl &= ~PCI_MSI_FLAGS_QSIZE;
+ msgctl |= entry->msi_attrib.multiple << 4;
+ pci_write_config_word(dev, msi_control_reg(pos), msgctl);
pci_write_config_dword(dev, msi_lower_address_reg(pos),
msg->address_lo);
@@ -246,23 +251,6 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
pci_write_config_word(dev, msi_data_reg(pos, 0),
msg->data);
}
- break;
- }
- case PCI_CAP_ID_MSIX:
- {
- void __iomem *base;
- base = entry->mask_base +
- entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
-
- writel(msg->address_lo,
- base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- writel(msg->address_hi,
- base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
- break;
- }
- default:
- BUG();
}
entry->msg = *msg;
}
@@ -274,37 +262,18 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
write_msi_msg_desc(desc, msg);
}
-void mask_msi_irq(unsigned int irq)
-{
- struct irq_desc *desc = irq_to_desc(irq);
-
- msi_set_mask_bits(desc, 1, 1);
- msix_flush_writes(desc);
-}
-
-void unmask_msi_irq(unsigned int irq)
-{
- struct irq_desc *desc = irq_to_desc(irq);
-
- msi_set_mask_bits(desc, 1, 0);
- msix_flush_writes(desc);
-}
-
static int msi_free_irqs(struct pci_dev* dev);
-static struct msi_desc* alloc_msi_entry(void)
+static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
{
- struct msi_desc *entry;
-
- entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
- if (!entry)
+ struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
return NULL;
- INIT_LIST_HEAD(&entry->list);
- entry->irq = 0;
- entry->dev = NULL;
+ INIT_LIST_HEAD(&desc->list);
+ desc->dev = dev;
- return entry;
+ return desc;
}
static void pci_intx_for_msi(struct pci_dev *dev, int enable)
@@ -326,17 +295,13 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
pos = entry->msi_attrib.pos;
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, 0);
+ msi_set_enable(dev, pos, 0);
write_msi_msg(dev->irq, &entry->msg);
- if (entry->msi_attrib.maskbit) {
- struct irq_desc *desc = irq_to_desc(dev->irq);
- msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
- entry->msi_attrib.masked);
- }
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
control &= ~PCI_MSI_FLAGS_QSIZE;
- control |= PCI_MSI_FLAGS_ENABLE;
+ control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}
@@ -348,23 +313,22 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
if (!dev->msix_enabled)
return;
+ BUG_ON(list_empty(&dev->msi_list));
+ entry = list_entry(dev->msi_list.next, struct msi_desc, list);
+ pos = entry->msi_attrib.pos;
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
/* route the table */
pci_intx_for_msi(dev, 0);
- msix_set_enable(dev, 0);
+ control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
list_for_each_entry(entry, &dev->msi_list, list) {
- struct irq_desc *desc = irq_to_desc(entry->irq);
write_msi_msg(entry->irq, &entry->msg);
- msi_set_mask_bits(desc, 1, entry->msi_attrib.masked);
+ msix_mask_irq(entry, entry->masked);
}
- BUG_ON(list_empty(&dev->msi_list));
- entry = list_entry(dev->msi_list.next, struct msi_desc, list);
- pos = entry->msi_attrib.pos;
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
control &= ~PCI_MSIX_FLAGS_MASKALL;
- control |= PCI_MSIX_FLAGS_ENABLE;
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}
@@ -378,52 +342,48 @@ EXPORT_SYMBOL_GPL(pci_restore_msi_state);
/**
* msi_capability_init - configure device's MSI capability structure
* @dev: pointer to the pci_dev data structure of MSI device function
+ * @nvec: number of interrupts to allocate
*
- * Setup the MSI capability structure of device function with a single
- * MSI irq, regardless of device function is capable of handling
- * multiple messages. A return of zero indicates the successful setup
- * of an entry zero with the new MSI irq or non-zero for otherwise.
- **/
-static int msi_capability_init(struct pci_dev *dev)
+ * Setup the MSI capability structure of the device with the requested
+ * number of interrupts. A return value of zero indicates the successful
+ * setup of an entry with the new MSI irq. A negative return value indicates
+ * an error, and a positive return value indicates the number of interrupts
+ * which could have been allocated.
+ */
+static int msi_capability_init(struct pci_dev *dev, int nvec)
{
struct msi_desc *entry;
int pos, ret;
u16 control;
-
- msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */
+ unsigned mask;
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ msi_set_enable(dev, pos, 0); /* Disable MSI during set up */
+
pci_read_config_word(dev, msi_control_reg(pos), &control);
/* MSI Entry Initialization */
- entry = alloc_msi_entry();
+ entry = alloc_msi_entry(dev);
if (!entry)
return -ENOMEM;
- entry->msi_attrib.type = PCI_CAP_ID_MSI;
+ entry->msi_attrib.is_msix = 0;
entry->msi_attrib.is_64 = is_64bit_address(control);
entry->msi_attrib.entry_nr = 0;
entry->msi_attrib.maskbit = is_mask_bit_support(control);
- entry->msi_attrib.masked = 1;
entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
entry->msi_attrib.pos = pos;
- entry->dev = dev;
- if (entry->msi_attrib.maskbit) {
- unsigned int base, maskbits, temp;
-
- base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
- entry->mask_base = (void __iomem *)(long)base;
-
- /* All MSIs are unmasked by default, Mask them all */
- pci_read_config_dword(dev, base, &maskbits);
- temp = msi_mask((control & PCI_MSI_FLAGS_QMASK) >> 1);
- maskbits |= temp;
- pci_write_config_dword(dev, base, maskbits);
- entry->msi_attrib.maskbits_mask = temp;
- }
+
+ entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
+ /* All MSIs are unmasked by default, Mask them all */
+ if (entry->msi_attrib.maskbit)
+ pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
+ mask = msi_capable_mask(control);
+ msi_mask_irq(entry, mask, mask);
+
list_add_tail(&entry->list, &dev->msi_list);
/* Configure MSI capability structure */
- ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
+ ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret) {
msi_free_irqs(dev);
return ret;
@@ -431,7 +391,7 @@ static int msi_capability_init(struct pci_dev *dev)
/* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, 1);
+ msi_set_enable(dev, pos, 1);
dev->msi_enabled = 1;
dev->irq = entry->irq;
@@ -459,11 +419,14 @@ static int msix_capability_init(struct pci_dev *dev,
u8 bir;
void __iomem *base;
- msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
-
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+
+ /* Ensure MSI-X is disabled while it is set up */
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
/* Request & Map MSI-X table region */
- pci_read_config_word(dev, msi_control_reg(pos), &control);
nr_entries = multi_msix_capable(control);
pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
@@ -474,28 +437,26 @@ static int msix_capability_init(struct pci_dev *dev,
if (base == NULL)
return -ENOMEM;
- /* MSI-X Table Initialization */
for (i = 0; i < nvec; i++) {
- entry = alloc_msi_entry();
+ entry = alloc_msi_entry(dev);
if (!entry)
break;
j = entries[i].entry;
- entry->msi_attrib.type = PCI_CAP_ID_MSIX;
+ entry->msi_attrib.is_msix = 1;
entry->msi_attrib.is_64 = 1;
entry->msi_attrib.entry_nr = j;
- entry->msi_attrib.maskbit = 1;
- entry->msi_attrib.masked = 1;
entry->msi_attrib.default_irq = dev->irq;
entry->msi_attrib.pos = pos;
- entry->dev = dev;
entry->mask_base = base;
list_add_tail(&entry->list, &dev->msi_list);
}
ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
- if (ret) {
+ if (ret < 0) {
+		/* If we had some success, report the number of irqs
+		 * we succeeded in setting up. */
int avail = 0;
list_for_each_entry(entry, &dev->msi_list, list) {
if (entry->irq != 0) {
@@ -503,27 +464,41 @@ static int msix_capability_init(struct pci_dev *dev,
}
}
- msi_free_irqs(dev);
+ if (avail != 0)
+ ret = avail;
+ }
- /* If we had some success report the number of irqs
- * we succeeded in setting up.
- */
- if (avail == 0)
- avail = ret;
- return avail;
+ if (ret) {
+ msi_free_irqs(dev);
+ return ret;
}
+ /*
+ * Some devices require MSI-X to be enabled before we can touch the
+ * MSI-X registers. We need to mask all the vectors to prevent
+ * interrupts coming in before they're fully set up.
+ */
+ control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
i = 0;
list_for_each_entry(entry, &dev->msi_list, list) {
entries[i].vector = entry->irq;
set_irq_msi(entry->irq, entry);
+ j = entries[i].entry;
+ entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+ msix_mask_irq(entry, 1);
i++;
}
- /* Set MSI-X enabled bits */
+
+ /* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
- msix_set_enable(dev, 1);
dev->msix_enabled = 1;
+ control &= ~PCI_MSIX_FLAGS_MASKALL;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
return 0;
}
@@ -575,61 +550,75 @@ static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
}
/**
- * pci_enable_msi - configure device's MSI capability structure
- * @dev: pointer to the pci_dev data structure of MSI device function
+ * pci_enable_msi_block - configure device's MSI capability structure
+ * @dev: device to configure
+ * @nvec: number of interrupts to configure
*
- * Setup the MSI capability structure of device function with
- * a single MSI irq upon its software driver call to request for
- * MSI mode enabled on its hardware device function. A return of zero
- * indicates the successful setup of an entry zero with the new MSI
- * irq or non-zero for otherwise.
- **/
-int pci_enable_msi(struct pci_dev* dev)
+ * Allocate IRQs for a device with the MSI capability.
+ * This function returns a negative errno if an error occurs. If it
+ * is unable to allocate the number of interrupts requested, it returns
+ * the number of interrupts it might be able to allocate. If it successfully
+ * allocates at least the number of interrupts requested, it returns 0 and
+ * updates the @dev's irq member to the lowest new interrupt number; the
+ * other interrupt numbers allocated to this device are consecutive.
+ */
+int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
{
- int status;
+ int status, pos, maxvec;
+ u16 msgctl;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (!pos)
+ return -EINVAL;
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
+ maxvec = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
+ if (nvec > maxvec)
+ return maxvec;
- status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
+ status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
if (status)
return status;
WARN_ON(!!dev->msi_enabled);
- /* Check whether driver already requested for MSI-X irqs */
+ /* Check whether driver already requested MSI-X irqs */
if (dev->msix_enabled) {
dev_info(&dev->dev, "can't enable MSI "
"(MSI-X already enabled)\n");
return -EINVAL;
}
- status = msi_capability_init(dev);
+
+ status = msi_capability_init(dev, nvec);
return status;
}
-EXPORT_SYMBOL(pci_enable_msi);
+EXPORT_SYMBOL(pci_enable_msi_block);
-void pci_msi_shutdown(struct pci_dev* dev)
+void pci_msi_shutdown(struct pci_dev *dev)
{
- struct msi_desc *entry;
+ struct msi_desc *desc;
+ u32 mask;
+ u16 ctrl;
+ unsigned pos;
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
- msi_set_enable(dev, 0);
+ BUG_ON(list_empty(&dev->msi_list));
+ desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
+ pos = desc->msi_attrib.pos;
+
+ msi_set_enable(dev, pos, 0);
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;
- BUG_ON(list_empty(&dev->msi_list));
- entry = list_entry(dev->msi_list.next, struct msi_desc, list);
- /* Return the the pci reset with msi irqs unmasked */
- if (entry->msi_attrib.maskbit) {
- u32 mask = entry->msi_attrib.maskbits_mask;
- struct irq_desc *desc = irq_to_desc(dev->irq);
- msi_set_mask_bits(desc, mask, ~mask);
- }
- if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
- return;
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
+ mask = msi_capable_mask(ctrl);
+ msi_mask_irq(desc, mask, ~mask);
/* Restore dev->irq to its default pin-assertion irq */
- dev->irq = entry->msi_attrib.default_irq;
+ dev->irq = desc->msi_attrib.default_irq;
}
+
void pci_disable_msi(struct pci_dev* dev)
{
struct msi_desc *entry;
@@ -640,7 +629,7 @@ void pci_disable_msi(struct pci_dev* dev)
pci_msi_shutdown(dev);
entry = list_entry(dev->msi_list.next, struct msi_desc, list);
- if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
+ if (entry->msi_attrib.is_msix)
return;
msi_free_irqs(dev);
@@ -652,18 +641,19 @@ static int msi_free_irqs(struct pci_dev* dev)
struct msi_desc *entry, *tmp;
list_for_each_entry(entry, &dev->msi_list, list) {
- if (entry->irq)
- BUG_ON(irq_has_action(entry->irq));
+ int i, nvec;
+ if (!entry->irq)
+ continue;
+ nvec = 1 << entry->msi_attrib.multiple;
+ for (i = 0; i < nvec; i++)
+ BUG_ON(irq_has_action(entry->irq + i));
}
arch_teardown_msi_irqs(dev);
list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
- if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) {
- writel(1, entry->mask_base + entry->msi_attrib.entry_nr
- * PCI_MSIX_ENTRY_SIZE
- + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
-
+ if (entry->msi_attrib.is_msix) {
+ msix_mask_irq(entry, 1);
if (list_is_last(&entry->list, &dev->msi_list))
iounmap(entry->mask_base);
}
@@ -675,6 +665,23 @@ static int msi_free_irqs(struct pci_dev* dev)
}
/**
+ * pci_msix_table_size - return the number of device's MSI-X table entries
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ */
+int pci_msix_table_size(struct pci_dev *dev)
+{
+ int pos;
+ u16 control;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (!pos)
+ return 0;
+
+ pci_read_config_word(dev, msi_control_reg(pos), &control);
+ return multi_msix_capable(control);
+}
+
+/**
* pci_enable_msix - configure device's MSI-X capability structure
* @dev: pointer to the pci_dev data structure of MSI-X device function
* @entries: pointer to an array of MSI-X entries
@@ -686,14 +693,13 @@ static int msi_free_irqs(struct pci_dev* dev)
* indicates the successful configuration of MSI-X capability structure
* with new allocated MSI-X irqs. A return of < 0 indicates a failure.
* Or a return of > 0 indicates that driver request is exceeding the number
- * of irqs available. Driver should use the returned value to re-send
- * its request.
+ * of irqs or MSI-X vectors available. Driver should use the returned value to
+ * re-send its request.
**/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
- int status, pos, nr_entries;
+ int status, nr_entries;
int i, j;
- u16 control;
if (!entries)
return -EINVAL;
@@ -702,11 +708,9 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
if (status)
return status;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- nr_entries = multi_msix_capable(control);
+ nr_entries = pci_msix_table_size(dev);
if (nvec > nr_entries)
- return -EINVAL;
+ return nr_entries;
/* Check for any invalid entries */
for (i = 0; i < nvec; i++) {
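The return conventions above (zero on success, a positive vector count when fewer interrupts are available, a negative errno on error) are what drive the usual retry loop in drivers. A sketch using both interfaces; the requested counts are arbitrary and entries[] is assumed to have its .entry indices pre-filled by the caller:

#include <linux/kernel.h>
#include <linux/pci.h>

/* Ask for up to 16 MSI vectors, settling for whatever the function offers. */
static int example_enable_msi(struct pci_dev *pdev)
{
	int nvec = 16, rc;

	do {
		rc = pci_enable_msi_block(pdev, nvec);
		if (rc > 0)
			nvec = rc;	/* retry with the advertised count */
	} while (rc > 0);

	return rc;			/* 0 on success, -errno on failure */
}

/* MSI-X variant: bound the request by the table size, then retry on > 0. */
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries, int want)
{
	int nvec = min(want, pci_msix_table_size(pdev));
	int rc;

	if (!nvec)
		return -EINVAL;

	do {
		rc = pci_enable_msix(pdev, entries, nvec);
		if (rc > 0)
			nvec = rc;
	} while (rc > 0);

	return rc;
}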
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index 3898f523714..a0662842550 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -16,27 +16,15 @@
#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI)
#define msi_data_reg(base, is64bit) \
- ( (is64bit == 1) ? base+PCI_MSI_DATA_64 : base+PCI_MSI_DATA_32 )
-#define msi_mask_bits_reg(base, is64bit) \
- ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4)
-#define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE
-#define multi_msi_capable(control) \
- (1 << ((control & PCI_MSI_FLAGS_QMASK) >> 1))
-#define multi_msi_enable(control, num) \
- control |= (((num >> 1) << 4) & PCI_MSI_FLAGS_QSIZE);
+ (base + ((is64bit == 1) ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32))
+#define msi_mask_reg(base, is64bit) \
+ (base + ((is64bit == 1) ? PCI_MSI_MASK_64 : PCI_MSI_MASK_32))
#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
-#define msi_enable(control, num) multi_msi_enable(control, num); \
- control |= PCI_MSI_FLAGS_ENABLE
#define msix_table_offset_reg(base) (base + 0x04)
#define msix_pba_offset_reg(base) (base + 0x08)
-#define msix_enable(control) control |= PCI_MSIX_FLAGS_ENABLE
-#define msix_disable(control) control &= ~PCI_MSIX_FLAGS_ENABLE
#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
-#define multi_msix_capable msix_table_size
-#define msix_unmask(address) (address & ~PCI_MSIX_FLAGS_BITMASK)
-#define msix_mask(address) (address | PCI_MSIX_FLAGS_BITMASK)
-#define msix_is_pending(address) (address & PCI_MSIX_FLAGS_PENDMASK)
+#define multi_msix_capable(control) msix_table_size((control))
#endif /* MSI_H */
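For reference on the new mask helpers in msi.c: msi_mask(x) expands to (1 << (1 << x)) - 1, and msi_capable_mask()/msi_enabled_mask() feed it the Multiple Message Capable field (control bits 3:1) and the Multiple Message Enable field (bits 6:4). With a made-up control value of 0x0086, the capable field is 3 and the enable field is 0, so msi_capable_mask() yields 0xff (eight maskable vectors) while msi_enabled_mask() yields 0x01.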
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index deea8a187eb..ea15b053745 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -18,221 +18,6 @@
#include <linux/pci-acpi.h>
#include "pci.h"
-struct acpi_osc_data {
- acpi_handle handle;
- u32 support_set;
- u32 control_set;
- u32 control_query;
- int is_queried;
- struct list_head sibiling;
-};
-static LIST_HEAD(acpi_osc_data_list);
-
-struct acpi_osc_args {
- u32 capbuf[3];
-};
-
-static DEFINE_MUTEX(pci_acpi_lock);
-
-static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
-{
- struct acpi_osc_data *data;
-
- list_for_each_entry(data, &acpi_osc_data_list, sibiling) {
- if (data->handle == handle)
- return data;
- }
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return NULL;
- INIT_LIST_HEAD(&data->sibiling);
- data->handle = handle;
- list_add_tail(&data->sibiling, &acpi_osc_data_list);
- return data;
-}
-
-static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40,
- 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
-
-static acpi_status acpi_run_osc(acpi_handle handle,
- struct acpi_osc_args *osc_args, u32 *retval)
-{
- acpi_status status;
- struct acpi_object_list input;
- union acpi_object in_params[4];
- struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
- union acpi_object *out_obj;
- u32 errors, flags = osc_args->capbuf[OSC_QUERY_TYPE];
-
- /* Setting up input parameters */
- input.count = 4;
- input.pointer = in_params;
- in_params[0].type = ACPI_TYPE_BUFFER;
- in_params[0].buffer.length = 16;
- in_params[0].buffer.pointer = OSC_UUID;
- in_params[1].type = ACPI_TYPE_INTEGER;
- in_params[1].integer.value = 1;
- in_params[2].type = ACPI_TYPE_INTEGER;
- in_params[2].integer.value = 3;
- in_params[3].type = ACPI_TYPE_BUFFER;
- in_params[3].buffer.length = 12;
- in_params[3].buffer.pointer = (u8 *)osc_args->capbuf;
-
- status = acpi_evaluate_object(handle, "_OSC", &input, &output);
- if (ACPI_FAILURE(status))
- return status;
-
- if (!output.length)
- return AE_NULL_OBJECT;
-
- out_obj = output.pointer;
- if (out_obj->type != ACPI_TYPE_BUFFER) {
- printk(KERN_DEBUG "Evaluate _OSC returns wrong type\n");
- status = AE_TYPE;
- goto out_kfree;
- }
- /* Need to ignore the bit0 in result code */
- errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
- if (errors) {
- if (errors & OSC_REQUEST_ERROR)
- printk(KERN_DEBUG "_OSC request fails\n");
- if (errors & OSC_INVALID_UUID_ERROR)
- printk(KERN_DEBUG "_OSC invalid UUID\n");
- if (errors & OSC_INVALID_REVISION_ERROR)
- printk(KERN_DEBUG "_OSC invalid revision\n");
- if (errors & OSC_CAPABILITIES_MASK_ERROR) {
- if (flags & OSC_QUERY_ENABLE)
- goto out_success;
- printk(KERN_DEBUG "_OSC FW not grant req. control\n");
- status = AE_SUPPORT;
- goto out_kfree;
- }
- status = AE_ERROR;
- goto out_kfree;
- }
-out_success:
- *retval = *((u32 *)(out_obj->buffer.pointer + 8));
- status = AE_OK;
-
-out_kfree:
- kfree(output.pointer);
- return status;
-}
-
-static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data)
-{
- acpi_status status;
- u32 support_set, result;
- struct acpi_osc_args osc_args;
-
- /* do _OSC query for all possible controls */
- support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS);
- osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
- osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set;
- osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
-
- status = acpi_run_osc(osc_data->handle, &osc_args, &result);
- if (ACPI_SUCCESS(status)) {
- osc_data->support_set = support_set;
- osc_data->control_query = result;
- osc_data->is_queried = 1;
- }
-
- return status;
-}
-
-/*
- * pci_acpi_osc_support: Invoke _OSC indicating support for the given feature
- * @flags: Bitmask of flags to support
- *
- * See the ACPI spec for the definition of the flags
- */
-int pci_acpi_osc_support(acpi_handle handle, u32 flags)
-{
- acpi_status status;
- acpi_handle tmp;
- struct acpi_osc_data *osc_data;
- int rc = 0;
-
- status = acpi_get_handle(handle, "_OSC", &tmp);
- if (ACPI_FAILURE(status))
- return -ENOTTY;
-
- mutex_lock(&pci_acpi_lock);
- osc_data = acpi_get_osc_data(handle);
- if (!osc_data) {
- printk(KERN_ERR "acpi osc data array is full\n");
- rc = -ENOMEM;
- goto out;
- }
-
- __acpi_query_osc(flags, osc_data);
-out:
- mutex_unlock(&pci_acpi_lock);
- return rc;
-}
-
-/**
- * pci_osc_control_set - commit requested control to Firmware
- * @handle: acpi_handle for the target ACPI object
- * @flags: driver's requested control bits
- *
- * Attempt to take control from Firmware on requested control bits.
- **/
-acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
-{
- acpi_status status;
- u32 control_req, control_set, result;
- acpi_handle tmp;
- struct acpi_osc_data *osc_data;
- struct acpi_osc_args osc_args;
-
- status = acpi_get_handle(handle, "_OSC", &tmp);
- if (ACPI_FAILURE(status))
- return status;
-
- mutex_lock(&pci_acpi_lock);
- osc_data = acpi_get_osc_data(handle);
- if (!osc_data) {
- printk(KERN_ERR "acpi osc data array is full\n");
- status = AE_ERROR;
- goto out;
- }
-
- control_req = (flags & OSC_CONTROL_MASKS);
- if (!control_req) {
- status = AE_TYPE;
- goto out;
- }
-
- /* No need to evaluate _OSC if the control was already granted. */
- if ((osc_data->control_set & control_req) == control_req)
- goto out;
-
- if (!osc_data->is_queried) {
- status = __acpi_query_osc(osc_data->support_set, osc_data);
- if (ACPI_FAILURE(status))
- goto out;
- }
-
- if ((osc_data->control_query & control_req) != control_req) {
- status = AE_SUPPORT;
- goto out;
- }
-
- control_set = osc_data->control_set | control_req;
- osc_args.capbuf[OSC_QUERY_TYPE] = 0;
- osc_args.capbuf[OSC_SUPPORT_TYPE] = osc_data->support_set;
- osc_args.capbuf[OSC_CONTROL_TYPE] = control_set;
- status = acpi_run_osc(handle, &osc_args, &result);
- if (ACPI_SUCCESS(status))
- osc_data->control_set = result;
-out:
- mutex_unlock(&pci_acpi_lock);
- return status;
-}
-EXPORT_SYMBOL(pci_osc_control_set);
-
/*
* _SxD returns the D-state with the highest power
* (lowest D-state number) supported in the S-state "x".
@@ -386,12 +171,12 @@ static int __init acpi_pci_init(void)
{
int ret;
- if (acpi_gbl_FADT.boot_flags & BAF_MSI_NOT_SUPPORTED) {
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
printk(KERN_INFO"ACPI FADT declares the system doesn't support MSI, so disable it\n");
pci_no_msi();
}
- if (acpi_gbl_FADT.boot_flags & BAF_PCIE_ASPM_CONTROL) {
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
pcie_no_aspm();
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 93eac142358..d76c4c85367 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -99,6 +99,52 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
}
static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
+/**
+ * store_remove_id - remove a PCI device ID from this driver
+ * @driver: target device driver
+ * @buf: buffer for scanning device ID data
+ * @count: input size
+ *
+ * Removes a dynamic PCI device ID from this driver.
+ */
+static ssize_t
+store_remove_id(struct device_driver *driver, const char *buf, size_t count)
+{
+ struct pci_dynid *dynid, *n;
+ struct pci_driver *pdrv = to_pci_driver(driver);
+ __u32 vendor, device, subvendor = PCI_ANY_ID,
+ subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
+ int fields = 0;
+ int retval = -ENODEV;
+
+ fields = sscanf(buf, "%x %x %x %x %x %x",
+ &vendor, &device, &subvendor, &subdevice,
+ &class, &class_mask);
+ if (fields < 2)
+ return -EINVAL;
+
+ spin_lock(&pdrv->dynids.lock);
+ list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
+ struct pci_device_id *id = &dynid->id;
+ if ((id->vendor == vendor) &&
+ (id->device == device) &&
+ (subvendor == PCI_ANY_ID || id->subvendor == subvendor) &&
+ (subdevice == PCI_ANY_ID || id->subdevice == subdevice) &&
+ !((id->class ^ class) & class_mask)) {
+ list_del(&dynid->node);
+ kfree(dynid);
+ retval = 0;
+ break;
+ }
+ }
+ spin_unlock(&pdrv->dynids.lock);
+
+ if (retval)
+ return retval;
+ return count;
+}
+static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
+
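The new remove_id attribute takes the same whitespace-separated hex fields as new_id; per the sscanf() above, at least vendor and device must be given. A minimal user-space sketch; the driver name and IDs are purely illustrative:

#include <stdio.h>

int main(void)
{
	/* Illustrative driver name and IDs; substitute real values. */
	FILE *f = fopen("/sys/bus/pci/drivers/e1000e/remove_id", "w");

	if (!f) {
		perror("remove_id");
		return 1;
	}
	/* format: vendor device [subvendor subdevice class class_mask] */
	fprintf(f, "8086 10d3\n");
	return fclose(f);
}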
static void
pci_free_dynids(struct pci_driver *drv)
{
@@ -125,6 +171,20 @@ static void pci_remove_newid_file(struct pci_driver *drv)
{
driver_remove_file(&drv->driver, &driver_attr_new_id);
}
+
+static int
+pci_create_removeid_file(struct pci_driver *drv)
+{
+ int error = 0;
+ if (drv->probe != NULL)
+ error = driver_create_file(&drv->driver,&driver_attr_remove_id);
+ return error;
+}
+
+static void pci_remove_removeid_file(struct pci_driver *drv)
+{
+ driver_remove_file(&drv->driver, &driver_attr_remove_id);
+}
#else /* !CONFIG_HOTPLUG */
static inline void pci_free_dynids(struct pci_driver *drv) {}
static inline int pci_create_newid_file(struct pci_driver *drv)
@@ -132,6 +192,11 @@ static inline int pci_create_newid_file(struct pci_driver *drv)
return 0;
}
static inline void pci_remove_newid_file(struct pci_driver *drv) {}
+static inline int pci_create_removeid_file(struct pci_driver *drv)
+{
+ return 0;
+}
+static inline void pci_remove_removeid_file(struct pci_driver *drv) {}
#endif
/**
@@ -212,10 +277,9 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
node = dev_to_node(&dev->dev);
if (node >= 0) {
int cpu;
- node_to_cpumask_ptr(nodecpumask, node);
get_online_cpus();
- cpu = cpumask_any_and(nodecpumask, cpu_online_mask);
+ cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
if (cpu < nr_cpu_ids)
error = work_on_cpu(cpu, local_pci_probe, &ddi);
else
@@ -352,53 +416,60 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
{
struct pci_dev * pci_dev = to_pci_dev(dev);
struct pci_driver * drv = pci_dev->driver;
- int i = 0;
+
+ pci_dev->state_saved = false;
if (drv && drv->suspend) {
pci_power_t prev = pci_dev->current_state;
+ int error;
- pci_dev->state_saved = false;
-
- i = drv->suspend(pci_dev, state);
- suspend_report_result(drv->suspend, i);
- if (i)
- return i;
-
- if (pci_dev->state_saved)
- goto Fixup;
+ error = drv->suspend(pci_dev, state);
+ suspend_report_result(drv->suspend, error);
+ if (error)
+ return error;
- if (pci_dev->current_state != PCI_D0
+ if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
WARN_ONCE(pci_dev->current_state != prev,
"PCI PM: Device state not saved by %pF\n",
drv->suspend);
- goto Fixup;
}
}
- pci_save_state(pci_dev);
- /*
- * This is for compatibility with existing code with legacy PM support.
- */
- pci_pm_set_unknown_state(pci_dev);
-
- Fixup:
pci_fixup_device(pci_fixup_suspend, pci_dev);
- return i;
+ return 0;
}
static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
{
struct pci_dev * pci_dev = to_pci_dev(dev);
struct pci_driver * drv = pci_dev->driver;
- int i = 0;
if (drv && drv->suspend_late) {
- i = drv->suspend_late(pci_dev, state);
- suspend_report_result(drv->suspend_late, i);
+ pci_power_t prev = pci_dev->current_state;
+ int error;
+
+ error = drv->suspend_late(pci_dev, state);
+ suspend_report_result(drv->suspend_late, error);
+ if (error)
+ return error;
+
+ if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
+ && pci_dev->current_state != PCI_UNKNOWN) {
+ WARN_ONCE(pci_dev->current_state != prev,
+ "PCI PM: Device state not saved by %pF\n",
+ drv->suspend_late);
+ return 0;
+ }
}
- return i;
+
+ if (!pci_dev->state_saved)
+ pci_save_state(pci_dev);
+
+ pci_pm_set_unknown_state(pci_dev);
+
+ return 0;
}
static int pci_legacy_resume_early(struct device *dev)
@@ -423,6 +494,23 @@ static int pci_legacy_resume(struct device *dev)
/* Auxiliary functions used by the new power management framework */
+/**
+ * pci_restore_standard_config - restore standard config registers of PCI device
+ * @pci_dev: PCI device to handle
+ */
+static int pci_restore_standard_config(struct pci_dev *pci_dev)
+{
+ pci_update_current_state(pci_dev, PCI_UNKNOWN);
+
+ if (pci_dev->current_state != PCI_D0) {
+ int error = pci_set_power_state(pci_dev, PCI_D0);
+ if (error)
+ return error;
+ }
+
+ return pci_dev->state_saved ? pci_restore_state(pci_dev) : 0;
+}
+
static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
{
pci_restore_standard_config(pci_dev);
@@ -443,7 +531,6 @@ static void pci_pm_default_suspend(struct pci_dev *pci_dev)
/* Disable non-bridge devices without PM support */
if (!pci_is_bridge(pci_dev))
pci_disable_enabled_device(pci_dev);
- pci_save_state(pci_dev);
}
static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
@@ -493,13 +580,13 @@ static int pci_pm_suspend(struct device *dev)
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend(dev, PMSG_SUSPEND);
+ pci_dev->state_saved = false;
+
if (!pm) {
pci_pm_default_suspend(pci_dev);
goto Fixup;
}
- pci_dev->state_saved = false;
-
if (pm->suspend) {
pci_power_t prev = pci_dev->current_state;
int error;
@@ -509,24 +596,14 @@ static int pci_pm_suspend(struct device *dev)
if (error)
return error;
- if (pci_dev->state_saved)
- goto Fixup;
-
- if (pci_dev->current_state != PCI_D0
+ if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
WARN_ONCE(pci_dev->current_state != prev,
"PCI PM: State of device not saved by %pF\n",
pm->suspend);
- goto Fixup;
}
}
- if (!pci_dev->state_saved) {
- pci_save_state(pci_dev);
- if (!pci_is_bridge(pci_dev))
- pci_prepare_to_sleep(pci_dev);
- }
-
Fixup:
pci_fixup_device(pci_fixup_suspend, pci_dev);
@@ -536,21 +613,43 @@ static int pci_pm_suspend(struct device *dev)
static int pci_pm_suspend_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct device_driver *drv = dev->driver;
- int error = 0;
+ struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
- if (drv && drv->pm && drv->pm->suspend_noirq) {
- error = drv->pm->suspend_noirq(dev);
- suspend_report_result(drv->pm->suspend_noirq, error);
+ if (!pm) {
+ pci_save_state(pci_dev);
+ return 0;
}
- if (!error)
- pci_pm_set_unknown_state(pci_dev);
+ if (pm->suspend_noirq) {
+ pci_power_t prev = pci_dev->current_state;
+ int error;
- return error;
+ error = pm->suspend_noirq(dev);
+ suspend_report_result(pm->suspend_noirq, error);
+ if (error)
+ return error;
+
+ if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
+ && pci_dev->current_state != PCI_UNKNOWN) {
+ WARN_ONCE(pci_dev->current_state != prev,
+ "PCI PM: State of device not saved by %pF\n",
+ pm->suspend_noirq);
+ return 0;
+ }
+ }
+
+ if (!pci_dev->state_saved) {
+ pci_save_state(pci_dev);
+ if (!pci_is_bridge(pci_dev))
+ pci_prepare_to_sleep(pci_dev);
+ }
+
+ pci_pm_set_unknown_state(pci_dev);
+
+ return 0;
}
static int pci_pm_resume_noirq(struct device *dev)
@@ -617,13 +716,13 @@ static int pci_pm_freeze(struct device *dev)
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend(dev, PMSG_FREEZE);
+ pci_dev->state_saved = false;
+
if (!pm) {
pci_pm_default_suspend(pci_dev);
return 0;
}
- pci_dev->state_saved = false;
-
if (pm->freeze) {
int error;
@@ -633,9 +732,6 @@ static int pci_pm_freeze(struct device *dev)
return error;
}
- if (!pci_dev->state_saved)
- pci_save_state(pci_dev);
-
return 0;
}
@@ -643,20 +739,25 @@ static int pci_pm_freeze_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
- int error = 0;
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_FREEZE);
if (drv && drv->pm && drv->pm->freeze_noirq) {
+ int error;
+
error = drv->pm->freeze_noirq(dev);
suspend_report_result(drv->pm->freeze_noirq, error);
+ if (error)
+ return error;
}
- if (!error)
- pci_pm_set_unknown_state(pci_dev);
+ if (!pci_dev->state_saved)
+ pci_save_state(pci_dev);
- return error;
+ pci_pm_set_unknown_state(pci_dev);
+
+ return 0;
}
static int pci_pm_thaw_noirq(struct device *dev)
@@ -699,46 +800,56 @@ static int pci_pm_poweroff(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int error = 0;
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend(dev, PMSG_HIBERNATE);
+ pci_dev->state_saved = false;
+
if (!pm) {
pci_pm_default_suspend(pci_dev);
goto Fixup;
}
- pci_dev->state_saved = false;
-
if (pm->poweroff) {
+ int error;
+
error = pm->poweroff(dev);
suspend_report_result(pm->poweroff, error);
+ if (error)
+ return error;
}
- if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
- pci_prepare_to_sleep(pci_dev);
-
Fixup:
pci_fixup_device(pci_fixup_suspend, pci_dev);
- return error;
+ return 0;
}
static int pci_pm_poweroff_noirq(struct device *dev)
{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
- int error = 0;
if (pci_has_legacy_pm_support(to_pci_dev(dev)))
return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
- if (drv && drv->pm && drv->pm->poweroff_noirq) {
+ if (!drv || !drv->pm)
+ return 0;
+
+ if (drv->pm->poweroff_noirq) {
+ int error;
+
error = drv->pm->poweroff_noirq(dev);
suspend_report_result(drv->pm->poweroff_noirq, error);
+ if (error)
+ return error;
}
- return error;
+ if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
+ pci_prepare_to_sleep(pci_dev);
+
+ return 0;
}
static int pci_pm_restore_noirq(struct device *dev)
@@ -852,13 +963,23 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
/* register with core */
error = driver_register(&drv->driver);
if (error)
- return error;
+ goto out;
error = pci_create_newid_file(drv);
if (error)
- driver_unregister(&drv->driver);
+ goto out_newid;
+ error = pci_create_removeid_file(drv);
+ if (error)
+ goto out_removeid;
+out:
return error;
+
+out_removeid:
+ pci_remove_newid_file(drv);
+out_newid:
+ driver_unregister(&drv->driver);
+ goto out;
}
/**
@@ -874,6 +995,7 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
void
pci_unregister_driver(struct pci_driver *drv)
{
+ pci_remove_removeid_file(drv);
pci_remove_newid_file(drv);
driver_unregister(&drv->driver);
pci_free_dynids(drv);
@@ -973,6 +1095,7 @@ struct bus_type pci_bus_type = {
.remove = pci_device_remove,
.shutdown = pci_device_shutdown,
.dev_attrs = pci_dev_attrs,
+ .bus_attrs = pci_bus_attrs,
.pm = PCI_PM_OPS_PTR,
};
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index dfc4e0ddf24..85ebd02a64a 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -148,7 +148,7 @@ static ssize_t is_enabled_store(struct device *dev,
return -EPERM;
if (!val) {
- if (atomic_read(&pdev->enable_cnt) != 0)
+ if (pci_is_enabled(pdev))
pci_disable_device(pdev);
else
result = -EIO;
@@ -219,6 +219,79 @@ msi_bus_store(struct device *dev, struct device_attribute *attr,
return count;
}
+#ifdef CONFIG_HOTPLUG
+static DEFINE_MUTEX(pci_remove_rescan_mutex);
+static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ struct pci_bus *b = NULL;
+
+ if (strict_strtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val) {
+ mutex_lock(&pci_remove_rescan_mutex);
+ while ((b = pci_find_next_bus(b)) != NULL)
+ pci_rescan_bus(b);
+ mutex_unlock(&pci_remove_rescan_mutex);
+ }
+ return count;
+}
+
+struct bus_attribute pci_bus_attrs[] = {
+ __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store),
+ __ATTR_NULL
+};
+
+static ssize_t
+dev_rescan_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (strict_strtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val) {
+ mutex_lock(&pci_remove_rescan_mutex);
+ pci_rescan_bus(pdev->bus);
+ mutex_unlock(&pci_remove_rescan_mutex);
+ }
+ return count;
+}
+
+static void remove_callback(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ mutex_lock(&pci_remove_rescan_mutex);
+ pci_remove_bus_device(pdev);
+ mutex_unlock(&pci_remove_rescan_mutex);
+}
+
+static ssize_t
+remove_store(struct device *dev, struct device_attribute *dummy,
+ const char *buf, size_t count)
+{
+ int ret = 0;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ /* An attribute cannot be unregistered by one of its own methods,
+ * so we have to use this roundabout approach.
+ */
+ if (val)
+ ret = device_schedule_callback(dev, remove_callback);
+ if (ret)
+ count = ret;
+ return count;
+}
+#endif
+
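The block above adds three write-only hotplug attributes: a bus-level rescan, a per-device rescan, and a per-device remove (deferred through device_schedule_callback() because an attribute cannot unregister itself). A user-space sketch; the device address is illustrative and any non-zero value triggers the action, matching the strict_strtoul() checks:

#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* Detach one device, then ask the PCI core to rediscover it. */
	write_attr("/sys/bus/pci/devices/0000:03:00.0/remove", "1");
	write_attr("/sys/bus/pci/rescan", "1");
	return 0;
}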
struct device_attribute pci_dev_attrs[] = {
__ATTR_RO(resource),
__ATTR_RO(vendor),
@@ -237,10 +310,25 @@ struct device_attribute pci_dev_attrs[] = {
__ATTR(broken_parity_status,(S_IRUGO|S_IWUSR),
broken_parity_status_show,broken_parity_status_store),
__ATTR(msi_bus, 0644, msi_bus_show, msi_bus_store),
+#ifdef CONFIG_HOTPLUG
+ __ATTR(remove, (S_IWUSR|S_IWGRP), NULL, remove_store),
+ __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_rescan_store),
+#endif
__ATTR_NULL,
};
static ssize_t
+boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%u\n",
+ !!(pdev->resource[PCI_ROM_RESOURCE].flags &
+ IORESOURCE_ROM_SHADOW));
+}
+struct device_attribute vga_attr = __ATTR_RO(boot_vga);
+
+static ssize_t
pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -404,6 +492,7 @@ write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
/**
* pci_read_legacy_io - read byte(s) from legacy I/O port space
* @kobj: kobject corresponding to file to read from
+ * @bin_attr: struct bin_attribute for this file
* @buf: buffer to store results
* @off: offset into legacy I/O port space
* @count: number of bytes to read
@@ -429,6 +518,7 @@ pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
/**
* pci_write_legacy_io - write byte(s) to legacy I/O port space
* @kobj: kobject corresponding to file to read from
+ * @bin_attr: struct bin_attribute for this file
* @buf: buffer containing value to be written
* @off: offset into legacy I/O port space
* @count: number of bytes to write
@@ -493,6 +583,19 @@ pci_mmap_legacy_io(struct kobject *kobj, struct bin_attribute *attr,
}
/**
+ * pci_adjust_legacy_attr - adjustment of legacy file attributes
+ * @b: bus to create files under
+ * @mmap_type: I/O port or memory
+ *
+ * Stub implementation. Can be overridden by arch if necessary.
+ */
+void __weak
+pci_adjust_legacy_attr(struct pci_bus *b, enum pci_mmap_state mmap_type)
+{
+ return;
+}
+
+/**
* pci_create_legacy_files - create legacy I/O port and memory files
* @b: bus to create files under
*
@@ -518,6 +621,7 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_io->read = pci_read_legacy_io;
b->legacy_io->write = pci_write_legacy_io;
b->legacy_io->mmap = pci_mmap_legacy_io;
+ pci_adjust_legacy_attr(b, pci_mmap_io);
error = device_create_bin_file(&b->dev, b->legacy_io);
if (error)
goto legacy_io_err;
@@ -528,6 +632,7 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_mem->size = 1024*1024;
b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
b->legacy_mem->mmap = pci_mmap_legacy_mem;
+ pci_adjust_legacy_attr(b, pci_mmap_mem);
error = device_create_bin_file(&b->dev, b->legacy_mem);
if (error)
goto legacy_mem_err;
@@ -630,9 +735,9 @@ pci_mmap_resource_wc(struct kobject *kobj, struct bin_attribute *attr,
/**
* pci_remove_resource_files - cleanup resource files
- * @dev: dev to cleanup
+ * @pdev: dev to cleanup
*
- * If we created resource files for @dev, remove them from sysfs and
+ * If we created resource files for @pdev, remove them from sysfs and
* free their resources.
*/
static void
@@ -690,9 +795,9 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
/**
* pci_create_resource_files - create resource files in sysfs for @dev
- * @dev: dev in question
+ * @pdev: dev in question
*
- * Walk the resources in @dev creating files for each resource available.
+ * Walk the resources in @pdev creating files for each resource available.
*/
static int pci_create_resource_files(struct pci_dev *pdev)
{
@@ -719,13 +824,14 @@ static int pci_create_resource_files(struct pci_dev *pdev)
return 0;
}
#else /* !HAVE_PCI_MMAP */
-static inline int pci_create_resource_files(struct pci_dev *dev) { return 0; }
-static inline void pci_remove_resource_files(struct pci_dev *dev) { return; }
+int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
+void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
#endif /* HAVE_PCI_MMAP */
/**
* pci_write_rom - used to enable access to the PCI ROM display
* @kobj: kernel object handle
+ * @bin_attr: struct bin_attribute for this file
* @buf: user input
* @off: file offset
* @count: number of byte in input
@@ -749,6 +855,7 @@ pci_write_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
/**
* pci_read_rom - read a PCI ROM
* @kobj: kernel object handle
+ * @bin_attr: struct bin_attribute for this file
* @buf: where to put the data we read from the ROM
* @off: file offset
* @count: number of bytes to read
@@ -884,18 +991,27 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
pdev->rom_attr = attr;
}
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
+ retval = device_create_file(&pdev->dev, &vga_attr);
+ if (retval)
+ goto err_rom_file;
+ }
+
/* add platform-specific attributes */
retval = pcibios_add_platform_entries(pdev);
if (retval)
- goto err_rom_file;
+ goto err_vga_file;
/* add sysfs entries for various capabilities */
retval = pci_create_capabilities_sysfs(pdev);
if (retval)
- goto err_rom_file;
+ goto err_vga_file;
return 0;
+err_vga_file:
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ device_remove_file(&pdev->dev, &vga_attr);
err_rom_file:
if (rom_size) {
sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6d6120007af..6c93af5ced1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -20,8 +20,15 @@
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
+#include <linux/device.h>
+#include <asm/setup.h>
#include "pci.h"
+const char *pci_power_names[] = {
+ "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
+};
+EXPORT_SYMBOL_GPL(pci_power_names);
+
unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
#ifdef CONFIG_PCI_DOMAINS
@@ -426,7 +433,6 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
* given PCI device
* @dev: PCI device to handle.
* @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
- * @wait: If 'true', wait for the device to change its power state
*
* RETURN VALUE:
* -EINVAL if the requested state is invalid.
@@ -435,12 +441,15 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
* 0 if device already is in the requested state.
* 0 if device's power state has been successfully changed.
*/
-static int
-pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
+static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
u16 pmcsr;
bool need_restore = false;
+ /* Check if we're already there */
+ if (dev->current_state == state)
+ return 0;
+
if (!dev->pm_cap)
return -EIO;
@@ -451,10 +460,7 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
* Can enter D0 from any state, but if we can only go deeper
* to sleep if we're already in a low power state
*/
- if (dev->current_state == state) {
- /* we're already there */
- return 0;
- } else if (state != PCI_D0 && dev->current_state <= PCI_D3cold
+ if (state != PCI_D0 && dev->current_state <= PCI_D3cold
&& dev->current_state > state) {
dev_err(&dev->dev, "invalid power transition "
"(from state %d to %d)\n", dev->current_state, state);
@@ -479,12 +485,12 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= state;
break;
+ case PCI_D3hot:
+ case PCI_D3cold:
case PCI_UNKNOWN: /* Boot-up */
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
- && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) {
+ && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
need_restore = true;
- wait = true;
- }
/* Fall-through: force to D0 */
default:
pmcsr = 0;
@@ -494,9 +500,6 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
/* enter specified state */
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
- if (!wait)
- return 0;
-
/* Mandatory power management transition delays */
/* see PCI PM 1.1 5.6.1 table 18 */
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
@@ -521,7 +524,7 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
if (need_restore)
pci_restore_bars(dev);
- if (wait && dev->bus->self)
+ if (dev->bus->self)
pcie_aspm_pm_state_change(dev->bus->self);
return 0;
@@ -546,11 +549,59 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
}
/**
+ * pci_platform_power_transition - Use platform to change device power state
+ * @dev: PCI device to handle.
+ * @state: State to put the device into.
+ */
+static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
+{
+ int error;
+
+ if (platform_pci_power_manageable(dev)) {
+ error = platform_pci_set_power_state(dev, state);
+ if (!error)
+ pci_update_current_state(dev, state);
+ } else {
+ error = -ENODEV;
+ /* Fall back to PCI_D0 if native PM is not supported */
+ if (!dev->pm_cap)
+ dev->current_state = PCI_D0;
+ }
+
+ return error;
+}
+
+/**
+ * __pci_start_power_transition - Start power transition of a PCI device
+ * @dev: PCI device to handle.
+ * @state: State to put the device into.
+ */
+static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
+{
+ if (state == PCI_D0)
+ pci_platform_power_transition(dev, PCI_D0);
+}
+
+/**
+ * __pci_complete_power_transition - Complete power transition of a PCI device
+ * @dev: PCI device to handle.
+ * @state: State to put the device into.
+ *
+ * This function should not be called directly by device drivers.
+ */
+int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
+{
+ return state > PCI_D0 ?
+ pci_platform_power_transition(dev, state) : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
+
+/**
* pci_set_power_state - Set the power state of a PCI device
* @dev: PCI device to handle.
* @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
*
- * Transition a device to a new power state, using the platform formware and/or
+ * Transition a device to a new power state, using the platform firmware and/or
* the device's PCI PM registers.
*
* RETURN VALUE:
@@ -577,30 +628,21 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
*/
return 0;
- if (state == PCI_D0 && platform_pci_power_manageable(dev)) {
- /*
- * Allow the platform to change the state, for example via ACPI
- * _PR0, _PS0 and some such, but do not trust it.
- */
- int ret = platform_pci_set_power_state(dev, PCI_D0);
- if (!ret)
- pci_update_current_state(dev, PCI_D0);
- }
+ /* Check if we're already there */
+ if (dev->current_state == state)
+ return 0;
+
+ __pci_start_power_transition(dev, state);
+
/* This device is quirked not to be put into D3, so
don't put it in D3 */
if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
return 0;
- error = pci_raw_set_power_state(dev, state, true);
+ error = pci_raw_set_power_state(dev, state);
- if (state > PCI_D0 && platform_pci_power_manageable(dev)) {
- /* Allow the platform to finalize the transition */
- int ret = platform_pci_set_power_state(dev, state);
- if (!ret) {
- pci_update_current_state(dev, state);
- error = 0;
- }
- }
+ if (!__pci_complete_power_transition(dev, state))
+ error = 0;
return error;
}
@@ -645,11 +687,36 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
EXPORT_SYMBOL(pci_choose_state);
+#define PCI_EXP_SAVE_REGS 7
+
+#define pcie_cap_has_devctl(type, flags) 1
+#define pcie_cap_has_lnkctl(type, flags) \
+ ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
+ (type == PCI_EXP_TYPE_ROOT_PORT || \
+ type == PCI_EXP_TYPE_ENDPOINT || \
+ type == PCI_EXP_TYPE_LEG_END))
+#define pcie_cap_has_sltctl(type, flags) \
+ ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
+ ((type == PCI_EXP_TYPE_ROOT_PORT) || \
+ (type == PCI_EXP_TYPE_DOWNSTREAM && \
+ (flags & PCI_EXP_FLAGS_SLOT))))
+#define pcie_cap_has_rtctl(type, flags) \
+ ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
+ (type == PCI_EXP_TYPE_ROOT_PORT || \
+ type == PCI_EXP_TYPE_RC_EC))
+#define pcie_cap_has_devctl2(type, flags) \
+ ((flags & PCI_EXP_FLAGS_VERS) > 1)
+#define pcie_cap_has_lnkctl2(type, flags) \
+ ((flags & PCI_EXP_FLAGS_VERS) > 1)
+#define pcie_cap_has_sltctl2(type, flags) \
+ ((flags & PCI_EXP_FLAGS_VERS) > 1)
+
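Taken together, these macros gate which PCI Express control registers actually exist for a given capability version and port type. For example, a v1 capability on a legacy endpoint saves only Device Control and Link Control, so the save loop below stores two words, while the PCI_EXP_SAVE_REGS-sized buffer covers the worst case of a v2 capability exposing all seven.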
static int pci_save_pcie_state(struct pci_dev *dev)
{
int pos, i = 0;
struct pci_cap_saved_state *save_state;
u16 *cap;
+ u16 flags;
pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
if (pos <= 0)
@@ -657,15 +724,27 @@ static int pci_save_pcie_state(struct pci_dev *dev)
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
if (!save_state) {
- dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__);
+ dev_err(&dev->dev, "buffer not found in %s\n", __func__);
return -ENOMEM;
}
cap = (u16 *)&save_state->data[0];
- pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
- pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
- pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
- pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
+ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
+
+ if (pcie_cap_has_devctl(dev->pcie_type, flags))
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
+ if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
+ pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
+ if (pcie_cap_has_sltctl(dev->pcie_type, flags))
+ pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
+ if (pcie_cap_has_rtctl(dev->pcie_type, flags))
+ pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
+ if (pcie_cap_has_devctl2(dev->pcie_type, flags))
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
+ if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
+ pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
+ if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
+ pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
return 0;
}
@@ -675,6 +754,7 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
int i = 0, pos;
struct pci_cap_saved_state *save_state;
u16 *cap;
+ u16 flags;
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
@@ -682,10 +762,22 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
return;
cap = (u16 *)&save_state->data[0];
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
- pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
- pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
- pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
+ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
+
+ if (pcie_cap_has_devctl(dev->pcie_type, flags))
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
+ if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
+ pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
+ if (pcie_cap_has_sltctl(dev->pcie_type, flags))
+ pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
+ if (pcie_cap_has_rtctl(dev->pcie_type, flags))
+ pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
+ if (pcie_cap_has_devctl2(dev->pcie_type, flags))
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
+ if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
+ pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
+ if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
+ pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}
@@ -700,7 +792,7 @@ static int pci_save_pcix_state(struct pci_dev *dev)
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
if (!save_state) {
- dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__);
+ dev_err(&dev->dev, "buffer not found in %s\n", __func__);
return -ENOMEM;
}
@@ -773,6 +865,7 @@ pci_restore_state(struct pci_dev *dev)
}
pci_restore_pcix_state(dev);
pci_restore_msi_state(dev);
+ pci_restore_iov_state(dev);
return 0;
}
@@ -801,7 +894,7 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
*/
int pci_reenable_device(struct pci_dev *dev)
{
- if (atomic_read(&dev->enable_cnt))
+ if (pci_is_enabled(dev))
return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
return 0;
}
@@ -999,7 +1092,7 @@ static void do_pci_disable_device(struct pci_dev *dev)
*/
void pci_disable_enabled_device(struct pci_dev *dev)
{
- if (atomic_read(&dev->enable_cnt))
+ if (pci_is_enabled(dev))
do_pci_disable_device(dev);
}
@@ -1117,7 +1210,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
* Error code depending on the platform is returned if both the platform and
* the native mechanism fail to enable the generation of wake-up events
*/
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
+int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
int error = 0;
bool pme_done = false;
@@ -1196,15 +1289,14 @@ pci_power_t pci_target_state(struct pci_dev *dev)
default:
target_state = state;
}
+ } else if (!dev->pm_cap) {
+ target_state = PCI_D0;
} else if (device_may_wakeup(&dev->dev)) {
/*
* Find the deepest state from which the device can generate
* wake-up events, make it the target state and enable device
* to generate PME#.
*/
- if (!dev->pm_cap)
- return PCI_POWER_ERROR;
-
if (dev->pme_support) {
while (target_state
&& !(dev->pme_support & (1 << target_state)))
@@ -1231,7 +1323,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
if (target_state == PCI_POWER_ERROR)
return -EIO;
- pci_enable_wake(dev, target_state, true);
+ pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
error = pci_set_power_state(dev, target_state);
@@ -1369,7 +1461,8 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
int error;
- error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 4 * sizeof(u16));
+ error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
+ PCI_EXP_SAVE_REGS * sizeof(u16));
if (error)
dev_err(&dev->dev,
"unable to preallocate PCI Express save buffer\n");
@@ -1381,50 +1474,6 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
}
/**
- * pci_restore_standard_config - restore standard config registers of PCI device
- * @dev: PCI device to handle
- *
- * This function assumes that the device's configuration space is accessible.
- * If the device needs to be powered up, the function will wait for it to
- * change the state.
- */
-int pci_restore_standard_config(struct pci_dev *dev)
-{
- pci_power_t prev_state;
- int error;
-
- pci_update_current_state(dev, PCI_D0);
-
- prev_state = dev->current_state;
- if (prev_state == PCI_D0)
- goto Restore;
-
- error = pci_raw_set_power_state(dev, PCI_D0, false);
- if (error)
- return error;
-
- /*
- * This assumes that we won't get a bus in B2 or B3 from the BIOS, but
- * we've made this assumption forever and it appears to be universally
- * satisfied.
- */
- switch(prev_state) {
- case PCI_D3cold:
- case PCI_D3hot:
- mdelay(pci_pm_d3_delay);
- break;
- case PCI_D2:
- udelay(PCI_PM_D2_DELAY);
- break;
- }
-
- pci_update_current_state(dev, PCI_D0);
-
- Restore:
- return dev->state_saved ? pci_restore_state(dev) : 0;
-}
-
-/**
 * pci_enable_ari - enable ARI forwarding if hardware supports it
* @dev: the PCI device
*/
@@ -1484,7 +1533,7 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
if (!pin)
return -1;
- while (dev->bus->self) {
+ while (!pci_is_root_bus(dev->bus)) {
pin = pci_swizzle_interrupt_pin(dev, pin);
dev = dev->bus->self;
}
@@ -1504,7 +1553,7 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
u8 pin = *pinp;
- while (dev->bus->self) {
+ while (!pci_is_root_bus(dev->bus)) {
pin = pci_swizzle_interrupt_pin(dev, pin);
dev = dev->bus->self;
}
@@ -2010,99 +2059,177 @@ int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
#endif
-static int __pcie_flr(struct pci_dev *dev, int probe)
+static int pcie_flr(struct pci_dev *dev, int probe)
{
- u16 status;
+ int i;
+ int pos;
u32 cap;
- int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ u16 status;
- if (!exppos)
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
return -ENOTTY;
- pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
+
+ pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
if (!(cap & PCI_EXP_DEVCAP_FLR))
return -ENOTTY;
if (probe)
return 0;
- pci_block_user_cfg_access(dev);
-
/* Wait for Transaction Pending bit clean */
- msleep(100);
- pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
- if (status & PCI_EXP_DEVSTA_TRPND) {
- dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
- "sleeping for 1 second\n");
- ssleep(1);
- pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
- if (status & PCI_EXP_DEVSTA_TRPND)
- dev_info(&dev->dev, "Still busy after 1s; "
- "proceeding with reset anyway\n");
+ for (i = 0; i < 4; i++) {
+ if (i)
+ msleep((1 << (i - 1)) * 100);
+
+ pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
+ if (!(status & PCI_EXP_DEVSTA_TRPND))
+ goto clear;
}
- pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
+ dev_err(&dev->dev, "transaction is not cleared; "
+ "proceeding with reset anyway\n");
+
+clear:
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_BCR_FLR);
- mdelay(100);
+ msleep(100);
- pci_unblock_user_cfg_access(dev);
return 0;
}
-static int __pci_af_flr(struct pci_dev *dev, int probe)
+static int pci_af_flr(struct pci_dev *dev, int probe)
{
- int cappos = pci_find_capability(dev, PCI_CAP_ID_AF);
- u8 status;
+ int i;
+ int pos;
u8 cap;
+ u8 status;
- if (!cappos)
+ pos = pci_find_capability(dev, PCI_CAP_ID_AF);
+ if (!pos)
return -ENOTTY;
- pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap);
+
+ pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
return -ENOTTY;
if (probe)
return 0;
- pci_block_user_cfg_access(dev);
-
/* Wait for Transaction Pending bit clean */
- msleep(100);
- pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
- if (status & PCI_AF_STATUS_TP) {
- dev_info(&dev->dev, "Busy after 100ms while trying to"
- " reset; sleeping for 1 second\n");
- ssleep(1);
- pci_read_config_byte(dev,
- cappos + PCI_AF_STATUS, &status);
- if (status & PCI_AF_STATUS_TP)
- dev_info(&dev->dev, "Still busy after 1s; "
- "proceeding with reset anyway\n");
+ for (i = 0; i < 4; i++) {
+ if (i)
+ msleep((1 << (i - 1)) * 100);
+
+ pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
+ if (!(status & PCI_AF_STATUS_TP))
+ goto clear;
}
- pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
- mdelay(100);
- pci_unblock_user_cfg_access(dev);
+ dev_err(&dev->dev, "transaction is not cleared; "
+ "proceeding with reset anyway\n");
+
+clear:
+ pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
+ msleep(100);
+
return 0;
}
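The polling loop in both pcie_flr() and pci_af_flr() checks the Transaction Pending bit immediately and then sleeps 100 ms, 200 ms and 400 ms between the remaining reads, so a busy device gets roughly 700 ms to drain before the reset is issued anyway; this replaces the old fixed 100 ms plus 1 s wait.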
-static int __pci_reset_function(struct pci_dev *pdev, int probe)
+static int pci_pm_reset(struct pci_dev *dev, int probe)
{
- int res;
+ u16 csr;
+
+ if (!dev->pm_cap)
+ return -ENOTTY;
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
+ if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
- res = __pcie_flr(pdev, probe);
- if (res != -ENOTTY)
- return res;
+ if (dev->current_state != PCI_D0)
+ return -EINVAL;
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D3hot;
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
+ msleep(pci_pm_d3_delay);
- res = __pci_af_flr(pdev, probe);
- if (res != -ENOTTY)
- return res;
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D0;
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
+ msleep(pci_pm_d3_delay);
- return res;
+ return 0;
+}
+
+static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+{
+ u16 ctrl;
+ struct pci_dev *pdev;
+
+ if (dev->subordinate)
+ return -ENOTTY;
+
+ list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+ if (pdev != dev)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
+ ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
+ msleep(100);
+
+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
+ msleep(100);
+
+ return 0;
+}
+
+static int pci_dev_reset(struct pci_dev *dev, int probe)
+{
+ int rc;
+
+ might_sleep();
+
+ if (!probe) {
+ pci_block_user_cfg_access(dev);
+ /* block PM suspend, driver probe, etc. */
+ down(&dev->dev.sem);
+ }
+
+ rc = pcie_flr(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pci_af_flr(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pci_pm_reset(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pci_parent_bus_reset(dev, probe);
+done:
+ if (!probe) {
+ up(&dev->dev.sem);
+ pci_unblock_user_cfg_access(dev);
+ }
+
+ return rc;
}
/**
- * pci_execute_reset_function() - Reset a PCI device function
- * @dev: Device function to reset
+ * __pci_reset_function - reset a PCI device function
+ * @dev: PCI device to reset
*
* Some devices allow an individual function to be reset without affecting
* other functions in the same device. The PCI device must be responsive
@@ -2114,18 +2241,18 @@ static int __pci_reset_function(struct pci_dev *pdev, int probe)
* device including MSI, bus mastering, BARs, decoding IO and memory spaces,
* etc.
*
- * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * Returns 0 if the device function was successfully reset or negative if the
* device doesn't support resetting a single function.
*/
-int pci_execute_reset_function(struct pci_dev *dev)
+int __pci_reset_function(struct pci_dev *dev)
{
- return __pci_reset_function(dev, 0);
+ return pci_dev_reset(dev, 0);
}
-EXPORT_SYMBOL_GPL(pci_execute_reset_function);
+EXPORT_SYMBOL_GPL(__pci_reset_function);
/**
- * pci_reset_function() - quiesce and reset a PCI device function
- * @dev: Device function to reset
+ * pci_reset_function - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
*
* Some devices allow an individual function to be reset without affecting
* other functions in the same device. The PCI device must be responsive
@@ -2133,32 +2260,33 @@ EXPORT_SYMBOL_GPL(pci_execute_reset_function);
*
* This function does not just reset the PCI portion of a device, but
* clears all the state associated with the device. This function differs
- * from pci_execute_reset_function in that it saves and restores device state
+ * from __pci_reset_function in that it saves and restores device state
* over the reset.
*
- * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * Returns 0 if the device function was successfully reset or negative if the
* device doesn't support resetting a single function.
*/
int pci_reset_function(struct pci_dev *dev)
{
- int r = __pci_reset_function(dev, 1);
+ int rc;
- if (r < 0)
- return r;
+ rc = pci_dev_reset(dev, 1);
+ if (rc)
+ return rc;
- if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
- disable_irq(dev->irq);
pci_save_state(dev);
+ /*
+ * both INTx and MSI are disabled after the Interrupt Disable bit
+ * is set and the Bus Master bit is cleared.
+ */
pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
- r = pci_execute_reset_function(dev);
+ rc = pci_dev_reset(dev, 0);
pci_restore_state(dev);
- if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
- enable_irq(dev->irq);
- return r;
+ return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
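A minimal kernel-side sketch of the save/reset/restore contract described above, assuming a driver that already holds the struct pci_dev; the wrapper name is hypothetical:

/* Sketch: recover a wedged function from its driver.  pci_reset_function()
 * saves config space, applies the first reset method pci_dev_reset() finds
 * (FLR, AF FLR, PM reset or parent bus reset) and restores the state. */
static int example_recover_function(struct pci_dev *pdev)
{
	int rc = pci_reset_function(pdev);

	if (rc)
		dev_err(&pdev->dev, "function reset failed: %d\n", rc);
	return rc;
}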
@@ -2346,18 +2474,140 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
*/
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
+ int reg;
+
if (resno < PCI_ROM_RESOURCE) {
*type = pci_bar_unknown;
return PCI_BASE_ADDRESS_0 + 4 * resno;
} else if (resno == PCI_ROM_RESOURCE) {
*type = pci_bar_mem32;
return dev->rom_base_reg;
+ } else if (resno < PCI_BRIDGE_RESOURCES) {
+ /* device specific resource */
+ reg = pci_iov_resource_bar(dev, resno, type);
+ if (reg)
+ return reg;
}
dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
return 0;
}
+#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
+static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
+spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED;
+
+/**
+ * pci_specified_resource_alignment - get resource alignment specified by user.
+ * @dev: the PCI device to get
+ *
+ * RETURNS: Resource alignment if it is specified.
+ * Zero if it is not specified.
+ */
+resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
+{
+ int seg, bus, slot, func, align_order, count;
+ resource_size_t align = 0;
+ char *p;
+
+ spin_lock(&resource_alignment_lock);
+ p = resource_alignment_param;
+ while (*p) {
+ count = 0;
+ if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
+ p[count] == '@') {
+ p += count + 1;
+ } else {
+ align_order = -1;
+ }
+ if (sscanf(p, "%x:%x:%x.%x%n",
+ &seg, &bus, &slot, &func, &count) != 4) {
+ seg = 0;
+ if (sscanf(p, "%x:%x.%x%n",
+ &bus, &slot, &func, &count) != 3) {
+ /* Invalid format */
+ printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
+ p);
+ break;
+ }
+ }
+ p += count;
+ if (seg == pci_domain_nr(dev->bus) &&
+ bus == dev->bus->number &&
+ slot == PCI_SLOT(dev->devfn) &&
+ func == PCI_FUNC(dev->devfn)) {
+ if (align_order == -1) {
+ align = PAGE_SIZE;
+ } else {
+ align = 1 << align_order;
+ }
+ /* Found */
+ break;
+ }
+ if (*p != ';' && *p != ',') {
+ /* End of param or invalid format */
+ break;
+ }
+ p++;
+ }
+ spin_unlock(&resource_alignment_lock);
+ return align;
+}
+
+/**
+ * pci_is_reassigndev - check if the specified PCI device is a reassignment target
+ * @dev: the PCI device to check
+ *
+ * RETURNS: non-zero if the PCI device is a target device to reassign,
+ * zero otherwise.
+ */
+int pci_is_reassigndev(struct pci_dev *dev)
+{
+ return (pci_specified_resource_alignment(dev) != 0);
+}
+
+ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
+{
+ if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
+ count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
+ spin_lock(&resource_alignment_lock);
+ strncpy(resource_alignment_param, buf, count);
+ resource_alignment_param[count] = '\0';
+ spin_unlock(&resource_alignment_lock);
+ return count;
+}
+
+ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
+{
+ size_t count;
+ spin_lock(&resource_alignment_lock);
+ count = snprintf(buf, size, "%s", resource_alignment_param);
+ spin_unlock(&resource_alignment_lock);
+ return count;
+}
+
+static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
+{
+ return pci_get_resource_alignment_param(buf, PAGE_SIZE);
+}
+
+static ssize_t pci_resource_alignment_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ return pci_set_resource_alignment_param(buf, count);
+}
+
+BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
+ pci_resource_alignment_store);
+
+static int __init pci_resource_alignment_sysfs_init(void)
+{
+ return bus_create_file(&pci_bus_type,
+ &bus_attr_resource_alignment);
+}
+
+late_initcall(pci_resource_alignment_sysfs_init);
+
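Per the parsing above, each entry is an optional decimal alignment order followed by '@', then a [domain:]bus:slot.func address in hex, with entries separated by ';' or ','; omitting the order falls back to PAGE_SIZE. The string is handled by pci_setup(), i.e. passed as part of the pci= boot option, for example (device address illustrative):

	pci=resource_alignment=12@0000:03:00.0

which requests 2^12 = 4096-byte alignment for that device. The same string can also be read or changed at run time through the /sys/bus/pci/resource_alignment attribute created by the late_initcall above.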
static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
@@ -2406,6 +2656,11 @@ static int __init pci_setup(char *str)
pci_cardbus_io_size = memparse(str + 9, &str);
} else if (!strncmp(str, "cbmemsize=", 10)) {
pci_cardbus_mem_size = memparse(str + 10, &str);
+ } else if (!strncmp(str, "resource_alignment=", 19)) {
+ pci_set_resource_alignment_param(str + 19,
+ strlen(str + 19));
+ } else if (!strncmp(str, "ecrc=", 5)) {
+ pcie_ecrc_get_policy(str + 5);
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 07c0aa5275e..d03f6b99f29 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,6 +1,8 @@
#ifndef DRIVERS_PCI_H
#define DRIVERS_PCI_H
+#include <linux/workqueue.h>
+
#define PCI_CFG_SPACE_SIZE 256
#define PCI_CFG_SPACE_EXP_SIZE 4096
@@ -49,7 +51,6 @@ extern void pci_disable_enabled_device(struct pci_dev *dev);
extern void pci_pm_init(struct pci_dev *dev);
extern void platform_pci_wakeup_init(struct pci_dev *dev);
extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
-extern int pci_restore_standard_config(struct pci_dev *dev);
static inline bool pci_is_bridge(struct pci_dev *pci_dev)
{
@@ -136,6 +137,12 @@ extern int pcie_mch_quirk;
extern struct device_attribute pci_dev_attrs[];
extern struct device_attribute dev_attr_cpuaffinity;
extern struct device_attribute dev_attr_cpulistaffinity;
+#ifdef CONFIG_HOTPLUG
+extern struct bus_attribute pci_bus_attrs[];
+#else
+#define pci_bus_attrs NULL
+#endif
+
/**
* pci_match_one_device - Tell if a PCI device structure has a matching
@@ -178,6 +185,7 @@ enum pci_bar_type {
pci_bar_mem64, /* A 64-bit memory BAR */
};
+extern int pci_setup_device(struct pci_dev *dev);
extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int reg);
extern int pci_resource_bar(struct pci_dev *dev, int resno,
@@ -195,4 +203,60 @@ static inline int pci_ari_enabled(struct pci_bus *bus)
return bus->self && bus->self->ari_enabled;
}
+#ifdef CONFIG_PCI_QUIRKS
+extern int pci_is_reassigndev(struct pci_dev *dev);
+resource_size_t pci_specified_resource_alignment(struct pci_dev *dev);
+extern void pci_disable_bridge_window(struct pci_dev *dev);
+#endif
+
+/* Single Root I/O Virtualization */
+struct pci_sriov {
+ int pos; /* capability position */
+ int nres; /* number of resources */
+ u32 cap; /* SR-IOV Capabilities */
+ u16 ctrl; /* SR-IOV Control */
+ u16 total; /* total VFs associated with the PF */
+ u16 initial; /* initial VFs associated with the PF */
+ u16 nr_virtfn; /* number of VFs available */
+ u16 offset; /* first VF Routing ID offset */
+ u16 stride; /* following VF stride */
+ u32 pgsz; /* page size for BAR alignment */
+ u8 link; /* Function Dependency Link */
+ struct pci_dev *dev; /* lowest numbered PF */
+ struct pci_dev *self; /* this PF */
+ struct mutex lock; /* lock for VF bus */
+ struct work_struct mtask; /* VF Migration task */
+ u8 __iomem *mstate; /* VF Migration State Array */
+};
+
+#ifdef CONFIG_PCI_IOV
+extern int pci_iov_init(struct pci_dev *dev);
+extern void pci_iov_release(struct pci_dev *dev);
+extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
+ enum pci_bar_type *type);
+extern void pci_restore_iov_state(struct pci_dev *dev);
+extern int pci_iov_bus_range(struct pci_bus *bus);
+#else
+static inline int pci_iov_init(struct pci_dev *dev)
+{
+ return -ENODEV;
+}
+static inline void pci_iov_release(struct pci_dev *dev)
+
+{
+}
+static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno,
+ enum pci_bar_type *type)
+{
+ return 0;
+}
+static inline void pci_restore_iov_state(struct pci_dev *dev)
+{
+}
+static inline int pci_iov_bus_range(struct pci_bus *bus)
+{
+ return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
#endif /* DRIVERS_PCI_H */
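The CONFIG_PCI_IOV stubs let core code call the SR-IOV helpers unconditionally; with the option disabled they compile to no-ops and pci_iov_init() simply reports -ENODEV. A minimal sketch of a caller (the function name is hypothetical):

/* Sketch: probing code can attempt SR-IOV setup without #ifdefs. */
static void example_setup_sriov(struct pci_dev *dev)
{
	int rc = pci_iov_init(dev);

	if (rc)
		dev_dbg(&dev->dev, "SR-IOV not initialized: %d\n", rc);
}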
diff --git a/drivers/pci/pcie/aer/Kconfig b/drivers/pci/pcie/aer/Kconfig
index c3bde588aa1..50e94e02378 100644
--- a/drivers/pci/pcie/aer/Kconfig
+++ b/drivers/pci/pcie/aer/Kconfig
@@ -10,3 +10,18 @@ config PCIEAER
This enables PCI Express Root Port Advanced Error Reporting
(AER) driver support. Error reporting messages sent to Root
Port will be handled by PCI Express AER driver.
+
+
+#
+# PCI Express ECRC
+#
+config PCIE_ECRC
+ bool "PCI Express ECRC settings control"
+ depends on PCIEAER
+ help
+ Used to override firmware/BIOS settings for PCI Express ECRC
+ (transaction layer end-to-end CRC checking).
+
+ When in doubt, say N.
+
+source "drivers/pci/pcie/aer/Kconfig.debug"
diff --git a/drivers/pci/pcie/aer/Kconfig.debug b/drivers/pci/pcie/aer/Kconfig.debug
new file mode 100644
index 00000000000..b8c925c1f6a
--- /dev/null
+++ b/drivers/pci/pcie/aer/Kconfig.debug
@@ -0,0 +1,18 @@
+#
+# PCI Express Root Port Device AER Debug Configuration
+#
+
+config PCIEAER_INJECT
+ tristate "PCIE AER error injector support"
+ depends on PCIEAER
+ default n
+ help
+ This enables PCI Express Root Port Advanced Error Reporting
+ (AER) software error injector.
+
+ Debugging PCIe AER code is quite difficult because it is hard
+ to trigger various real hardware errors. Software-based
+ error injection can fake almost all kinds of errors with the
+ help of a user-space helper tool, aer-inject, which can be
+ obtained from:
+ http://www.kernel.org/pub/linux/utils/pci/aer-inject/
diff --git a/drivers/pci/pcie/aer/Makefile b/drivers/pci/pcie/aer/Makefile
index 8da3bd8455a..2cba67510dc 100644
--- a/drivers/pci/pcie/aer/Makefile
+++ b/drivers/pci/pcie/aer/Makefile
@@ -4,6 +4,9 @@
obj-$(CONFIG_PCIEAER) += aerdriver.o
+obj-$(CONFIG_PCIE_ECRC) += ecrc.o
+
aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o
aerdriver-$(CONFIG_ACPI) += aerdrv_acpi.o
+obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
new file mode 100644
index 00000000000..d92ae21a59d
--- /dev/null
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -0,0 +1,473 @@
+/*
+ * PCIE AER software error injection support.
+ *
+ * Debugging PCIe AER code is quite difficult because it is hard to
+ * trigger various real hardware errors. Software-based error
+ * injection can fake almost all kinds of errors with the help of a
+ * user-space helper tool, aer-inject, which can be obtained from:
+ * http://www.kernel.org/pub/linux/utils/pci/aer-inject/
+ *
+ * Copyright 2009 Intel Corporation.
+ * Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include "aerdrv.h"
+
+struct aer_error_inj
+{
+ u8 bus;
+ u8 dev;
+ u8 fn;
+ u32 uncor_status;
+ u32 cor_status;
+ u32 header_log0;
+ u32 header_log1;
+ u32 header_log2;
+ u32 header_log3;
+};
+
+struct aer_error
+{
+ struct list_head list;
+ unsigned int bus;
+ unsigned int devfn;
+ int pos_cap_err;
+
+ u32 uncor_status;
+ u32 cor_status;
+ u32 header_log0;
+ u32 header_log1;
+ u32 header_log2;
+ u32 header_log3;
+ u32 root_status;
+ u32 source_id;
+};
+
+struct pci_bus_ops
+{
+ struct list_head list;
+ struct pci_bus *bus;
+ struct pci_ops *ops;
+};
+
+static LIST_HEAD(einjected);
+
+static LIST_HEAD(pci_bus_ops_list);
+
+/* Protect einjected and pci_bus_ops_list */
+static DEFINE_SPINLOCK(inject_lock);
+
+static void aer_error_init(struct aer_error *err, unsigned int bus,
+ unsigned int devfn, int pos_cap_err)
+{
+ INIT_LIST_HEAD(&err->list);
+ err->bus = bus;
+ err->devfn = devfn;
+ err->pos_cap_err = pos_cap_err;
+}
+
+/* inject_lock must be held before calling */
+static struct aer_error *__find_aer_error(unsigned int bus, unsigned int devfn)
+{
+ struct aer_error *err;
+
+ list_for_each_entry(err, &einjected, list) {
+ if (bus == err->bus && devfn == err->devfn)
+ return err;
+ }
+ return NULL;
+}
+
+/* inject_lock must be held before calling */
+static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
+{
+ return __find_aer_error(dev->bus->number, dev->devfn);
+}
+
+/* inject_lock must be held before calling */
+static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
+{
+ struct pci_bus_ops *bus_ops;
+
+ list_for_each_entry(bus_ops, &pci_bus_ops_list, list) {
+ if (bus_ops->bus == bus)
+ return bus_ops->ops;
+ }
+ return NULL;
+}
+
+static struct pci_bus_ops *pci_bus_ops_pop(void)
+{
+ unsigned long flags;
+ struct pci_bus_ops *bus_ops = NULL;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (list_empty(&pci_bus_ops_list))
+ bus_ops = NULL;
+ else {
+ struct list_head *lh = pci_bus_ops_list.next;
+ list_del(lh);
+ bus_ops = list_entry(lh, struct pci_bus_ops, list);
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return bus_ops;
+}
+
+static u32 *find_pci_config_dword(struct aer_error *err, int where,
+ int *prw1cs)
+{
+ int rw1cs = 0;
+ u32 *target = NULL;
+
+ if (err->pos_cap_err == -1)
+ return NULL;
+
+ switch (where - err->pos_cap_err) {
+ case PCI_ERR_UNCOR_STATUS:
+ target = &err->uncor_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_COR_STATUS:
+ target = &err->cor_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_HEADER_LOG:
+ target = &err->header_log0;
+ break;
+ case PCI_ERR_HEADER_LOG+4:
+ target = &err->header_log1;
+ break;
+ case PCI_ERR_HEADER_LOG+8:
+ target = &err->header_log2;
+ break;
+ case PCI_ERR_HEADER_LOG+12:
+ target = &err->header_log3;
+ break;
+ case PCI_ERR_ROOT_STATUS:
+ target = &err->root_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_ROOT_COR_SRC:
+ target = &err->source_id;
+ break;
+ }
+ if (prw1cs)
+ *prw1cs = rw1cs;
+ return target;
+}
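
To make the offset mapping above concrete, here is how a simulated access resolves, assuming the faked device's AER extended capability sits at config offset 0x100 (a common location for the first extended capability; the real value comes from pci_find_ext_capability() in aer_inject() below):

/*
 * Example, with err->pos_cap_err == 0x100:
 *
 *   a 4-byte config read at 0x100 + PCI_ERR_COR_STATUS (0x10), i.e.
 *   offset 0x110, is served from err->cor_status;
 *
 *   a 4-byte config write of the same set bits to 0x110 clears them,
 *   because the register is flagged rw1cs and pci_write_aer() below
 *   does "*sim ^= val", emulating write-1-to-clear for bits that are
 *   currently set.
 */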
+
+static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ u32 *sim;
+ struct aer_error *err;
+ unsigned long flags;
+ struct pci_ops *ops;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (size != sizeof(u32))
+ goto out;
+ err = __find_aer_error(bus->number, devfn);
+ if (!err)
+ goto out;
+
+ sim = find_pci_config_dword(err, where, NULL);
+ if (sim) {
+ *val = *sim;
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return 0;
+ }
+out:
+ ops = __find_pci_bus_ops(bus);
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return ops->read(bus, devfn, where, size, val);
+}
+
+int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size,
+ u32 val)
+{
+ u32 *sim;
+ struct aer_error *err;
+ unsigned long flags;
+ int rw1cs;
+ struct pci_ops *ops;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (size != sizeof(u32))
+ goto out;
+ err = __find_aer_error(bus->number, devfn);
+ if (!err)
+ goto out;
+
+ sim = find_pci_config_dword(err, where, &rw1cs);
+ if (sim) {
+ if (rw1cs)
+ *sim ^= val;
+ else
+ *sim = val;
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return 0;
+ }
+out:
+ ops = __find_pci_bus_ops(bus);
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return ops->write(bus, devfn, where, size, val);
+}
+
+static struct pci_ops pci_ops_aer = {
+ .read = pci_read_aer,
+ .write = pci_write_aer,
+};
+
+static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
+ struct pci_bus *bus,
+ struct pci_ops *ops)
+{
+ INIT_LIST_HEAD(&bus_ops->list);
+ bus_ops->bus = bus;
+ bus_ops->ops = ops;
+}
+
+static int pci_bus_set_aer_ops(struct pci_bus *bus)
+{
+ struct pci_ops *ops;
+ struct pci_bus_ops *bus_ops;
+ unsigned long flags;
+
+ bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+ ops = pci_bus_set_ops(bus, &pci_ops_aer);
+ spin_lock_irqsave(&inject_lock, flags);
+ if (ops == &pci_ops_aer)
+ goto out;
+ pci_bus_ops_init(bus_ops, bus, ops);
+ list_add(&bus_ops->list, &pci_bus_ops_list);
+ bus_ops = NULL;
+out:
+ spin_unlock_irqrestore(&inject_lock, flags);
+ if (bus_ops)
+ kfree(bus_ops);
+ return 0;
+}
+
+static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+{
+ while (1) {
+ if (!dev->is_pcie)
+ break;
+ if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+ return dev;
+ if (!dev->bus->self)
+ break;
+ dev = dev->bus->self;
+ }
+ return NULL;
+}
+
+static int find_aer_device_iter(struct device *device, void *data)
+{
+ struct pcie_device **result = data;
+ struct pcie_device *pcie_dev;
+
+ if (device->bus == &pcie_port_bus_type) {
+ pcie_dev = to_pcie_device(device);
+ if (pcie_dev->service & PCIE_PORT_SERVICE_AER) {
+ *result = pcie_dev;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int find_aer_device(struct pci_dev *dev, struct pcie_device **result)
+{
+ return device_for_each_child(&dev->dev, result, find_aer_device_iter);
+}
+
+static int aer_inject(struct aer_error_inj *einj)
+{
+ struct aer_error *err, *rperr;
+ struct aer_error *err_alloc = NULL, *rperr_alloc = NULL;
+ struct pci_dev *dev, *rpdev;
+ struct pcie_device *edev;
+ unsigned long flags;
+ unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
+ int pos_cap_err, rp_pos_cap_err;
+ u32 sever;
+ int ret = 0;
+
+ dev = pci_get_bus_and_slot(einj->bus, devfn);
+ if (!dev)
+ return -EINVAL;
+ rpdev = pcie_find_root_port(dev);
+ if (!rpdev) {
+ ret = -EINVAL;
+ goto out_put;
+ }
+
+ pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos_cap_err) {
+ ret = -EIO;
+ goto out_put;
+ }
+ pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
+
+ rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
+ if (!rp_pos_cap_err) {
+ ret = -EIO;
+ goto out_put;
+ }
+
+ err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
+ if (!err_alloc) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+ rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
+ if (!rperr_alloc) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ spin_lock_irqsave(&inject_lock, flags);
+
+ err = __find_aer_error_by_dev(dev);
+ if (!err) {
+ err = err_alloc;
+ err_alloc = NULL;
+ aer_error_init(err, einj->bus, devfn, pos_cap_err);
+ list_add(&err->list, &einjected);
+ }
+ err->uncor_status |= einj->uncor_status;
+ err->cor_status |= einj->cor_status;
+ err->header_log0 = einj->header_log0;
+ err->header_log1 = einj->header_log1;
+ err->header_log2 = einj->header_log2;
+ err->header_log3 = einj->header_log3;
+
+ rperr = __find_aer_error_by_dev(rpdev);
+ if (!rperr) {
+ rperr = rperr_alloc;
+ rperr_alloc = NULL;
+ aer_error_init(rperr, rpdev->bus->number, rpdev->devfn,
+ rp_pos_cap_err);
+ list_add(&rperr->list, &einjected);
+ }
+ if (einj->cor_status) {
+ if (rperr->root_status & PCI_ERR_ROOT_COR_RCV)
+ rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
+ else
+ rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
+ rperr->source_id &= 0xffff0000;
+ rperr->source_id |= (einj->bus << 8) | devfn;
+ }
+ if (einj->uncor_status) {
+ if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
+ rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
+ if (sever & einj->uncor_status) {
+ rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
+ if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
+ rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
+ } else
+ rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
+ rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
+ rperr->source_id &= 0x0000ffff;
+ rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+
+ ret = pci_bus_set_aer_ops(dev->bus);
+ if (ret)
+ goto out_put;
+ ret = pci_bus_set_aer_ops(rpdev->bus);
+ if (ret)
+ goto out_put;
+
+ if (find_aer_device(rpdev, &edev))
+ aer_irq(-1, edev);
+ else
+ ret = -EINVAL;
+out_put:
+ if (err_alloc)
+ kfree(err_alloc);
+ if (rperr_alloc)
+ kfree(rperr_alloc);
+ pci_dev_put(dev);
+ return ret;
+}
+
+static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
+ size_t usize, loff_t *off)
+{
+ struct aer_error_inj einj;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (usize != sizeof(struct aer_error_inj))
+ return -EINVAL;
+
+ if (copy_from_user(&einj, ubuf, usize))
+ return -EFAULT;
+
+ ret = aer_inject(&einj);
+ return ret ? ret : usize;
+}
+
+static const struct file_operations aer_inject_fops = {
+ .write = aer_inject_write,
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice aer_inject_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "aer_inject",
+ .fops = &aer_inject_fops,
+};
+
+static int __init aer_inject_init(void)
+{
+ return misc_register(&aer_inject_device);
+}
+
+static void __exit aer_inject_exit(void)
+{
+ struct aer_error *err, *err_next;
+ unsigned long flags;
+ struct pci_bus_ops *bus_ops;
+
+ misc_deregister(&aer_inject_device);
+
+ while ((bus_ops = pci_bus_ops_pop())) {
+ pci_bus_set_ops(bus_ops->bus, bus_ops->ops);
+ kfree(bus_ops);
+ }
+
+ spin_lock_irqsave(&inject_lock, flags);
+ list_for_each_entry_safe(err, err_next,
+ &pci_bus_ops_list, list) {
+ list_del(&err->list);
+ kfree(err);
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+}
+
+module_init(aer_inject_init);
+module_exit(aer_inject_exit);
+
+MODULE_DESCRIPTION("PCIE AER software error injector");
+MODULE_LICENSE("GPL");
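
For illustration, a minimal userspace sketch of the write ABI this module expects: one struct aer_error_inj per write(), which must be exactly sizeof(struct aer_error_inj) bytes. The /dev/aer_inject node name, the target device 01:00.0 and the status value 0x00000001 (assumed to be the Receiver Error bit of the AER correctable status register) are example assumptions; the aer-inject tool from the URL above is the supported front end for this interface.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Userspace mirror of the kernel's struct aer_error_inj; field order,
 * sizes and the implicit padding after 'fn' must match the kernel side. */
struct aer_error_inj {
	uint8_t bus;
	uint8_t dev;
	uint8_t fn;
	uint32_t uncor_status;
	uint32_t cor_status;
	uint32_t header_log0;
	uint32_t header_log1;
	uint32_t header_log2;
	uint32_t header_log3;
};

int main(void)
{
	struct aer_error_inj einj = {
		.bus = 0x01, .dev = 0x00, .fn = 0x00,
		.cor_status = 0x00000001,	/* assumed: Receiver Error */
	};
	int fd = open("/dev/aer_inject", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/aer_inject");
		return 1;
	}
	if (write(fd, &einj, sizeof(einj)) != (ssize_t)sizeof(einj)) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}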
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index e390707661d..4770f13b3ca 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -38,30 +38,13 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-static int __devinit aer_probe (struct pcie_device *dev,
- const struct pcie_port_service_id *id );
+static int __devinit aer_probe (struct pcie_device *dev);
static void aer_remove(struct pcie_device *dev);
-static int aer_suspend(struct pcie_device *dev, pm_message_t state)
-{return 0;}
-static int aer_resume(struct pcie_device *dev) {return 0;}
static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
enum pci_channel_state error);
static void aer_error_resume(struct pci_dev *dev);
static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
-/*
- * PCI Express bus's AER Root service driver data structure
- */
-static struct pcie_port_service_id aer_id[] = {
- {
- .vendor = PCI_ANY_ID,
- .device = PCI_ANY_ID,
- .port_type = PCIE_RC_PORT,
- .service_type = PCIE_PORT_SERVICE_AER,
- },
- { /* end: all zeroes */ }
-};
-
static struct pci_error_handlers aer_error_handlers = {
.error_detected = aer_error_detected,
.resume = aer_error_resume,
@@ -69,14 +52,12 @@ static struct pci_error_handlers aer_error_handlers = {
static struct pcie_port_service_driver aerdriver = {
.name = "aer",
- .id_table = &aer_id[0],
+ .port_type = PCIE_ANY_PORT,
+ .service = PCIE_PORT_SERVICE_AER,
.probe = aer_probe,
.remove = aer_remove,
- .suspend = aer_suspend,
- .resume = aer_resume,
-
.err_handler = &aer_error_handlers,
.reset_link = aer_root_reset,
@@ -96,7 +77,7 @@ void pci_no_aer(void)
*
* Invoked when Root Port detects AER messages.
**/
-static irqreturn_t aer_irq(int irq, void *context)
+irqreturn_t aer_irq(int irq, void *context)
{
unsigned int status, id;
struct pcie_device *pdev = (struct pcie_device *)context;
@@ -145,6 +126,7 @@ static irqreturn_t aer_irq(int irq, void *context)
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(aer_irq);
/**
* aer_alloc_rpc - allocate Root Port data structure
@@ -207,8 +189,7 @@ static void aer_remove(struct pcie_device *dev)
*
* Invoked when PCI Express bus loads AER service driver.
**/
-static int __devinit aer_probe (struct pcie_device *dev,
- const struct pcie_port_service_id *id )
+static int __devinit aer_probe (struct pcie_device *dev)
{
int status;
struct aer_rpc *rpc;
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index c7ad68b6c6d..bbd7428ca2d 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -11,6 +11,7 @@
#include <linux/workqueue.h>
#include <linux/pcieport_if.h>
#include <linux/aer.h>
+#include <linux/interrupt.h>
#define AER_NONFATAL 0
#define AER_FATAL 1
@@ -56,7 +57,11 @@ struct header_log_regs {
unsigned int dw3;
};
+#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
struct aer_err_info {
+ struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
+ int error_dev_num;
+ u16 id;
int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */
int flags;
unsigned int status; /* COR/UNCOR Error Status */
@@ -95,6 +100,9 @@ struct aer_broadcast_data {
static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
enum pci_ers_result new)
{
+ if (new == PCI_ERS_RESULT_NONE)
+ return orig;
+
switch (orig) {
case PCI_ERS_RESULT_CAN_RECOVER:
case PCI_ERS_RESULT_RECOVERED:
@@ -117,6 +125,7 @@ extern void aer_delete_rootport(struct aer_rpc *rpc);
extern int aer_init(struct pcie_device *dev);
extern void aer_isr(struct work_struct *work);
extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+extern irqreturn_t aer_irq(int irq, void *context);
#ifdef CONFIG_ACPI
extern int aer_osc_setup(struct pcie_device *pciedev);
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index ebce26c3704..8edb2f300e8 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -38,7 +38,7 @@ int aer_osc_setup(struct pcie_device *pciedev)
handle = acpi_find_root_bridge_handle(pdev);
if (handle) {
- status = pci_osc_control_set(handle,
+ status = acpi_pci_osc_control_set(handle,
OSC_PCI_EXPRESS_AER_CONTROL |
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
}
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 38257500738..3d8872704a5 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -26,7 +26,9 @@
#include "aerdrv.h"
static int forceload;
+static int nosourceid;
module_param(forceload, bool, 0);
+module_param(nosourceid, bool, 0);
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
@@ -109,19 +111,23 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
#endif /* 0 */
-static void set_device_error_reporting(struct pci_dev *dev, void *data)
+static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
bool enable = *((bool *)data);
- if (dev->pcie_type != PCIE_RC_PORT &&
- dev->pcie_type != PCIE_SW_UPSTREAM_PORT &&
- dev->pcie_type != PCIE_SW_DOWNSTREAM_PORT)
- return;
+ if (dev->pcie_type == PCIE_RC_PORT ||
+ dev->pcie_type == PCIE_SW_UPSTREAM_PORT ||
+ dev->pcie_type == PCIE_SW_DOWNSTREAM_PORT) {
+ if (enable)
+ pci_enable_pcie_error_reporting(dev);
+ else
+ pci_disable_pcie_error_reporting(dev);
+ }
if (enable)
- pci_enable_pcie_error_reporting(dev);
- else
- pci_disable_pcie_error_reporting(dev);
+ pcie_set_ecrc_checking(dev);
+
+ return 0;
}
/**
@@ -139,73 +145,148 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
}
-static int find_device_iter(struct device *device, void *data)
+static inline int compare_device_id(struct pci_dev *dev,
+ struct aer_err_info *e_info)
{
- struct pci_dev *dev;
- u16 id = *(unsigned long *)data;
- u8 secondary, subordinate, d_bus = id >> 8;
+ if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) {
+ /*
+ * Device ID match
+ */
+ return 1;
+ }
- if (device->bus == &pci_bus_type) {
- dev = to_pci_dev(device);
- if (id == ((dev->bus->number << 8) | dev->devfn)) {
- /*
- * Device ID match
- */
- *(unsigned long*)data = (unsigned long)device;
- return 1;
- }
+ return 0;
+}
+
+static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
+{
+ if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
+ e_info->dev[e_info->error_dev_num] = dev;
+ e_info->error_dev_num++;
+ return 1;
+ } else
+ return 0;
+}
+
+
+#define PCI_BUS(x) (((x) >> 8) & 0xff)
+
+static int find_device_iter(struct pci_dev *dev, void *data)
+{
+ int pos;
+ u32 status;
+ u32 mask;
+ u16 reg16;
+ int result;
+ struct aer_err_info *e_info = (struct aer_err_info *)data;
+
+ /*
+	 * When the bus id is 0, it might be a bad id
+	 * reported by the root port.
+ */
+ if (!nosourceid && (PCI_BUS(e_info->id) != 0)) {
+ result = compare_device_id(dev, e_info);
+ if (result)
+ add_error_device(e_info, dev);
/*
- * If device is P2P, check if it is an upstream?
+		 * If there is no multiple-error condition, we stop
+		 * or continue based on the id comparison.
*/
- if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
- pci_read_config_byte(dev, PCI_SECONDARY_BUS,
- &secondary);
- pci_read_config_byte(dev, PCI_SUBORDINATE_BUS,
- &subordinate);
- if (d_bus >= secondary && d_bus <= subordinate) {
- *(unsigned long*)data = (unsigned long)device;
- return 1;
- }
+ if (!(e_info->flags & AER_MULTI_ERROR_VALID_FLAG))
+ return result;
+
+ /*
+		 * If there are multiple errors and the id does match,
+		 * we need to continue searching other devices under
+		 * the root port. Returning 0 means exactly that.
+ */
+ if (result)
+ return 0;
+ }
+
+ /*
+	 * When any of the following is true:
+	 * 1) nosourceid==y;
+	 * 2) the bus id is 0 (some ports might lose the bus
+	 *    id of the error source id);
+	 * 3) there are multiple errors and the prior id comparison failed;
+	 * we check the AER status registers to find the initial reporter.
+ */
+ if (atomic_read(&dev->enable_cnt) == 0)
+ return 0;
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return 0;
+ /* Check if AER is enabled */
+ pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
+ if (!(reg16 & (
+ PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE)))
+ return 0;
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return 0;
+
+ status = 0;
+ mask = 0;
+ if (e_info->severity == AER_CORRECTABLE) {
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_COR_STATUS,
+ &status);
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_COR_MASK,
+ &mask);
+ if (status & ERR_CORRECTABLE_ERROR_MASK & ~mask) {
+ add_error_device(e_info, dev);
+ goto added;
+ }
+ } else {
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_UNCOR_STATUS,
+ &status);
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_UNCOR_MASK,
+ &mask);
+ if (status & ERR_UNCORRECTABLE_ERROR_MASK & ~mask) {
+ add_error_device(e_info, dev);
+ goto added;
}
}
return 0;
+
+added:
+ if (e_info->flags & AER_MULTI_ERROR_VALID_FLAG)
+ return 0;
+ else
+ return 1;
}
/**
* find_source_device - search through device hierarchy for source device
* @parent: pointer to Root Port pci_dev data structure
- * @id: device ID of agent who sends an error message to this Root Port
+ * @e_info: detailed error information, including the error source id
*
* Invoked when error is detected at the Root Port.
*/
-static struct device* find_source_device(struct pci_dev *parent, u16 id)
+static void find_source_device(struct pci_dev *parent,
+ struct aer_err_info *e_info)
{
struct pci_dev *dev = parent;
- struct device *device;
- unsigned long device_addr;
- int status;
+ int result;
/* Is Root Port an agent that sends error message? */
- if (id == ((dev->bus->number << 8) | dev->devfn))
- return &dev->dev;
-
- do {
- device_addr = id;
- if ((status = device_for_each_child(&dev->dev,
- &device_addr, find_device_iter))) {
- device = (struct device*)device_addr;
- dev = to_pci_dev(device);
- if (id == ((dev->bus->number << 8) | dev->devfn))
- return device;
- }
- }while (status);
+ result = find_device_iter(dev, e_info);
+ if (result)
+ return;
- return NULL;
+ pci_walk_bus(parent->subordinate, find_device_iter, e_info);
}
-static void report_error_detected(struct pci_dev *dev, void *data)
+static int report_error_detected(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
struct pci_error_handlers *err_handler;
@@ -230,16 +311,16 @@ static void report_error_detected(struct pci_dev *dev, void *data)
dev->driver ?
"no AER-aware driver" : "no driver");
}
- return;
+ return 0;
}
err_handler = dev->driver->err_handler;
vote = err_handler->error_detected(dev, result_data->state);
result_data->result = merge_result(result_data->result, vote);
- return;
+ return 0;
}
-static void report_mmio_enabled(struct pci_dev *dev, void *data)
+static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
struct pci_error_handlers *err_handler;
@@ -249,15 +330,15 @@ static void report_mmio_enabled(struct pci_dev *dev, void *data)
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->mmio_enabled)
- return;
+ return 0;
err_handler = dev->driver->err_handler;
vote = err_handler->mmio_enabled(dev);
result_data->result = merge_result(result_data->result, vote);
- return;
+ return 0;
}
-static void report_slot_reset(struct pci_dev *dev, void *data)
+static int report_slot_reset(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
struct pci_error_handlers *err_handler;
@@ -267,15 +348,15 @@ static void report_slot_reset(struct pci_dev *dev, void *data)
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->slot_reset)
- return;
+ return 0;
err_handler = dev->driver->err_handler;
vote = err_handler->slot_reset(dev);
result_data->result = merge_result(result_data->result, vote);
- return;
+ return 0;
}
-static void report_resume(struct pci_dev *dev, void *data)
+static int report_resume(struct pci_dev *dev, void *data)
{
struct pci_error_handlers *err_handler;
@@ -284,11 +365,11 @@ static void report_resume(struct pci_dev *dev, void *data)
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->resume)
- return;
+ return 0;
err_handler = dev->driver->err_handler;
err_handler->resume(dev);
- return;
+ return 0;
}
/**
@@ -305,7 +386,7 @@ static void report_resume(struct pci_dev *dev, void *data)
static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
enum pci_channel_state state,
char *error_mesg,
- void (*cb)(struct pci_dev *, void *))
+ int (*cb)(struct pci_dev *, void *))
{
struct aer_broadcast_data result_data;
@@ -351,21 +432,21 @@ static int find_aer_service_iter(struct device *device, void *data)
{
struct device_driver *driver;
struct pcie_port_service_driver *service_driver;
- struct pcie_device *pcie_dev;
struct find_aer_service_data *result;
result = (struct find_aer_service_data *) data;
if (device->bus == &pcie_port_bus_type) {
- pcie_dev = to_pcie_device(device);
- if (pcie_dev->id.port_type == PCIE_SW_DOWNSTREAM_PORT)
+ struct pcie_port_data *port_data;
+
+ port_data = pci_get_drvdata(to_pcie_device(device)->port);
+ if (port_data->port_type == PCIE_SW_DOWNSTREAM_PORT)
result->is_downstream = 1;
driver = device->driver;
if (driver) {
service_driver = to_service_driver(driver);
- if (service_driver->id_table->service_type ==
- PCIE_PORT_SERVICE_AER) {
+ if (service_driver->service == PCIE_PORT_SERVICE_AER) {
result->aer_driver = service_driver;
return 1;
}
@@ -497,12 +578,12 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
*/
static void handle_error_source(struct pcie_device * aerdev,
struct pci_dev *dev,
- struct aer_err_info info)
+ struct aer_err_info *info)
{
pci_ers_result_t status = 0;
int pos;
- if (info.severity == AER_CORRECTABLE) {
+ if (info->severity == AER_CORRECTABLE) {
/*
* Correctable error does not need software intevention.
* No need to go through error recovery process.
@@ -510,9 +591,9 @@ static void handle_error_source(struct pcie_device * aerdev,
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (pos)
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
- info.status);
+ info->status);
} else {
- status = do_recovery(aerdev, dev, info.severity);
+ status = do_recovery(aerdev, dev, info->severity);
if (status == PCI_ERS_RESULT_RECOVERED) {
dev_printk(KERN_DEBUG, &dev->dev, "AER driver "
"successfully recovered\n");
@@ -661,6 +742,28 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
return AER_SUCCESS;
}
+static inline void aer_process_err_devices(struct pcie_device *p_device,
+ struct aer_err_info *e_info)
+{
+ int i;
+
+ if (!e_info->dev[0]) {
+ dev_printk(KERN_DEBUG, &p_device->port->dev,
+ "can't find device of ID%04x\n",
+ e_info->id);
+ }
+
+ for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
+ if (get_device_error_info(e_info->dev[i], e_info) ==
+ AER_SUCCESS) {
+ aer_print_error(e_info->dev[i], e_info);
+ handle_error_source(p_device,
+ e_info->dev[i],
+ e_info);
+ }
+ }
+}
+
/**
* aer_isr_one_error - consume an error detected by root port
* @p_device: pointer to error root port service device
@@ -669,10 +772,16 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
static void aer_isr_one_error(struct pcie_device *p_device,
struct aer_err_source *e_src)
{
- struct device *s_device;
- struct aer_err_info e_info = {0, 0, 0,};
+ struct aer_err_info *e_info;
int i;
- u16 id;
+
+ /* struct aer_err_info might be big, so we allocate it with slab */
+ e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
+ if (e_info == NULL) {
+ dev_printk(KERN_DEBUG, &p_device->port->dev,
+ "Can't allocate mem when processing AER errors\n");
+ return;
+ }
/*
* There is a possibility that both correctable error and
@@ -684,31 +793,26 @@ static void aer_isr_one_error(struct pcie_device *p_device,
if (!(e_src->status & i))
continue;
+ memset(e_info, 0, sizeof(struct aer_err_info));
+
/* Init comprehensive error information */
if (i & PCI_ERR_ROOT_COR_RCV) {
- id = ERR_COR_ID(e_src->id);
- e_info.severity = AER_CORRECTABLE;
+ e_info->id = ERR_COR_ID(e_src->id);
+ e_info->severity = AER_CORRECTABLE;
} else {
- id = ERR_UNCOR_ID(e_src->id);
- e_info.severity = ((e_src->status >> 6) & 1);
+ e_info->id = ERR_UNCOR_ID(e_src->id);
+ e_info->severity = ((e_src->status >> 6) & 1);
}
if (e_src->status &
(PCI_ERR_ROOT_MULTI_COR_RCV |
PCI_ERR_ROOT_MULTI_UNCOR_RCV))
- e_info.flags |= AER_MULTI_ERROR_VALID_FLAG;
- if (!(s_device = find_source_device(p_device->port, id))) {
- printk(KERN_DEBUG "%s->can't find device of ID%04x\n",
- __func__, id);
- continue;
- }
- if (get_device_error_info(to_pci_dev(s_device), &e_info) ==
- AER_SUCCESS) {
- aer_print_error(to_pci_dev(s_device), &e_info);
- handle_error_source(p_device,
- to_pci_dev(s_device),
- e_info);
- }
+ e_info->flags |= AER_MULTI_ERROR_VALID_FLAG;
+
+ find_source_device(p_device->port, e_info);
+ aer_process_err_devices(p_device, e_info);
}
+
+ kfree(e_info);
}
/**
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
new file mode 100644
index 00000000000..ece97df4df6
--- /dev/null
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -0,0 +1,131 @@
+/*
+ * Enables/disables PCIe ECRC checking.
+ *
+ * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
+ * Andrew Patterson <andrew.patterson@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/errno.h>
+#include "../../pci.h"
+
+#define ECRC_POLICY_DEFAULT 0 /* ECRC set by BIOS */
+#define ECRC_POLICY_OFF 1 /* ECRC off for performance */
+#define ECRC_POLICY_ON 2 /* ECRC on for data integrity */
+
+static int ecrc_policy = ECRC_POLICY_DEFAULT;
+
+static const char *ecrc_policy_str[] = {
+ [ECRC_POLICY_DEFAULT] = "bios",
+ [ECRC_POLICY_OFF] = "off",
+ [ECRC_POLICY_ON] = "on"
+};
+
+/**
+ * enable_ecrc_checking - enable PCIe ECRC checking for a device
+ * @dev: the PCI device
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+static int enable_ecrc_checking(struct pci_dev *dev)
+{
+ int pos;
+ u32 reg32;
+
+ if (!dev->is_pcie)
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ if (reg32 & PCI_ERR_CAP_ECRC_GENC)
+ reg32 |= PCI_ERR_CAP_ECRC_GENE;
+ if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
+ reg32 |= PCI_ERR_CAP_ECRC_CHKE;
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+ return 0;
+}
+
+/**
+ * disable_ecrc_checking - disable PCIe ECRC checking for a device
+ * @dev: the PCI device
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+static int disable_ecrc_checking(struct pci_dev *dev)
+{
+ int pos;
+ u32 reg32;
+
+ if (!dev->is_pcie)
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+ return 0;
+}
+
+/**
+ * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
+ * @dev: the PCI device
+ */
+void pcie_set_ecrc_checking(struct pci_dev *dev)
+{
+ switch (ecrc_policy) {
+ case ECRC_POLICY_DEFAULT:
+ return;
+ case ECRC_POLICY_OFF:
+ disable_ecrc_checking(dev);
+ break;
+ case ECRC_POLICY_ON:
+		enable_ecrc_checking(dev);
+ break;
+ default:
+ return;
+ }
+}
+
+/**
+ * pcie_ecrc_get_policy - parse kernel command-line ecrc option
+ */
+void pcie_ecrc_get_policy(char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++)
+ if (!strncmp(str, ecrc_policy_str[i],
+ strlen(ecrc_policy_str[i])))
+ break;
+ if (i >= ARRAY_SIZE(ecrc_policy_str))
+ return;
+
+ ecrc_policy = i;
+}
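
pcie_ecrc_get_policy() only records the requested policy; the command-line hook that feeds it a string lives in drivers/pci/pci.c elsewhere in this series. A minimal sketch of such a caller is shown below; the "ecrc=" sub-option of a "pci=" boot parameter and the prototype's header location are assumptions made for the example.

/* Hypothetical wiring sketch; the real hook is in drivers/pci/pci.c. */
#include <linux/init.h>
#include <linux/string.h>
#include <linux/pci.h>	/* assumed home of the pcie_ecrc_get_policy() prototype */

static int __init hypothetical_pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');

		if (k)
			*k++ = '\0';
		if (*str && !strncmp(str, "ecrc=", 5))
			pcie_ecrc_get_policy(str + 5);	/* "bios", "off" or "on" */
		str = k;
	}
	return 0;
}
early_param("pci", hypothetical_pci_setup);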
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index b0367f168af..3d27c97e048 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -26,40 +26,36 @@
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."
-struct endpoint_state {
- unsigned int l0s_acceptable_latency;
- unsigned int l1_acceptable_latency;
+struct aspm_latency {
+ u32 l0s; /* L0s latency (nsec) */
+ u32 l1; /* L1 latency (nsec) */
};
struct pcie_link_state {
- struct list_head sibiling;
- struct pci_dev *pdev;
- bool downstream_has_switch;
-
- struct pcie_link_state *parent;
- struct list_head children;
- struct list_head link;
+ struct pci_dev *pdev; /* Upstream component of the Link */
+ struct pcie_link_state *root; /* pointer to the root port link */
+ struct pcie_link_state *parent; /* pointer to the parent Link state */
+ struct list_head sibling; /* node in link_list */
+ struct list_head children; /* list of child link states */
+ struct list_head link; /* node in parent's children list */
/* ASPM state */
- unsigned int support_state;
- unsigned int enabled_state;
- unsigned int bios_aspm_state;
- /* upstream component */
- unsigned int l0s_upper_latency;
- unsigned int l1_upper_latency;
- /* downstream component */
- unsigned int l0s_down_latency;
- unsigned int l1_down_latency;
- /* Clock PM state*/
- unsigned int clk_pm_capable;
- unsigned int clk_pm_enabled;
- unsigned int bios_clk_state;
+ u32 aspm_support:2; /* Supported ASPM state */
+ u32 aspm_enabled:2; /* Enabled ASPM state */
+ u32 aspm_default:2; /* Default ASPM state by BIOS */
+
+ /* Clock PM state */
+ u32 clkpm_capable:1; /* Clock PM capable? */
+ u32 clkpm_enabled:1; /* Current Clock PM state */
+ u32 clkpm_default:1; /* Default Clock PM state by BIOS */
+ /* Latencies */
+ struct aspm_latency latency; /* Exit latency */
/*
- * A pcie downstream port only has one slot under it, so at most there
- * are 8 functions
+ * Endpoint acceptable latencies. A pcie downstream port only
+ * has one slot under it, so at most there are 8 functions.
*/
- struct endpoint_state endpoints[8];
+ struct aspm_latency acceptable[8];
};
static int aspm_disabled, aspm_force;
@@ -78,27 +74,23 @@ static const char *policy_str[] = {
#define LINK_RETRAIN_TIMEOUT HZ
-static int policy_to_aspm_state(struct pci_dev *pdev)
+static int policy_to_aspm_state(struct pcie_link_state *link)
{
- struct pcie_link_state *link_state = pdev->link_state;
-
switch (aspm_policy) {
case POLICY_PERFORMANCE:
/* Disable ASPM and Clock PM */
return 0;
case POLICY_POWERSAVE:
/* Enable ASPM L0s/L1 */
- return PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
+ return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
case POLICY_DEFAULT:
- return link_state->bios_aspm_state;
+ return link->aspm_default;
}
return 0;
}
-static int policy_to_clkpm_state(struct pci_dev *pdev)
+static int policy_to_clkpm_state(struct pcie_link_state *link)
{
- struct pcie_link_state *link_state = pdev->link_state;
-
switch (aspm_policy) {
case POLICY_PERFORMANCE:
/* Disable ASPM and Clock PM */
@@ -107,73 +99,78 @@ static int policy_to_clkpm_state(struct pci_dev *pdev)
/* Disable Clock PM */
return 1;
case POLICY_DEFAULT:
- return link_state->bios_clk_state;
+ return link->clkpm_default;
}
return 0;
}
-static void pcie_set_clock_pm(struct pci_dev *pdev, int enable)
+static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
- struct pci_dev *child_dev;
int pos;
u16 reg16;
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
if (!pos)
return;
- pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16);
+ pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
if (enable)
reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN;
else
reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
- pci_write_config_word(child_dev, pos + PCI_EXP_LNKCTL, reg16);
+ pci_write_config_word(child, pos + PCI_EXP_LNKCTL, reg16);
}
- link_state->clk_pm_enabled = !!enable;
+ link->clkpm_enabled = !!enable;
}
-static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist)
+static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
- int pos;
+ /* Don't enable Clock PM if the link is not Clock PM capable */
+ if (!link->clkpm_capable && enable)
+ return;
+	/* Nothing to do if the requested state equals the current state */
+ if (link->clkpm_enabled == enable)
+ return;
+ pcie_set_clkpm_nocheck(link, enable);
+}
+
+static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
+{
+ int pos, capable = 1, enabled = 1;
u32 reg32;
u16 reg16;
- int capable = 1, enabled = 1;
- struct pci_dev *child_dev;
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
/* All functions should have the same cap and state, take the worst */
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
if (!pos)
return;
- pci_read_config_dword(child_dev, pos + PCI_EXP_LNKCAP, &reg32);
+ pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, &reg32);
if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
capable = 0;
enabled = 0;
break;
}
- pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16);
+ pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
enabled = 0;
}
- link_state->clk_pm_enabled = enabled;
- link_state->bios_clk_state = enabled;
- if (!blacklist) {
- link_state->clk_pm_capable = capable;
- pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
- } else {
- link_state->clk_pm_capable = 0;
- pcie_set_clock_pm(pdev, 0);
- }
+ link->clkpm_enabled = enabled;
+ link->clkpm_default = enabled;
+ link->clkpm_capable = (blacklist) ? 0 : capable;
}
-static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
+static bool pcie_aspm_downstream_has_switch(struct pcie_link_state *link)
{
- struct pci_dev *child_dev;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM)
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if (child->pcie_type == PCI_EXP_TYPE_UPSTREAM)
return true;
}
return false;
@@ -184,289 +181,263 @@ static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
* could use common clock. If they are, configure them to use the
* common clock. That will reduce the ASPM state exit latency.
*/
-static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
+static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
- int pos, child_pos, i = 0;
- u16 reg16 = 0;
- struct pci_dev *child_dev;
- int same_clock = 1;
+ int ppos, cpos, same_clock = 1;
+ u16 reg16, parent_reg, child_reg[8];
unsigned long start_jiffies;
- u16 child_regs[8], parent_reg;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
/*
- * all functions of a slot should have the same Slot Clock
+ * All functions of a slot should have the same Slot Clock
* Configuration, so just check one function
- * */
- child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev,
- bus_list);
- BUG_ON(!child_dev->is_pcie);
+ */
+ child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
+ BUG_ON(!child->is_pcie);
/* Check downstream component if bit Slot Clock Configuration is 1 */
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKSTA, &reg16);
+ cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
/* Check upstream component if bit Slot Clock Configuration is 1 */
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
+ ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
/* Configure downstream component, all functions */
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
- &reg16);
- child_regs[i] = reg16;
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, &reg16);
+ child_reg[PCI_FUNC(child->devfn)] = reg16;
if (same_clock)
reg16 |= PCI_EXP_LNKCTL_CCC;
else
reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
- reg16);
- i++;
+ pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, reg16);
}
/* Configure upstream component */
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+ pci_read_config_word(parent, ppos + PCI_EXP_LNKCTL, &reg16);
parent_reg = reg16;
if (same_clock)
reg16 |= PCI_EXP_LNKCTL_CCC;
else
reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+ pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
- /* retrain link */
+ /* Retrain link */
reg16 |= PCI_EXP_LNKCTL_RL;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+ pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
- /* Wait for link training end */
- /* break out after waiting for timeout */
+ /* Wait for link training end. Break out after waiting for timeout */
start_jiffies = jiffies;
for (;;) {
- pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
+ pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_LT))
break;
if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
break;
msleep(1);
}
- /* training failed -> recover */
- if (reg16 & PCI_EXP_LNKSTA_LT) {
- dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure"
- " common clock\n");
- i = 0;
- list_for_each_entry(child_dev, &pdev->subordinate->devices,
- bus_list) {
- child_pos = pci_find_capability(child_dev,
- PCI_CAP_ID_EXP);
- pci_write_config_word(child_dev,
- child_pos + PCI_EXP_LNKCTL,
- child_regs[i]);
- i++;
- }
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, parent_reg);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ return;
+
+ /* Training failed. Restore common clock configurations */
+ dev_printk(KERN_ERR, &parent->dev,
+ "ASPM: Could not configure common clock\n");
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_write_config_word(child, cpos + PCI_EXP_LNKCTL,
+ child_reg[PCI_FUNC(child->devfn)]);
}
+ pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, parent_reg);
}
-/*
- * calc_L0S_latency: Convert L0s latency encoding to ns
- */
-static unsigned int calc_L0S_latency(unsigned int latency_encoding, int ac)
+/* Convert L0s latency encoding to ns */
+static u32 calc_l0s_latency(u32 encoding)
{
- unsigned int ns = 64;
+ if (encoding == 0x7)
+ return (5 * 1000); /* > 4us */
+ return (64 << encoding);
+}
- if (latency_encoding == 0x7) {
- if (ac)
- ns = -1U;
- else
- ns = 5*1000; /* > 4us */
- } else
- ns *= (1 << latency_encoding);
- return ns;
+/* Convert L0s acceptable latency encoding to ns */
+static u32 calc_l0s_acceptable(u32 encoding)
+{
+ if (encoding == 0x7)
+ return -1U;
+ return (64 << encoding);
}
-/*
- * calc_L1_latency: Convert L1 latency encoding to ns
- */
-static unsigned int calc_L1_latency(unsigned int latency_encoding, int ac)
+/* Convert L1 latency encoding to ns */
+static u32 calc_l1_latency(u32 encoding)
{
- unsigned int ns = 1000;
+ if (encoding == 0x7)
+ return (65 * 1000); /* > 64us */
+ return (1000 << encoding);
+}
- if (latency_encoding == 0x7) {
- if (ac)
- ns = -1U;
- else
- ns = 65*1000; /* > 64us */
- } else
- ns *= (1 << latency_encoding);
- return ns;
+/* Convert L1 acceptable latency encoding to ns */
+static u32 calc_l1_acceptable(u32 encoding)
+{
+ if (encoding == 0x7)
+ return -1U;
+ return (1000 << encoding);
}
static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state,
- unsigned int *l0s, unsigned int *l1, unsigned int *enabled)
+ u32 *l0s, u32 *l1, u32 *enabled)
{
int pos;
u16 reg16;
- u32 reg32;
- unsigned int latency;
+ u32 reg32, encoding;
+ *l0s = *l1 = *enabled = 0;
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
*state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
if (*state != PCIE_LINK_STATE_L0S &&
- *state != (PCIE_LINK_STATE_L1|PCIE_LINK_STATE_L0S))
+ *state != (PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L0S))
*state = 0;
if (*state == 0)
return;
- latency = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
- *l0s = calc_L0S_latency(latency, 0);
+ encoding = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
+ *l0s = calc_l0s_latency(encoding);
if (*state & PCIE_LINK_STATE_L1) {
- latency = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
- *l1 = calc_L1_latency(latency, 0);
+ encoding = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
+ *l1 = calc_l1_latency(encoding);
}
pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- *enabled = reg16 & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1);
+ *enabled = reg16 & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
}
-static void pcie_aspm_cap_init(struct pci_dev *pdev)
+static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
- struct pci_dev *child_dev;
- u32 state, tmp;
- struct pcie_link_state *link_state = pdev->link_state;
+ u32 support, l0s, l1, enabled;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
+
+ if (blacklist) {
+ /* Set support state to 0, so we will disable ASPM later */
+ link->aspm_support = 0;
+ link->aspm_default = 0;
+ link->aspm_enabled = PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
+ return;
+ }
+
+ /* Configure common clock before checking latencies */
+ pcie_aspm_configure_common_clock(link);
/* upstream component states */
- pcie_aspm_get_cap_device(pdev, &link_state->support_state,
- &link_state->l0s_upper_latency,
- &link_state->l1_upper_latency,
- &link_state->enabled_state);
+ pcie_aspm_get_cap_device(parent, &support, &l0s, &l1, &enabled);
+ link->aspm_support = support;
+ link->latency.l0s = l0s;
+ link->latency.l1 = l1;
+ link->aspm_enabled = enabled;
+
/* downstream component states, all functions have the same setting */
- child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev,
- bus_list);
- pcie_aspm_get_cap_device(child_dev, &state,
- &link_state->l0s_down_latency,
- &link_state->l1_down_latency,
- &tmp);
- link_state->support_state &= state;
- if (!link_state->support_state)
+ child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
+ pcie_aspm_get_cap_device(child, &support, &l0s, &l1, &enabled);
+ link->aspm_support &= support;
+ link->latency.l0s = max_t(u32, link->latency.l0s, l0s);
+ link->latency.l1 = max_t(u32, link->latency.l1, l1);
+
+ if (!link->aspm_support)
return;
- link_state->enabled_state &= link_state->support_state;
- link_state->bios_aspm_state = link_state->enabled_state;
+
+ link->aspm_enabled &= link->aspm_support;
+ link->aspm_default = link->aspm_enabled;
/* ENDPOINT states*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
int pos;
- u32 reg32;
- unsigned int latency;
- struct endpoint_state *ep_state =
- &link_state->endpoints[PCI_FUNC(child_dev->devfn)];
+ u32 reg32, encoding;
+ struct aspm_latency *acceptable =
+ &link->acceptable[PCI_FUNC(child->devfn)];
- if (child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child_dev->pcie_type != PCI_EXP_TYPE_LEG_END)
+ if (child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
+ child->pcie_type != PCI_EXP_TYPE_LEG_END)
continue;
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_dword(child_dev, pos + PCI_EXP_DEVCAP, &reg32);
- latency = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
- latency = calc_L0S_latency(latency, 1);
- ep_state->l0s_acceptable_latency = latency;
- if (link_state->support_state & PCIE_LINK_STATE_L1) {
- latency = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
- latency = calc_L1_latency(latency, 1);
- ep_state->l1_acceptable_latency = latency;
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
+ encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
+ acceptable->l0s = calc_l0s_acceptable(encoding);
+ if (link->aspm_support & PCIE_LINK_STATE_L1) {
+ encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
+ acceptable->l1 = calc_l1_acceptable(encoding);
}
}
}
-static unsigned int __pcie_aspm_check_state_one(struct pci_dev *pdev,
- unsigned int state)
-{
- struct pci_dev *parent_dev, *tmp_dev;
- unsigned int latency, l1_latency = 0;
- struct pcie_link_state *link_state;
- struct endpoint_state *ep_state;
-
- parent_dev = pdev->bus->self;
- link_state = parent_dev->link_state;
- state &= link_state->support_state;
- if (state == 0)
- return 0;
- ep_state = &link_state->endpoints[PCI_FUNC(pdev->devfn)];
-
- /*
- * Check latency for endpoint device.
- * TBD: The latency from the endpoint to root complex vary per
- * switch's upstream link state above the device. Here we just do a
- * simple check which assumes all links above the device can be in L1
- * state, that is we just consider the worst case. If switch's upstream
- * link can't be put into L0S/L1, then our check is too strictly.
- */
- tmp_dev = pdev;
- while (state & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
- parent_dev = tmp_dev->bus->self;
- link_state = parent_dev->link_state;
- if (state & PCIE_LINK_STATE_L0S) {
- latency = max_t(unsigned int,
- link_state->l0s_upper_latency,
- link_state->l0s_down_latency);
- if (latency > ep_state->l0s_acceptable_latency)
- state &= ~PCIE_LINK_STATE_L0S;
- }
- if (state & PCIE_LINK_STATE_L1) {
- latency = max_t(unsigned int,
- link_state->l1_upper_latency,
- link_state->l1_down_latency);
- if (latency + l1_latency >
- ep_state->l1_acceptable_latency)
- state &= ~PCIE_LINK_STATE_L1;
- }
- if (!parent_dev->bus->self) /* parent_dev is a root port */
- break;
- else {
- /*
- * parent_dev is the downstream port of a switch, make
- * tmp_dev the upstream port of the switch
- */
- tmp_dev = parent_dev->bus->self;
- /*
- * every switch on the path to root complex need 1 more
- * microsecond for L1. Spec doesn't mention L0S.
- */
- if (state & PCIE_LINK_STATE_L1)
- l1_latency += 1000;
- }
+/**
+ * __pcie_aspm_check_state_one - check latency for endpoint device.
+ * @endpoint: pointer to the struct pci_dev of endpoint device
+ *
+ * TBD: The latency from the endpoint to the root complex varies with
+ * the upstream link state of each switch above the device. Here we
+ * just do a simple check which assumes all links above the device can
+ * be in L1 state, i.e. we only consider the worst case. If a switch's
+ * upstream link can't be put into L0s/L1, then our check is too strict.
+ */
+static u32 __pcie_aspm_check_state_one(struct pci_dev *endpoint, u32 state)
+{
+ u32 l1_switch_latency = 0;
+ struct aspm_latency *acceptable;
+ struct pcie_link_state *link;
+
+ link = endpoint->bus->self->link_state;
+ state &= link->aspm_support;
+ acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
+
+ while (link && state) {
+ if ((state & PCIE_LINK_STATE_L0S) &&
+ (link->latency.l0s > acceptable->l0s))
+ state &= ~PCIE_LINK_STATE_L0S;
+ if ((state & PCIE_LINK_STATE_L1) &&
+ (link->latency.l1 + l1_switch_latency > acceptable->l1))
+ state &= ~PCIE_LINK_STATE_L1;
+ link = link->parent;
+ /*
+ * Every switch on the path to root complex need 1
+ * more microsecond for L1. Spec doesn't mention L0s.
+ */
+ l1_switch_latency += 1000;
}
return state;
}
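
To make the loop above concrete, a worked example with invented latencies for an endpoint sitting behind one switch:

/*
 *   acceptable->l1 (from the endpoint's DEVCAP)       = 8000 ns
 *
 *   1st pass, endpoint link:   4000 + 0    > 8000 ?  no, keep L1
 *   (link = link->parent, l1_switch_latency += 1000)
 *   2nd pass, root port link:  6000 + 1000 > 8000 ?  no, keep L1
 *
 * Had the root port link reported 8000 ns instead, 8000 + 1000 > 8000
 * would strip PCIE_LINK_STATE_L1 from the requested state.
 */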
-static unsigned int pcie_aspm_check_state(struct pci_dev *pdev,
- unsigned int state)
+static u32 pcie_aspm_check_state(struct pcie_link_state *link, u32 state)
{
- struct pci_dev *child_dev;
+ pci_power_t power_state;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
/* If no child, ignore the link */
- if (list_empty(&pdev->subordinate->devices))
+ if (list_empty(&linkbus->devices))
return state;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
- /*
- * If downstream component of a link is pci bridge, we
- * disable ASPM for now for the link
- * */
- state = 0;
- break;
- }
- if ((child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child_dev->pcie_type != PCI_EXP_TYPE_LEG_END))
+
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ /*
+		 * If the downstream component of a link is a pci bridge, we
+		 * disable ASPM on that link for now
+ */
+ if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return 0;
+
+ if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
+ child->pcie_type != PCI_EXP_TYPE_LEG_END))
continue;
/* Device not in D0 doesn't need check latency */
- if (child_dev->current_state == PCI_D1 ||
- child_dev->current_state == PCI_D2 ||
- child_dev->current_state == PCI_D3hot ||
- child_dev->current_state == PCI_D3cold)
+ power_state = child->current_state;
+ if (power_state == PCI_D1 || power_state == PCI_D2 ||
+ power_state == PCI_D3hot || power_state == PCI_D3cold)
continue;
- state = __pcie_aspm_check_state_one(child_dev, state);
+ state = __pcie_aspm_check_state_one(child, state);
}
return state;
}
@@ -482,90 +453,71 @@ static void __pcie_aspm_config_one_dev(struct pci_dev *pdev, unsigned int state)
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
}
-static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state)
+static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state)
{
- struct pci_dev *child_dev;
- int valid = 1;
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
/* If no child, disable the link */
- if (list_empty(&pdev->subordinate->devices))
+ if (list_empty(&linkbus->devices))
state = 0;
/*
- * if the downstream component has pci bridge function, don't do ASPM
- * now
+ * If the downstream component has pci bridge function, don't
+ * do ASPM now.
*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
- valid = 0;
- break;
- }
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return;
}
- if (!valid)
- return;
-
/*
- * spec 2.0 suggests all functions should be configured the same
- * setting for ASPM. Enabling ASPM L1 should be done in upstream
- * component first and then downstream, and vice versa for disabling
- * ASPM L1. Spec doesn't mention L0S.
+	 * Spec 2.0 suggests all functions should be configured with the
+	 * same ASPM setting. Enabling ASPM L1 should be done in the
+	 * upstream component first and then downstream, and vice
+ * versa for disabling ASPM L1. Spec doesn't mention L0S.
*/
if (state & PCIE_LINK_STATE_L1)
- __pcie_aspm_config_one_dev(pdev, state);
+ __pcie_aspm_config_one_dev(parent, state);
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list)
- __pcie_aspm_config_one_dev(child_dev, state);
+ list_for_each_entry(child, &linkbus->devices, bus_list)
+ __pcie_aspm_config_one_dev(child, state);
if (!(state & PCIE_LINK_STATE_L1))
- __pcie_aspm_config_one_dev(pdev, state);
+ __pcie_aspm_config_one_dev(parent, state);
- link_state->enabled_state = state;
+ link->aspm_enabled = state;
}
-static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link)
+/* Check the whole hierarchy, and configure each link in the hierarchy */
+static void __pcie_aspm_configure_link_state(struct pcie_link_state *link,
+ u32 state)
{
- struct pcie_link_state *root_port_link = link;
- while (root_port_link->parent)
- root_port_link = root_port_link->parent;
- return root_port_link;
-}
+ struct pcie_link_state *leaf, *root = link->root;
-/* check the whole hierarchy, and configure each link in the hierarchy */
-static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
- unsigned int state)
-{
- struct pcie_link_state *link_state = pdev->link_state;
- struct pcie_link_state *root_port_link = get_root_port_link(link_state);
- struct pcie_link_state *leaf;
+ state &= (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
- state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
-
- /* check all links who have specific root port link */
- list_for_each_entry(leaf, &link_list, sibiling) {
- if (!list_empty(&leaf->children) ||
- get_root_port_link(leaf) != root_port_link)
+	/* Check all leaf links that share this root port link */
+ list_for_each_entry(leaf, &link_list, sibling) {
+ if (!list_empty(&leaf->children) || (leaf->root != root))
continue;
- state = pcie_aspm_check_state(leaf->pdev, state);
+ state = pcie_aspm_check_state(leaf, state);
}
- /* check root port link too in case it hasn't children */
- state = pcie_aspm_check_state(root_port_link->pdev, state);
-
- if (link_state->enabled_state == state)
+	/* Check the root port link too in case it has no children */
+ state = pcie_aspm_check_state(root, state);
+ if (link->aspm_enabled == state)
return;
-
/*
- * we must change the hierarchy. See comments in
+ * We must change the hierarchy. See comments in
* __pcie_aspm_config_link for the order
**/
if (state & PCIE_LINK_STATE_L1) {
- list_for_each_entry(leaf, &link_list, sibiling) {
- if (get_root_port_link(leaf) == root_port_link)
- __pcie_aspm_config_link(leaf->pdev, state);
+ list_for_each_entry(leaf, &link_list, sibling) {
+ if (leaf->root == root)
+ __pcie_aspm_config_link(leaf, state);
}
} else {
- list_for_each_entry_reverse(leaf, &link_list, sibiling) {
- if (get_root_port_link(leaf) == root_port_link)
- __pcie_aspm_config_link(leaf->pdev, state);
+ list_for_each_entry_reverse(leaf, &link_list, sibling) {
+ if (leaf->root == root)
+ __pcie_aspm_config_link(leaf, state);
}
}
}
@@ -574,45 +526,42 @@ static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
* pcie_aspm_configure_link_state: enable/disable PCI express link state
* @pdev: the root port or switch downstream port
*/
-static void pcie_aspm_configure_link_state(struct pci_dev *pdev,
- unsigned int state)
+static void pcie_aspm_configure_link_state(struct pcie_link_state *link,
+ u32 state)
{
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- __pcie_aspm_configure_link_state(pdev, state);
+ __pcie_aspm_configure_link_state(link, state);
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
-static void free_link_state(struct pci_dev *pdev)
+static void free_link_state(struct pcie_link_state *link)
{
- kfree(pdev->link_state);
- pdev->link_state = NULL;
+ link->pdev->link_state = NULL;
+ kfree(link);
}
static int pcie_aspm_sanity_check(struct pci_dev *pdev)
{
- struct pci_dev *child_dev;
- int child_pos;
+ struct pci_dev *child;
+ int pos;
u32 reg32;
-
/*
- * Some functions in a slot might not all be PCIE functions, very
- * strange. Disable ASPM for the whole slot
+ * Some functions in a slot might not all be PCIe functions,
+ * which is very strange. Disable ASPM for the whole slot
*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- if (!child_pos)
+ list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ if (!pos)
return -EINVAL;
-
/*
 * Disable ASPM for pre-1.1 PCIe devices; like Microsoft, we use the
 * RBER bit to determine whether a function is a 1.1 device
*/
- pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP,
- &reg32);
+ pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
- dev_printk(KERN_INFO, &child_dev->dev, "disabling ASPM"
+ dev_printk(KERN_INFO, &child->dev, "disabling ASPM"
" on pre-1.1 PCIe device. You can enable it"
" with 'pcie_aspm=force'\n");
return -EINVAL;
@@ -621,6 +570,47 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
return 0;
}
+static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
+{
+ struct pcie_link_state *link;
+ int blacklist = !!pcie_aspm_sanity_check(pdev);
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return NULL;
+ INIT_LIST_HEAD(&link->sibling);
+ INIT_LIST_HEAD(&link->children);
+ INIT_LIST_HEAD(&link->link);
+ link->pdev = pdev;
+ if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) {
+ struct pcie_link_state *parent;
+ parent = pdev->bus->parent->self->link_state;
+ if (!parent) {
+ kfree(link);
+ return NULL;
+ }
+ link->parent = parent;
+ list_add(&link->link, &parent->children);
+ }
+ /* Setup a pointer to the root port link */
+ if (!link->parent)
+ link->root = link;
+ else
+ link->root = link->parent->root;
+
+ list_add(&link->sibling, &link_list);
+
+ pdev->link_state = link;
+
+ /* Check ASPM capability */
+ pcie_aspm_cap_init(link, blacklist);
+
+ /* Check Clock PM capability */
+ pcie_clkpm_cap_init(link, blacklist);
+
+ return link;
+}
+
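
A side note on the new root pointer cached in pcie_aspm_setup_link_state(): since every link now records its topmost link at setup time, the old get_root_port_link() walk collapses into a field dereference. A minimal stand-alone sketch of that equivalence, using simplified placeholder types (not the kernel structures themselves):

#include <stddef.h>

struct aspm_link {
	struct aspm_link *parent;	/* upstream link, NULL for a root port link */
	struct aspm_link *root;		/* cached topmost (root port) link */
};

static struct aspm_link *walk_to_root(struct aspm_link *link)
{
	while (link->parent)		/* what get_root_port_link() used to do */
		link = link->parent;
	return link;			/* same result as link->root after setup */
}
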
/*
 * pcie_aspm_init_link_state: Initialize PCI Express link state.
 * It is called after the PCIe device and its child devices have been scanned.
@@ -628,75 +618,47 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
*/
void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
- unsigned int state;
- struct pcie_link_state *link_state;
- int error = 0;
- int blacklist;
+ u32 state;
+ struct pcie_link_state *link;
if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
return;
if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+ pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+
+ /* VIA has a strange chipset, root port is under a bridge */
+ if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
+ pdev->bus->self)
return;
+
down_read(&pci_bus_sem);
if (list_empty(&pdev->subordinate->devices))
goto out;
- blacklist = !!pcie_aspm_sanity_check(pdev);
-
mutex_lock(&aspm_lock);
-
- link_state = kzalloc(sizeof(*link_state), GFP_KERNEL);
- if (!link_state)
- goto unlock_out;
-
- link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev);
- INIT_LIST_HEAD(&link_state->children);
- INIT_LIST_HEAD(&link_state->link);
- if (pdev->bus->self) {/* this is a switch */
- struct pcie_link_state *parent_link_state;
-
- parent_link_state = pdev->bus->parent->self->link_state;
- if (!parent_link_state) {
- kfree(link_state);
- goto unlock_out;
- }
- list_add(&link_state->link, &parent_link_state->children);
- link_state->parent = parent_link_state;
- }
-
- pdev->link_state = link_state;
-
- if (!blacklist) {
- pcie_aspm_configure_common_clock(pdev);
- pcie_aspm_cap_init(pdev);
+ link = pcie_aspm_setup_link_state(pdev);
+ if (!link)
+ goto unlock;
+ /*
+ * Setup initial ASPM state
+ *
+ * If the link has a switch below it, delay the link configuration.
+ * The leaf link initialization will configure the whole hierarchy,
+ * but we must make sure the BIOS doesn't set an unsupported link state.
+ */
+ if (pcie_aspm_downstream_has_switch(link)) {
+ state = pcie_aspm_check_state(link, link->aspm_default);
+ __pcie_aspm_config_link(link, state);
} else {
- link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
- link_state->bios_aspm_state = 0;
- /* Set support state to 0, so we will disable ASPM later */
- link_state->support_state = 0;
+ state = policy_to_aspm_state(link);
+ __pcie_aspm_configure_link_state(link, state);
}
- link_state->pdev = pdev;
- list_add(&link_state->sibiling, &link_list);
-
- if (link_state->downstream_has_switch) {
- /*
- * If link has switch, delay the link config. The leaf link
- * initialization will config the whole hierarchy. but we must
- * make sure BIOS doesn't set unsupported link state
- **/
- state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state);
- __pcie_aspm_config_link(pdev, state);
- } else
- __pcie_aspm_configure_link_state(pdev,
- policy_to_aspm_state(pdev));
-
- pcie_check_clock_pm(pdev, blacklist);
-
-unlock_out:
- if (error)
- free_link_state(pdev);
+ /* Setup initial Clock PM state */
+ state = (link->clkpm_capable) ? policy_to_clkpm_state(link) : 0;
+ pcie_set_clkpm(link, state);
+unlock:
mutex_unlock(&aspm_lock);
out:
up_read(&pci_bus_sem);
@@ -725,11 +687,11 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
/* All functions are removed, so just disable ASPM for the link */
__pcie_aspm_config_one_dev(parent, 0);
- list_del(&link_state->sibiling);
+ list_del(&link_state->sibling);
list_del(&link_state->link);
/* Clock PM is for endpoint device */
- free_link_state(parent);
+ free_link_state(link_state);
out:
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -749,7 +711,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
* devices changed PM state, we should recheck if latency meets all
* functions' requirement
*/
- pcie_aspm_configure_link_state(pdev, link_state->enabled_state);
+ pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
}
/*
@@ -772,14 +734,12 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
link_state = parent->link_state;
- link_state->support_state &=
- ~(state & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1));
- if (state & PCIE_LINK_STATE_CLKPM)
- link_state->clk_pm_capable = 0;
-
- __pcie_aspm_configure_link_state(parent, link_state->enabled_state);
- if (!link_state->clk_pm_capable && link_state->clk_pm_enabled)
- pcie_set_clock_pm(parent, 0);
+ link_state->aspm_support &= ~state;
+ __pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
+ if (state & PCIE_LINK_STATE_CLKPM) {
+ link_state->clkpm_capable = 0;
+ pcie_set_clkpm(link_state, 0);
+ }
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
@@ -788,7 +748,6 @@ EXPORT_SYMBOL(pci_disable_link_state);
static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
{
int i;
- struct pci_dev *pdev;
struct pcie_link_state *link_state;
for (i = 0; i < ARRAY_SIZE(policy_str); i++)
@@ -802,14 +761,10 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
aspm_policy = i;
- list_for_each_entry(link_state, &link_list, sibiling) {
- pdev = link_state->pdev;
- __pcie_aspm_configure_link_state(pdev,
- policy_to_aspm_state(pdev));
- if (link_state->clk_pm_capable &&
- link_state->clk_pm_enabled != policy_to_clkpm_state(pdev))
- pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
-
+ list_for_each_entry(link_state, &link_list, sibling) {
+ __pcie_aspm_configure_link_state(link_state,
+ policy_to_aspm_state(link_state));
+ pcie_set_clkpm(link_state, policy_to_clkpm_state(link_state));
}
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -838,7 +793,7 @@ static ssize_t link_state_show(struct device *dev,
struct pci_dev *pci_device = to_pci_dev(dev);
struct pcie_link_state *link_state = pci_device->link_state;
- return sprintf(buf, "%d\n", link_state->enabled_state);
+ return sprintf(buf, "%d\n", link_state->aspm_enabled);
}
static ssize_t link_state_store(struct device *dev,
@@ -846,7 +801,7 @@ static ssize_t link_state_store(struct device *dev,
const char *buf,
size_t n)
{
- struct pci_dev *pci_device = to_pci_dev(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
int state;
if (n < 1)
@@ -854,7 +809,7 @@ static ssize_t link_state_store(struct device *dev,
state = buf[0]-'0';
if (state >= 0 && state <= 3) {
/* setup link aspm state */
- pcie_aspm_configure_link_state(pci_device, state);
+ pcie_aspm_configure_link_state(pdev->link_state, state);
return n;
}
@@ -868,7 +823,7 @@ static ssize_t clk_ctl_show(struct device *dev,
struct pci_dev *pci_device = to_pci_dev(dev);
struct pcie_link_state *link_state = pci_device->link_state;
- return sprintf(buf, "%d\n", link_state->clk_pm_enabled);
+ return sprintf(buf, "%d\n", link_state->clkpm_enabled);
}
static ssize_t clk_ctl_store(struct device *dev,
@@ -876,7 +831,7 @@ static ssize_t clk_ctl_store(struct device *dev,
const char *buf,
size_t n)
{
- struct pci_dev *pci_device = to_pci_dev(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
int state;
if (n < 1)
@@ -885,7 +840,7 @@ static ssize_t clk_ctl_store(struct device *dev,
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- pcie_set_clock_pm(pci_device, !!state);
+ pcie_set_clkpm_nocheck(pdev->link_state, !!state);
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -904,10 +859,10 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
return;
- if (link_state->support_state)
+ if (link_state->aspm_support)
sysfs_add_file_to_group(&pdev->dev.kobj,
&dev_attr_link_state.attr, power_group);
- if (link_state->clk_pm_capable)
+ if (link_state->clkpm_capable)
sysfs_add_file_to_group(&pdev->dev.kobj,
&dev_attr_clk_ctl.attr, power_group);
}
@@ -920,10 +875,10 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
return;
- if (link_state->support_state)
+ if (link_state->aspm_support)
sysfs_remove_file_from_group(&pdev->dev.kobj,
&dev_attr_link_state.attr, power_group);
- if (link_state->clk_pm_capable)
+ if (link_state->clkpm_capable)
sysfs_remove_file_from_group(&pdev->dev.kobj,
&dev_attr_clk_ctl.attr, power_group);
}
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 2529f3f2ea5..17ad53868f9 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -25,19 +25,21 @@
#define PCIE_CAPABILITIES_REG 0x2
#define PCIE_SLOT_CAPABILITIES_REG 0x14
#define PCIE_PORT_DEVICE_MAXSERVICES 4
+#define PCIE_PORT_MSI_VECTOR_MASK 0x1f
+/*
+ * According to the PCI Express Base Specification 2.0, the indices of the MSI-X
+ * table entries used by port services must not exceed 31
+ */
+#define PCIE_PORT_MAX_MSIX_ENTRIES 32
#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
-struct pcie_port_device_ext {
- int interrupt_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */
-};
-
extern struct bus_type pcie_port_bus_type;
extern int pcie_port_device_probe(struct pci_dev *dev);
extern int pcie_port_device_register(struct pci_dev *dev);
#ifdef CONFIG_PM
-extern int pcie_port_device_suspend(struct pci_dev *dev, pm_message_t state);
-extern int pcie_port_device_resume(struct pci_dev *dev);
+extern int pcie_port_device_suspend(struct device *dev);
+extern int pcie_port_device_resume(struct device *dev);
#endif
extern void pcie_port_device_remove(struct pci_dev *dev);
extern int __must_check pcie_port_bus_register(void);
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
index eec89b767f9..ef3a4eeaebb 100644
--- a/drivers/pci/pcie/portdrv_bus.c
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -26,20 +26,22 @@ EXPORT_SYMBOL_GPL(pcie_port_bus_type);
static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
{
struct pcie_device *pciedev;
+ struct pcie_port_data *port_data;
struct pcie_port_service_driver *driver;
if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
return 0;
-
+
pciedev = to_pcie_device(dev);
driver = to_service_driver(drv);
- if ( (driver->id_table->vendor != PCI_ANY_ID &&
- driver->id_table->vendor != pciedev->id.vendor) ||
- (driver->id_table->device != PCI_ANY_ID &&
- driver->id_table->device != pciedev->id.device) ||
- (driver->id_table->port_type != PCIE_ANY_PORT &&
- driver->id_table->port_type != pciedev->id.port_type) ||
- driver->id_table->service_type != pciedev->id.service_type )
+
+ if (driver->service != pciedev->service)
+ return 0;
+
+ port_data = pci_get_drvdata(pciedev->port);
+
+ if (driver->port_type != PCIE_ANY_PORT
+ && driver->port_type != port_data->port_type)
return 0;
return 1;
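
Because the match rule now keys on driver->service and driver->port_type instead of an id_table, a port service driver registers itself roughly as below. This is a hedged sketch of the shape of the reworked interface; the driver name, chosen service bit and callbacks are placeholders:

#include <linux/pcieport_if.h>

static int example_probe(struct pcie_device *dev)
{
	return 0;				/* claim the service device */
}

static void example_remove(struct pcie_device *dev)
{
}

static struct pcie_port_service_driver example_service = {
	.name		= "example_service",		/* placeholder name */
	.port_type	= PCIE_ANY_PORT,		/* or a specific port type */
	.service	= PCIE_PORT_SERVICE_AER,	/* placeholder service bit */
	.probe		= example_probe,
	.remove		= example_remove,
};

/* registered from module init with pcie_port_service_register(&example_service) */
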
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 8b3f8c18032..13ffdc35ea0 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -15,10 +15,9 @@
#include <linux/slab.h>
#include <linux/pcieport_if.h>
+#include "../pci.h"
#include "portdrv.h"
-extern int pcie_mch_quirk; /* MSI-quirk Indicator */
-
/**
* release_pcie_device - free PCI Express port service device structure
* @dev: Port service device to release
@@ -31,26 +30,150 @@ static void release_pcie_device(struct device *dev)
kfree(to_pcie_device(dev));
}
-static int is_msi_quirked(struct pci_dev *dev)
+/**
+ * pcie_port_msix_add_entry - add entry to given array of MSI-X entries
+ * @entries: Array of MSI-X entries
+ * @new_entry: Index of the entry to add to the array
+ * @nr_entries: Number of entries aleady in the array
+ *
+ * Return value: Position of the added entry in the array
+ */
+static int pcie_port_msix_add_entry(
+ struct msix_entry *entries, int new_entry, int nr_entries)
{
- int port_type, quirk = 0;
+ int j;
+
+ for (j = 0; j < nr_entries; j++)
+ if (entries[j].entry == new_entry)
+ return j;
+
+ entries[j].entry = new_entry;
+ return j;
+}
+
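
The helper above de-duplicates MSI-X table indices shared by several services (e.g. PME and hot-plug). A small stand-alone sketch of the same dedup logic, with a stub msix_entry type, just to illustrate the return value and the nvec bookkeeping used by the caller:

#include <stdio.h>

struct msix_entry { unsigned int vector; unsigned short entry; };

static int add_entry(struct msix_entry *e, int new_entry, int nr)
{
	int j;

	for (j = 0; j < nr; j++)
		if (e[j].entry == new_entry)
			return j;	/* already present: reuse its slot */
	e[j].entry = new_entry;
	return j;			/* appended at position nr */
}

int main(void)
{
	struct msix_entry e[4] = { { 0, 0 } };
	int nvec = 0, i;

	i = add_entry(e, 5, nvec); if (i == nvec) nvec++;	/* i == 0, nvec -> 1 */
	i = add_entry(e, 5, nvec); if (i == nvec) nvec++;	/* i == 0, nvec stays 1 */
	printf("nvec = %d\n", nvec);				/* prints 1 */
	return 0;
}
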
+/**
+ * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port
+ * @dev: PCI Express port to handle
+ * @vectors: Array of interrupt vectors to populate
+ * @mask: Bitmask of port capabilities returned by get_port_device_capability()
+ *
+ * Return value: 0 on success, error code on failure
+ */
+static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
+{
+ struct msix_entry *msix_entries;
+ int idx[PCIE_PORT_DEVICE_MAXSERVICES];
+ int nr_entries, status, pos, i, nvec;
u16 reg16;
+ u32 reg32;
- pci_read_config_word(dev,
- pci_find_capability(dev, PCI_CAP_ID_EXP) +
- PCIE_CAPABILITIES_REG, &reg16);
- port_type = (reg16 >> 4) & PORT_TYPE_MASK;
- switch(port_type) {
- case PCIE_RC_PORT:
- if (pcie_mch_quirk == 1)
- quirk = 1;
- break;
- case PCIE_SW_UPSTREAM_PORT:
- case PCIE_SW_DOWNSTREAM_PORT:
- default:
- break;
+ nr_entries = pci_msix_table_size(dev);
+ if (!nr_entries)
+ return -EINVAL;
+ if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES)
+ nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES;
+
+ msix_entries = kzalloc(sizeof(*msix_entries) * nr_entries, GFP_KERNEL);
+ if (!msix_entries)
+ return -ENOMEM;
+
+ /*
+ * Allocate as many entries as the port wants, so that we can check
+ * which of them will be useful. Moreover, if nr_entries is exactly
+ * equal to the number of entries this port actually uses, we'll happily
+ * go through without any tricks.
+ */
+ for (i = 0; i < nr_entries; i++)
+ msix_entries[i].entry = i;
+
+ status = pci_enable_msix(dev, msix_entries, nr_entries);
+ if (status)
+ goto Exit;
+
+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+ idx[i] = -1;
+ status = -EIO;
+ nvec = 0;
+
+ if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
+ int entry;
+
+ /*
+ * The code below follows the PCI Express Base Specification 2.0
+ * stating in Section 6.1.6 that "PME and Hot-Plug Event
+ * interrupts (when both are implemented) always share the same
+ * MSI or MSI-X vector, as indicated by the Interrupt Message
+ * Number field in the PCI Express Capabilities register", where
+ * according to Section 7.8.2 of the specification "For MSI-X,
+ * the value in this field indicates which MSI-X Table entry is
+ * used to generate the interrupt message."
+ */
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, &reg16);
+ entry = (reg16 >> 9) & PCIE_PORT_MSI_VECTOR_MASK;
+ if (entry >= nr_entries)
+ goto Error;
+
+ i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
+ if (i == nvec)
+ nvec++;
+
+ idx[PCIE_PORT_SERVICE_PME_SHIFT] = i;
+ idx[PCIE_PORT_SERVICE_HP_SHIFT] = i;
+ }
+
+ if (mask & PCIE_PORT_SERVICE_AER) {
+ int entry;
+
+ /*
+ * The code below follows Section 7.10.10 of the PCI Express
+ * Base Specification 2.0 stating that bits 31-27 of the Root
+ * Error Status Register contain a value indicating which of the
+ * MSI/MSI-X vectors assigned to the port is going to be used
+ * for AER, where "For MSI-X, the value in this register
+ * indicates which MSI-X Table entry is used to generate the
+ * interrupt message."
+ */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
+ entry = reg32 >> 27;
+ if (entry >= nr_entries)
+ goto Error;
+
+ i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
+ if (i == nvec)
+ nvec++;
+
+ idx[PCIE_PORT_SERVICE_AER_SHIFT] = i;
}
- return quirk;
+
+ /*
+ * If nvec is equal to the allocated number of entries, we can just use
+ * what we have. Otherwise, the port has some extra entries not for the
+ * services we know and we need to work around that.
+ */
+ if (nvec == nr_entries) {
+ status = 0;
+ } else {
+ /* Drop the temporary MSI-X setup */
+ pci_disable_msix(dev);
+
+ /* Now allocate the MSI-X vectors for real */
+ status = pci_enable_msix(dev, msix_entries, nvec);
+ if (status)
+ goto Exit;
+ }
+
+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+ vectors[i] = idx[i] >= 0 ? msix_entries[idx[i]].vector : -1;
+
+ Exit:
+ kfree(msix_entries);
+ return status;
+
+ Error:
+ pci_disable_msix(dev);
+ goto Exit;
}
/**
@@ -64,47 +187,32 @@ static int is_msi_quirked(struct pci_dev *dev)
*/
static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
{
- int i, pos, nvec, status = -EINVAL;
- int interrupt_mode = PCIE_PORT_INTx_MODE;
+ struct pcie_port_data *port_data = pci_get_drvdata(dev);
+ int irq, interrupt_mode = PCIE_PORT_NO_IRQ;
+ int i;
- /* Set INTx as default */
- for (i = 0, nvec = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
- if (mask & (1 << i))
- nvec++;
- vectors[i] = dev->irq;
- }
-
/* Check MSI quirk */
- if (is_msi_quirked(dev))
- return interrupt_mode;
-
- /* Select MSI-X over MSI if supported */
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (pos) {
- struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] =
- {{0, 0}, {0, 1}, {0, 2}, {0, 3}};
- status = pci_enable_msix(dev, msix_entries, nvec);
- if (!status) {
- int j = 0;
-
- interrupt_mode = PCIE_PORT_MSIX_MODE;
- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
- if (mask & (1 << i))
- vectors[i] = msix_entries[j++].vector;
- }
- }
- }
- if (status) {
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (pos) {
- status = pci_enable_msi(dev);
- if (!status) {
- interrupt_mode = PCIE_PORT_MSI_MODE;
- for (i = 0;i < PCIE_PORT_DEVICE_MAXSERVICES;i++)
- vectors[i] = dev->irq;
- }
- }
- }
+ if (port_data->port_type == PCIE_RC_PORT && pcie_mch_quirk)
+ goto Fallback;
+
+ /* Try to use MSI-X if supported */
+ if (!pcie_port_enable_msix(dev, vectors, mask))
+ return PCIE_PORT_MSIX_MODE;
+
+ /* We're not going to use MSI-X, so try MSI and fall back to INTx */
+ if (!pci_enable_msi(dev))
+ interrupt_mode = PCIE_PORT_MSI_MODE;
+
+ Fallback:
+ if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin)
+ interrupt_mode = PCIE_PORT_INTx_MODE;
+
+ irq = interrupt_mode != PCIE_PORT_NO_IRQ ? dev->irq : -1;
+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+ vectors[i] = irq;
+
+ vectors[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
+
return interrupt_mode;
}
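
The vectors[] array filled in here is what each service device later sees as its ->irq. A hedged sketch of how a service driver's probe would consume it (handler and name are placeholders, not part of this patch):

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pcieport_if.h>

static irqreturn_t example_irq(int irq, void *context)
{
	return IRQ_HANDLED;
}

static int example_service_probe(struct pcie_device *srv)
{
	/* srv->irq is vectors[service] as assigned above, or -1 if none */
	if (srv->irq < 0)
		return -ENODEV;

	return request_irq(srv->irq, example_irq, IRQF_SHARED,
			   "example_service", srv);
}
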
@@ -132,13 +240,11 @@ static int get_port_device_capability(struct pci_dev *dev)
pos + PCIE_SLOT_CAPABILITIES_REG, &reg32);
if (reg32 & SLOT_HP_CAPABLE_MASK)
services |= PCIE_PORT_SERVICE_HP;
- }
- /* PME Capable - root port capability */
- if (((reg16 >> 4) & PORT_TYPE_MASK) == PCIE_RC_PORT)
- services |= PCIE_PORT_SERVICE_PME;
-
+ }
+ /* AER capable */
if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
services |= PCIE_PORT_SERVICE_AER;
+ /* VC support */
if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
services |= PCIE_PORT_SERVICE_VC;
@@ -152,27 +258,24 @@ static int get_port_device_capability(struct pci_dev *dev)
* @port_type: Type of the port
* @service_type: Type of service to associate with the service device
* @irq: Interrupt vector to associate with the service device
- * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI)
*/
static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
- int port_type, int service_type, int irq, int irq_mode)
+ int service_type, int irq)
{
+ struct pcie_port_data *port_data = pci_get_drvdata(parent);
struct device *device;
+ int port_type = port_data->port_type;
dev->port = parent;
- dev->interrupt_mode = irq_mode;
dev->irq = irq;
- dev->id.vendor = parent->vendor;
- dev->id.device = parent->device;
- dev->id.port_type = port_type;
- dev->id.service_type = (1 << service_type);
+ dev->service = service_type;
/* Initialize generic device interface */
device = &dev->device;
memset(device, 0, sizeof(struct device));
device->bus = &pcie_port_bus_type;
device->driver = NULL;
- device->driver_data = NULL;
+ dev_set_drvdata(device, NULL);
device->release = release_pcie_device; /* callback to free pcie dev */
dev_set_name(device, "%s:pcie%02x",
pci_name(parent), get_descriptor_id(port_type, service_type));
@@ -185,10 +288,9 @@ static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
* @port_type: Type of the port
* @service_type: Type of service to associate with the service device
* @irq: Interrupt vector to associate with the service device
- * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI)
*/
static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
- int port_type, int service_type, int irq, int irq_mode)
+ int service_type, int irq)
{
struct pcie_device *device;
@@ -196,7 +298,7 @@ static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
if (!device)
return NULL;
- pcie_device_init(parent, device, port_type, service_type, irq,irq_mode);
+ pcie_device_init(parent, device, service_type, irq);
return device;
}
@@ -230,63 +332,90 @@ int pcie_port_device_probe(struct pci_dev *dev)
*/
int pcie_port_device_register(struct pci_dev *dev)
{
- struct pcie_port_device_ext *p_ext;
- int status, type, capabilities, irq_mode, i;
+ struct pcie_port_data *port_data;
+ int status, capabilities, irq_mode, i, nr_serv;
int vectors[PCIE_PORT_DEVICE_MAXSERVICES];
u16 reg16;
- /* Allocate port device extension */
- if (!(p_ext = kmalloc(sizeof(struct pcie_port_device_ext), GFP_KERNEL)))
+ port_data = kzalloc(sizeof(*port_data), GFP_KERNEL);
+ if (!port_data)
return -ENOMEM;
-
- pci_set_drvdata(dev, p_ext);
+ pci_set_drvdata(dev, port_data);
/* Get port type */
pci_read_config_word(dev,
pci_find_capability(dev, PCI_CAP_ID_EXP) +
PCIE_CAPABILITIES_REG, &reg16);
- type = (reg16 >> 4) & PORT_TYPE_MASK;
+ port_data->port_type = (reg16 >> 4) & PORT_TYPE_MASK;
- /* Now get port services */
capabilities = get_port_device_capability(dev);
+ /* Root ports are capable of generating PME too */
+ if (port_data->port_type == PCIE_RC_PORT)
+ capabilities |= PCIE_PORT_SERVICE_PME;
+
irq_mode = assign_interrupt_mode(dev, vectors, capabilities);
- p_ext->interrupt_mode = irq_mode;
+ if (irq_mode == PCIE_PORT_NO_IRQ) {
+ /*
+ * Don't use service devices that require interrupts if there is
+ * no way to generate them.
+ */
+ if (!(capabilities & PCIE_PORT_SERVICE_VC)) {
+ status = -ENODEV;
+ goto Error;
+ }
+ capabilities = PCIE_PORT_SERVICE_VC;
+ }
+ port_data->port_irq_mode = irq_mode;
+
+ status = pci_enable_device(dev);
+ if (status)
+ goto Error;
+ pci_set_master(dev);
/* Allocate child services if any */
- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
+ for (i = 0, nr_serv = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
struct pcie_device *child;
+ int service = 1 << i;
+
+ if (!(capabilities & service))
+ continue;
- if (capabilities & (1 << i)) {
- child = alloc_pcie_device(
- dev, /* parent */
- type, /* port type */
- i, /* service type */
- vectors[i], /* irq */
- irq_mode /* interrupt mode */);
- if (child) {
- status = device_register(&child->device);
- if (status) {
- kfree(child);
- continue;
- }
- get_device(&child->device);
- }
+ child = alloc_pcie_device(dev, service, vectors[i]);
+ if (!child)
+ continue;
+
+ status = device_register(&child->device);
+ if (status) {
+ kfree(child);
+ continue;
}
+
+ get_device(&child->device);
+ nr_serv++;
+ }
+ if (!nr_serv) {
+ pci_disable_device(dev);
+ status = -ENODEV;
+ goto Error;
}
+
return 0;
+
+ Error:
+ kfree(port_data);
+ return status;
}
#ifdef CONFIG_PM
static int suspend_iter(struct device *dev, void *data)
{
struct pcie_port_service_driver *service_driver;
- pm_message_t state = * (pm_message_t *) data;
if ((dev->bus == &pcie_port_bus_type) &&
(dev->driver)) {
service_driver = to_service_driver(dev->driver);
if (service_driver->suspend)
- service_driver->suspend(to_pcie_device(dev), state);
+ service_driver->suspend(to_pcie_device(dev));
}
return 0;
}
@@ -294,11 +423,10 @@ static int suspend_iter(struct device *dev, void *data)
/**
* pcie_port_device_suspend - suspend port services associated with a PCIe port
* @dev: PCI Express port to handle
- * @state: Representation of system power management transition in progress
*/
-int pcie_port_device_suspend(struct pci_dev *dev, pm_message_t state)
+int pcie_port_device_suspend(struct device *dev)
{
- return device_for_each_child(&dev->dev, &state, suspend_iter);
+ return device_for_each_child(dev, NULL, suspend_iter);
}
static int resume_iter(struct device *dev, void *data)
@@ -318,24 +446,17 @@ static int resume_iter(struct device *dev, void *data)
 * pcie_port_device_resume - resume port services associated with a PCIe port
* @dev: PCI Express port to handle
*/
-int pcie_port_device_resume(struct pci_dev *dev)
+int pcie_port_device_resume(struct device *dev)
{
- return device_for_each_child(&dev->dev, NULL, resume_iter);
+ return device_for_each_child(dev, NULL, resume_iter);
}
-#endif
+#endif /* PM */
static int remove_iter(struct device *dev, void *data)
{
- struct pcie_port_service_driver *service_driver;
-
if (dev->bus == &pcie_port_bus_type) {
- if (dev->driver) {
- service_driver = to_service_driver(dev->driver);
- if (service_driver->remove)
- service_driver->remove(to_pcie_device(dev));
- }
- *(unsigned long*)data = (unsigned long)dev;
- return 1;
+ put_device(dev);
+ device_unregister(dev);
}
return 0;
}
@@ -349,25 +470,21 @@ static int remove_iter(struct device *dev, void *data)
*/
void pcie_port_device_remove(struct pci_dev *dev)
{
- struct device *device;
- unsigned long device_addr;
- int interrupt_mode = PCIE_PORT_INTx_MODE;
- int status;
+ struct pcie_port_data *port_data = pci_get_drvdata(dev);
- do {
- status = device_for_each_child(&dev->dev, &device_addr, remove_iter);
- if (status) {
- device = (struct device*)device_addr;
- interrupt_mode = (to_pcie_device(device))->interrupt_mode;
- put_device(device);
- device_unregister(device);
- }
- } while (status);
- /* Switch to INTx by default if MSI enabled */
- if (interrupt_mode == PCIE_PORT_MSIX_MODE)
+ device_for_each_child(&dev->dev, NULL, remove_iter);
+ pci_disable_device(dev);
+
+ switch (port_data->port_irq_mode) {
+ case PCIE_PORT_MSIX_MODE:
pci_disable_msix(dev);
- else if (interrupt_mode == PCIE_PORT_MSI_MODE)
+ break;
+ case PCIE_PORT_MSI_MODE:
pci_disable_msi(dev);
+ break;
+ }
+
+ kfree(port_data);
}
/**
@@ -392,7 +509,7 @@ static int pcie_port_probe_service(struct device *dev)
return -ENODEV;
pciedev = to_pcie_device(dev);
- status = driver->probe(pciedev, driver->id_table);
+ status = driver->probe(pciedev);
if (!status) {
dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n",
driver->name);
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 5ea566e20b3..091ce70051e 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -32,11 +32,6 @@ MODULE_LICENSE("GPL");
/* global data */
static const char device_name[] = "pcieport-driver";
-static int pcie_portdrv_save_config(struct pci_dev *dev)
-{
- return pci_save_state(dev);
-}
-
static int pcie_portdrv_restore_config(struct pci_dev *dev)
{
int retval;
@@ -49,21 +44,21 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev)
}
#ifdef CONFIG_PM
-static int pcie_portdrv_suspend(struct pci_dev *dev, pm_message_t state)
-{
- return pcie_port_device_suspend(dev, state);
+static struct dev_pm_ops pcie_portdrv_pm_ops = {
+ .suspend = pcie_port_device_suspend,
+ .resume = pcie_port_device_resume,
+ .freeze = pcie_port_device_suspend,
+ .thaw = pcie_port_device_resume,
+ .poweroff = pcie_port_device_suspend,
+ .restore = pcie_port_device_resume,
+};
-}
+#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
-static int pcie_portdrv_resume(struct pci_dev *dev)
-{
- pci_set_master(dev);
- return pcie_port_device_resume(dev);
-}
-#else
-#define pcie_portdrv_suspend NULL
-#define pcie_portdrv_resume NULL
-#endif
+#else /* !PM */
+
+#define PCIE_PORTDRV_PM_OPS NULL
+#endif /* !PM */
/*
* pcie_portdrv_probe - Probe PCI-Express port devices
@@ -82,20 +77,15 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
if (status)
return status;
- if (pci_enable_device(dev) < 0)
- return -ENODEV;
-
- pci_set_master(dev);
if (!dev->irq && dev->pin) {
dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
"check vendor BIOS\n", dev->vendor, dev->device);
}
- if (pcie_port_device_register(dev)) {
- pci_disable_device(dev);
- return -ENOMEM;
- }
+ status = pcie_port_device_register(dev);
+ if (status)
+ return status;
- pcie_portdrv_save_config(dev);
+ pci_save_state(dev);
return 0;
}
@@ -104,7 +94,6 @@ static void pcie_portdrv_remove (struct pci_dev *dev)
{
pcie_port_device_remove(dev);
pci_disable_device(dev);
- kfree(pci_get_drvdata(dev));
}
static int error_detected_iter(struct device *device, void *data)
@@ -211,7 +200,7 @@ static int slot_reset_iter(struct device *device, void *data)
static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
{
- pci_ers_result_t status = PCI_ERS_RESULT_NONE;
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
int retval;
/* If fatal, restore cfg space for possible link reset at upstream */
@@ -278,10 +267,9 @@ static struct pci_driver pcie_portdriver = {
.probe = pcie_portdrv_probe,
.remove = pcie_portdrv_remove,
- .suspend = pcie_portdrv_suspend,
- .resume = pcie_portdrv_resume,
-
.err_handler = &pcie_portdrv_err_handler,
+
+ .driver.pm = PCIE_PORTDRV_PM_OPS,
};
static int __init pcie_portdrv_init(void)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 55ec44a27e8..40e75f6a505 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -193,7 +193,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
if (type == pci_bar_io) {
l &= PCI_BASE_ADDRESS_IO_MASK;
- mask = PCI_BASE_ADDRESS_IO_MASK & 0xffff;
+ mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT;
} else {
l &= PCI_BASE_ADDRESS_MEM_MASK;
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
@@ -237,6 +237,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
dev_printk(KERN_DEBUG, &dev->dev,
"reg %x 64bit mmio: %pR\n", pos, res);
}
+
+ res->flags |= IORESOURCE_MEM_64;
} else {
sz = pci_size(l, sz, mask);
@@ -287,7 +289,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
struct resource *res;
int i;
- if (!dev) /* It's a host bus, nothing to read */
+ if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
return;
if (dev->transparent) {
@@ -362,7 +364,10 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
}
}
if (base <= limit) {
- res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
+ IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ if (res->flags & PCI_PREF_RANGE_TYPE_64)
+ res->flags |= IORESOURCE_MEM_64;
res->start = base;
res->end = limit + 0xfffff;
dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n",
@@ -511,21 +516,21 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
/*
* If we already got to this bus through a different bridge,
- * ignore it. This can happen with the i450NX chipset.
+ * don't re-add it. This can happen with the i450NX chipset.
+ *
+ * However, we continue to descend the hierarchy and
+ * scan its remaining child buses.
*/
- if (pci_find_bus(pci_domain_nr(bus), busnr)) {
- dev_info(&dev->dev, "bus %04x:%02x already known\n",
- pci_domain_nr(bus), busnr);
- goto out;
+ child = pci_find_bus(pci_domain_nr(bus), busnr);
+ if (!child) {
+ child = pci_add_new_bus(bus, dev, busnr);
+ if (!child)
+ goto out;
+ child->primary = buses & 0xFF;
+ child->subordinate = (buses >> 16) & 0xFF;
+ child->bridge_ctl = bctl;
}
- child = pci_add_new_bus(bus, dev, busnr);
- if (!child)
- goto out;
- child->primary = buses & 0xFF;
- child->subordinate = (buses >> 16) & 0xFF;
- child->bridge_ctl = bctl;
-
cmax = pci_scan_child_bus(child);
if (cmax > max)
max = cmax;
@@ -674,6 +679,19 @@ static void pci_read_irq(struct pci_dev *dev)
dev->irq = irq;
}
+static void set_pcie_port_type(struct pci_dev *pdev)
+{
+ int pos;
+ u16 reg16;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return;
+ pdev->is_pcie = 1;
+ pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+ pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
+}
+
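
The pcie_type set here is the 4-bit Device/Port Type field from PCI_EXP_FLAGS. A hedged sketch of how later code (ASPM, portdrv) typically interprets it; the helper name is a placeholder:

#include <linux/pci.h>

/* Sketch: true for ports that drive a link downstream (root/downstream ports). */
static bool example_has_downstream_link(struct pci_dev *pdev)
{
	return pdev->is_pcie &&
	       (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
		pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM);
}
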
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
/**
@@ -683,12 +701,33 @@ static void pci_read_irq(struct pci_dev *dev)
* Initialize the device structure with information about the device's
* vendor,class,memory and IO-space addresses,IRQ lines etc.
* Called at initialisation of the PCI subsystem and by CardBus services.
- * Returns 0 on success and -1 if unknown type of device (not normal, bridge
- * or CardBus).
+ * Returns 0 on success and negative if unknown type of device (not normal,
+ * bridge or CardBus).
*/
-static int pci_setup_device(struct pci_dev * dev)
+int pci_setup_device(struct pci_dev *dev)
{
u32 class;
+ u8 hdr_type;
+ struct pci_slot *slot;
+
+ if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
+ return -EIO;
+
+ dev->sysdata = dev->bus->sysdata;
+ dev->dev.parent = dev->bus->bridge;
+ dev->dev.bus = &pci_bus_type;
+ dev->hdr_type = hdr_type & 0x7f;
+ dev->multifunction = !!(hdr_type & 0x80);
+ dev->error_state = pci_channel_io_normal;
+ set_pcie_port_type(dev);
+
+ list_for_each_entry(slot, &dev->bus->slots, list)
+ if (PCI_SLOT(dev->devfn) == slot->number)
+ dev->slot = slot;
+
+ /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
+ set this higher, assuming the system even supports it. */
+ dev->dma_mask = 0xffffffff;
dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
dev->bus->number, PCI_SLOT(dev->devfn),
@@ -703,11 +742,15 @@ static int pci_setup_device(struct pci_dev * dev)
dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n",
dev->vendor, dev->device, class, dev->hdr_type);
+ /* need to have dev->class ready */
+ dev->cfg_size = pci_cfg_space_size(dev);
+
/* "Unknown power state" */
dev->current_state = PCI_UNKNOWN;
/* Early fixups, before probing the BARs */
pci_fixup_device(pci_fixup_early, dev);
+ /* device class may be changed after fixup */
class = dev->class >> 8;
switch (dev->hdr_type) { /* header type */
@@ -770,7 +813,7 @@ static int pci_setup_device(struct pci_dev * dev)
default: /* unknown header */
dev_err(&dev->dev, "unknown header type %02x, "
"ignoring device\n", dev->hdr_type);
- return -1;
+ return -EIO;
bad:
dev_err(&dev->dev, "ignoring class %02x (doesn't match header "
@@ -785,6 +828,7 @@ static int pci_setup_device(struct pci_dev * dev)
static void pci_release_capabilities(struct pci_dev *dev)
{
pci_vpd_release(dev);
+ pci_iov_release(dev);
}
/**
@@ -803,19 +847,6 @@ static void pci_release_dev(struct device *dev)
kfree(pci_dev);
}
-static void set_pcie_port_type(struct pci_dev *pdev)
-{
- int pos;
- u16 reg16;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (!pos)
- return;
- pdev->is_pcie = 1;
- pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
- pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
-}
-
/**
* pci_cfg_space_size - get the configuration space size of the PCI device.
* @dev: PCI device
@@ -847,6 +878,11 @@ int pci_cfg_space_size(struct pci_dev *dev)
{
int pos;
u32 status;
+ u16 class;
+
+ class = dev->class >> 8;
+ if (class == PCI_CLASS_BRIDGE_HOST)
+ return pci_cfg_space_size_ext(dev);
pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
if (!pos) {
@@ -891,9 +927,7 @@ EXPORT_SYMBOL(alloc_pci_dev);
static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
{
struct pci_dev *dev;
- struct pci_slot *slot;
u32 l;
- u8 hdr_type;
int delay = 1;
if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
@@ -920,34 +954,16 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
}
}
- if (pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type))
- return NULL;
-
dev = alloc_pci_dev();
if (!dev)
return NULL;
dev->bus = bus;
- dev->sysdata = bus->sysdata;
- dev->dev.parent = bus->bridge;
- dev->dev.bus = &pci_bus_type;
dev->devfn = devfn;
- dev->hdr_type = hdr_type & 0x7f;
- dev->multifunction = !!(hdr_type & 0x80);
dev->vendor = l & 0xffff;
dev->device = (l >> 16) & 0xffff;
- dev->cfg_size = pci_cfg_space_size(dev);
- dev->error_state = pci_channel_io_normal;
- set_pcie_port_type(dev);
-
- list_for_each_entry(slot, &bus->slots, list)
- if (PCI_SLOT(devfn) == slot->number)
- dev->slot = slot;
- /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
- set this higher, assuming the system even supports it. */
- dev->dma_mask = 0xffffffff;
- if (pci_setup_device(dev) < 0) {
+ if (pci_setup_device(dev)) {
kfree(dev);
return NULL;
}
@@ -972,6 +988,9 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Alternative Routing-ID Forwarding */
pci_enable_ari(dev);
+
+ /* Single Root I/O Virtualization */
+ pci_iov_init(dev);
}
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
@@ -1006,6 +1025,12 @@ struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
{
struct pci_dev *dev;
+ dev = pci_get_slot(bus, devfn);
+ if (dev) {
+ pci_dev_put(dev);
+ return dev;
+ }
+
dev = pci_scan_device(bus, devfn);
if (!dev)
return NULL;
@@ -1024,35 +1049,27 @@ EXPORT_SYMBOL(pci_scan_single_device);
* Scan a PCI slot on the specified PCI bus for devices, adding
* discovered devices to the @bus->devices list. New devices
* will not have is_added set.
+ *
+ * Returns the number of new devices found.
*/
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
- int func, nr = 0;
- int scan_all_fns;
-
- scan_all_fns = pcibios_scan_all_fns(bus, devfn);
-
- for (func = 0; func < 8; func++, devfn++) {
- struct pci_dev *dev;
-
- dev = pci_scan_single_device(bus, devfn);
- if (dev) {
- nr++;
+ int fn, nr = 0;
+ struct pci_dev *dev;
- /*
- * If this is a single function device,
- * don't scan past the first function.
- */
- if (!dev->multifunction) {
- if (func > 0) {
- dev->multifunction = 1;
- } else {
- break;
- }
+ dev = pci_scan_single_device(bus, devfn);
+ if (dev && !dev->is_added) /* new device? */
+ nr++;
+
+ if ((dev && dev->multifunction) ||
+ (!dev && pcibios_scan_all_fns(bus, devfn))) {
+ for (fn = 1; fn < 8; fn++) {
+ dev = pci_scan_single_device(bus, devfn + fn);
+ if (dev) {
+ if (!dev->is_added)
+ nr++;
+ dev->multifunction = 1;
}
- } else {
- if (func == 0 && !scan_all_fns)
- break;
}
}
@@ -1074,12 +1091,21 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
for (devfn = 0; devfn < 0x100; devfn += 8)
pci_scan_slot(bus, devfn);
+ /* Reserve buses for SR-IOV capability. */
+ max += pci_iov_bus_range(bus);
+
/*
* After performing arch-dependent fixup of the bus, look behind
* all PCI-to-PCI bridges on this bus.
*/
- pr_debug("PCI: Fixups for bus %04x:%02x\n", pci_domain_nr(bus), bus->number);
- pcibios_fixup_bus(bus);
+ if (!bus->is_added) {
+ pr_debug("PCI: Fixups for bus %04x:%02x\n",
+ pci_domain_nr(bus), bus->number);
+ pcibios_fixup_bus(bus);
+ if (pci_is_root_bus(bus))
+ bus->is_added = 1;
+ }
+
for (pass=0; pass < 2; pass++)
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
@@ -1099,10 +1125,6 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
return max;
}
-void __attribute__((weak)) set_pci_bus_resources_arch_default(struct pci_bus *b)
-{
-}
-
struct pci_bus * pci_create_bus(struct device *parent,
int bus, struct pci_ops *ops, void *sysdata)
{
@@ -1114,7 +1136,7 @@ struct pci_bus * pci_create_bus(struct device *parent,
if (!b)
return NULL;
- dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev){
kfree(b);
return NULL;
@@ -1133,7 +1155,6 @@ struct pci_bus * pci_create_bus(struct device *parent,
list_add_tail(&b->node, &pci_root_buses);
up_write(&pci_bus_sem);
- memset(dev, 0, sizeof(*dev));
dev->parent = parent;
dev->release = pci_release_bus_bridge_dev;
dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus);
@@ -1162,8 +1183,6 @@ struct pci_bus * pci_create_bus(struct device *parent,
b->resource[0] = &ioport_resource;
b->resource[1] = &iomem_resource;
- set_pci_bus_resources_arch_default(b);
-
return b;
dev_create_file_err:
@@ -1193,6 +1212,38 @@ struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
EXPORT_SYMBOL(pci_scan_bus_parented);
#ifdef CONFIG_HOTPLUG
+/**
+ * pci_rescan_bus - scan a PCI bus for devices.
+ * @bus: PCI bus to scan
+ *
+ * Scan a PCI bus and its child buses for new devices, add them,
+ * and enable them.
+ *
+ * Returns the max number of subordinate buses discovered.
+ */
+unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
+{
+ unsigned int max;
+ struct pci_dev *dev;
+
+ max = pci_scan_child_bus(bus);
+
+ down_read(&pci_bus_sem);
+ list_for_each_entry(dev, &bus->devices, bus_list)
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
+ dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+ if (dev->subordinate)
+ pci_bus_size_bridges(dev->subordinate);
+ up_read(&pci_bus_sem);
+
+ pci_bus_assign_resources(bus);
+ pci_enable_bridges(bus);
+ pci_bus_add_devices(bus);
+
+ return max;
+}
+EXPORT_SYMBOL_GPL(pci_rescan_bus);
+
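
A hedged usage sketch of the new rescan entry point, e.g. from platform code that knows devices may have appeared behind an already-known root bus; the domain and bus numbers are placeholders:

#include <linux/pci.h>

static void example_rescan_root(void)
{
	struct pci_bus *bus = pci_find_bus(0, 0);	/* domain 0, bus 00 */

	if (bus)
		pci_rescan_bus(bus);	/* scan, size bridges, assign resources, add devices */
}
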
EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 92b9efe9bca..56552d74abe 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -24,6 +24,7 @@
#include <linux/kallsyms.h>
#include <linux/dmi.h>
#include <linux/pci-aspm.h>
+#include <linux/ioport.h>
#include "pci.h"
int isa_dma_bridge_buggy;
@@ -34,6 +35,69 @@ int pcie_mch_quirk;
EXPORT_SYMBOL(pcie_mch_quirk);
#ifdef CONFIG_PCI_QUIRKS
+/*
+ * This quirk function disables memory decoding and releases memory resources
+ * of the device specified by the kernel's 'pci=resource_alignment=' boot parameter.
+ * It also rounds up the resource size to the specified alignment.
+ * Later on, the kernel will assign a page-aligned memory resource back
+ * to the device.
+ */
+static void __devinit quirk_resource_alignment(struct pci_dev *dev)
+{
+ int i;
+ struct resource *r;
+ resource_size_t align, size;
+ u16 command;
+
+ if (!pci_is_reassigndev(dev))
+ return;
+
+ if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
+ (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
+ dev_warn(&dev->dev,
+ "Can't reassign resources to host bridge.\n");
+ return;
+ }
+
+ dev_info(&dev->dev,
+ "Disabling memory decoding and releasing memory resources.\n");
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+ command &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(dev, PCI_COMMAND, command);
+
+ align = pci_specified_resource_alignment(dev);
+ for (i=0; i < PCI_BRIDGE_RESOURCES; i++) {
+ r = &dev->resource[i];
+ if (!(r->flags & IORESOURCE_MEM))
+ continue;
+ size = resource_size(r);
+ if (size < align) {
+ size = align;
+ dev_info(&dev->dev,
+ "Rounding up size of resource #%d to %#llx.\n",
+ i, (unsigned long long)size);
+ }
+ r->end = size - 1;
+ r->start = 0;
+ }
+ /* Disable the bridge's resource windows so that the kernel
+ * can reassign new resource windows later on.
+ */
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
+ (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
+ r = &dev->resource[i];
+ if (!(r->flags & IORESOURCE_MEM))
+ continue;
+ r->end = resource_size(r) - 1;
+ r->start = 0;
+ }
+ pci_disable_bridge_window(dev);
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_resource_alignment);
+
/* The Mellanox Tavor device gives false positive parity errors
* Mark this device with a broken_parity_status, to allow
* PCI scanning code to "skip" this now blacklisted device.
@@ -1069,6 +1133,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
switch (dev->subsystem_device) {
case 0x1751: /* M2N notebook */
case 0x1821: /* M5N notebook */
+ case 0x1897: /* A6L notebook */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
@@ -1099,6 +1164,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
switch (dev->subsystem_device) {
case 0x12bc: /* HP D330L */
case 0x12bd: /* HP D530 */
+ case 0x006a: /* HP Compaq nx9500 */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
@@ -1126,10 +1192,15 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
* its on-board VGA controller */
asus_hides_smbus = 1;
}
- else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_IG)
+ else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
switch(dev->subsystem_device) {
case 0x00b8: /* Compaq Evo D510 CMT */
case 0x00b9: /* Compaq Evo D510 SFF */
+ /* The motherboard doesn't provide host bridge
+ * subvendor/subdevice IDs, and the on-board VGA
+ * controller is disabled if an AGP card is
+ * inserted, so check USB UHCI Controller #1
+ * instead */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
@@ -1154,7 +1225,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, as
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_IG, asus_hides_smbus_hostbridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
static void asus_hides_smbus_lpc(struct pci_dev *dev)
@@ -1664,9 +1735,13 @@ static void __devinit quirk_netmos(struct pci_dev *dev)
* of parallel ports and <S> is the number of serial ports.
*/
switch (dev->device) {
+ case PCI_DEVICE_ID_NETMOS_9835:
+ /* Well, this rule doesn't hold for the following 9835 device */
+ if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
+ dev->subsystem_device == 0x0299)
+ return;
case PCI_DEVICE_ID_NETMOS_9735:
case PCI_DEVICE_ID_NETMOS_9745:
- case PCI_DEVICE_ID_NETMOS_9835:
case PCI_DEVICE_ID_NETMOS_9845:
case PCI_DEVICE_ID_NETMOS_9855:
if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_SERIAL &&
@@ -1943,6 +2018,28 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5709S,
quirk_brcm_570x_limit_vpd);
+/* Originally in EDAC sources for i82875P:
+ * Intel tells BIOS developers to hide device 6 which
+ * configures the overflow device access containing
+ * the DRBs - this is where we expose device 6.
+ * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
+ */
+static void __devinit quirk_unhide_mch_dev6(struct pci_dev *dev)
+{
+ u8 reg;
+
+ if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) {
+ dev_info(&dev->dev, "Enabling MCH 'Overflow' Device\n");
+ pci_write_config_byte(dev, 0xF4, reg | 0x02);
+ }
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
+ quirk_unhide_mch_dev6);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
+ quirk_unhide_mch_dev6);
+
+
#ifdef CONFIG_PCI_MSI
/* Some chipsets do not support MSI. We cannot easily rely on setting
* PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
@@ -1960,6 +2057,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_di
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
/* Disable MSI on chipsets that are known to not support it */
static void __devinit quirk_disable_msi(struct pci_dev *dev)
@@ -2078,6 +2176,92 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_NVENET_15,
nvenet_msi_disable);
+static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
+{
+ int pos, ttl = 48;
+ int found = 0;
+
+ /* check if there is HT MSI cap or enabled on this device */
+ pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
+ while (pos && ttl--) {
+ u8 flags;
+
+ if (found < 1)
+ found = 1;
+ if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
+ &flags) == 0) {
+ if (flags & HT_MSI_FLAGS_ENABLE) {
+ if (found < 2) {
+ found = 2;
+ break;
+ }
+ }
+ }
+ pos = pci_find_next_ht_capability(dev, pos,
+ HT_CAPTYPE_MSI_MAPPING);
+ }
+
+ return found;
+}
+
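
ht_check_msi_mapping() returns a small tri-state rather than a boolean. A hedged sketch of how a caller distinguishes the three cases (the reporting helper is illustrative only):

#include <linux/pci.h>

/*
 * Sketch of the return-value convention:
 *   0 - no HT MSI mapping capability found
 *   1 - capability present but not enabled
 *   2 - capability present and enabled
 */
static void example_report_ht_msi(struct pci_dev *dev)
{
	switch (ht_check_msi_mapping(dev)) {
	case 0:
		dev_dbg(&dev->dev, "no HT MSI mapping capability\n");
		break;
	case 1:
		dev_dbg(&dev->dev, "HT MSI mapping present but disabled\n");
		break;
	case 2:
		dev_dbg(&dev->dev, "HT MSI mapping enabled\n");
		break;
	}
}
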
+static int __devinit host_bridge_with_leaf(struct pci_dev *host_bridge)
+{
+ struct pci_dev *dev;
+ int pos;
+ int i, dev_no;
+ int found = 0;
+
+ dev_no = host_bridge->devfn >> 3;
+ for (i = dev_no + 1; i < 0x20; i++) {
+ dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
+ if (!dev)
+ continue;
+
+ /* found next host bridge ?*/
+ pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
+ if (pos != 0) {
+ pci_dev_put(dev);
+ break;
+ }
+
+ if (ht_check_msi_mapping(dev)) {
+ found = 1;
+ pci_dev_put(dev);
+ break;
+ }
+ pci_dev_put(dev);
+ }
+
+ return found;
+}
+
+#define PCI_HT_CAP_SLAVE_CTRL0 4 /* link control */
+#define PCI_HT_CAP_SLAVE_CTRL1 8 /* link control to */
+
+static int __devinit is_end_of_ht_chain(struct pci_dev *dev)
+{
+ int pos, ctrl_off;
+ int end = 0;
+ u16 flags, ctrl;
+
+ pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
+
+ if (!pos)
+ goto out;
+
+ pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);
+
+ ctrl_off = ((flags >> 10) & 1) ?
+ PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
+ pci_read_config_word(dev, pos + ctrl_off, &ctrl);
+
+ if (ctrl & (1 << 6))
+ end = 1;
+
+out:
+ return end;
+}
+
static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev)
{
struct pci_dev *host_bridge;
@@ -2102,6 +2286,11 @@ static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev)
if (!found)
return;
+ /* don't enable end_device/host_bridge with leaf directly here */
+ if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
+ host_bridge_with_leaf(host_bridge))
+ goto out;
+
/* root did that ! */
if (msi_ht_cap_enabled(host_bridge))
goto out;
@@ -2132,44 +2321,12 @@ static void __devinit ht_disable_msi_mapping(struct pci_dev *dev)
}
}
-static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
-{
- int pos, ttl = 48;
- int found = 0;
-
- /* check if there is HT MSI cap or enabled on this device */
- pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
- while (pos && ttl--) {
- u8 flags;
-
- if (found < 1)
- found = 1;
- if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
- &flags) == 0) {
- if (flags & HT_MSI_FLAGS_ENABLE) {
- if (found < 2) {
- found = 2;
- break;
- }
- }
- }
- pos = pci_find_next_ht_capability(dev, pos,
- HT_CAPTYPE_MSI_MAPPING);
- }
-
- return found;
-}
-
-static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
+static void __devinit __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
{
struct pci_dev *host_bridge;
int pos;
int found;
- /* Enabling HT MSI mapping on this device breaks MCP51 */
- if (dev->device == 0x270)
- return;
-
/* check if there is HT MSI cap or enabled on this device */
found = ht_check_msi_mapping(dev);
@@ -2193,7 +2350,10 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
/* Host bridge is to HT */
if (found == 1) {
/* it is not enabled, try to enable it */
- nv_ht_enable_msi_mapping(dev);
+ if (all)
+ ht_enable_msi_mapping(dev);
+ else
+ nv_ht_enable_msi_mapping(dev);
}
return;
}
@@ -2205,8 +2365,20 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
/* Host bridge is not to HT, disable HT MSI mapping on this device */
ht_disable_msi_mapping(dev);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk);
+
+static void __devinit nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
+{
+ return __nv_msi_ht_cap_quirk(dev, 1);
+}
+
+static void __devinit nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
+{
+ return __nv_msi_ht_cap_quirk(dev, 0);
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
{
@@ -2268,6 +2440,56 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
#endif /* CONFIG_PCI_MSI */
+#ifdef CONFIG_PCI_IOV
+
+/*
+ * For Intel 82576 SR-IOV NIC, if BIOS doesn't allocate resources for the
+ * SR-IOV BARs, zero the Flash BAR and program the SR-IOV BARs to use the
+ * old Flash Memory Space.
+ */
+static void __devinit quirk_i82576_sriov(struct pci_dev *dev)
+{
+ int pos, flags;
+ u32 bar, start, size;
+
+ if (PAGE_SIZE > 0x10000)
+ return;
+
+ flags = pci_resource_flags(dev, 0);
+ if ((flags & PCI_BASE_ADDRESS_SPACE) !=
+ PCI_BASE_ADDRESS_SPACE_MEMORY ||
+ (flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) !=
+ PCI_BASE_ADDRESS_MEM_TYPE_32)
+ return;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return;
+
+ pci_read_config_dword(dev, pos + PCI_SRIOV_BAR, &bar);
+ if (bar & PCI_BASE_ADDRESS_MEM_MASK)
+ return;
+
+ start = pci_resource_start(dev, 1);
+ size = pci_resource_len(dev, 1);
+ if (!start || size != 0x400000 || start & (size - 1))
+ return;
+
+ pci_resource_flags(dev, 1) = 0;
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0);
+ pci_write_config_dword(dev, pos + PCI_SRIOV_BAR, start);
+ pci_write_config_dword(dev, pos + PCI_SRIOV_BAR + 12, start + size / 2);
+
+ dev_info(&dev->dev, "use Flash Memory Space for SR-IOV BARs\n");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c9, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
+
+#endif /* CONFIG_PCI_IOV */
+
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
struct pci_fixup *end)
{
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 042e0892442..176615e7231 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -32,8 +32,6 @@ static void pci_stop_dev(struct pci_dev *dev)
static void pci_destroy_dev(struct pci_dev *dev)
{
- pci_stop_dev(dev);
-
/* Remove the device from the device lists, and prevent any further
* list accesses from this device */
down_write(&pci_bus_sem);
@@ -71,6 +69,9 @@ void pci_remove_bus(struct pci_bus *pci_bus)
down_write(&pci_bus_sem);
list_del(&pci_bus->node);
up_write(&pci_bus_sem);
+ if (!pci_bus->is_added)
+ return;
+
pci_remove_legacy_files(pci_bus);
device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity);
device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity);
@@ -92,6 +93,7 @@ EXPORT_SYMBOL(pci_remove_bus);
*/
void pci_remove_bus_device(struct pci_dev *dev)
{
+ pci_stop_bus_device(dev);
if (dev->subordinate) {
struct pci_bus *b = dev->subordinate;
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 5af8bd53814..e8cb5051c31 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -29,7 +29,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
if (pdev->is_pcie)
return NULL;
while (1) {
- if (!pdev->bus->self)
+ if (pci_is_root_bus(pdev->bus))
break;
pdev = pdev->bus->self;
/* a p2p bridge */
@@ -115,36 +115,6 @@ pci_find_next_bus(const struct pci_bus *from)
#ifdef CONFIG_PCI_LEGACY
/**
- * pci_find_slot - locate PCI device from a given PCI slot
- * @bus: number of PCI bus on which desired PCI device resides
- * @devfn: encodes number of PCI slot in which the desired PCI
- * device resides and the logical device number within that slot
- * in case of multi-function devices.
- *
- * Given a PCI bus and slot/function number, the desired PCI device
- * is located in system global list of PCI devices. If the device
- * is found, a pointer to its data structure is returned. If no
- * device is found, %NULL is returned.
- *
- * NOTE: Do not use this function any more; use pci_get_slot() instead, as
- * the PCI device returned by this function can disappear at any moment in
- * time.
- */
-struct pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn)
-{
- struct pci_dev *dev = NULL;
-
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- if (dev->bus->number == bus && dev->devfn == devfn) {
- pci_dev_put(dev);
- return dev;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(pci_find_slot);
-
-/**
* pci_find_device - begin or continue searching for a PCI device by vendor/device id
* @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
* @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 70460894578..b636e245445 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -27,7 +27,7 @@
#include <linux/slab.h>
-static void pbus_assign_resources_sorted(struct pci_bus *bus)
+static void pbus_assign_resources_sorted(const struct pci_bus *bus)
{
struct pci_dev *dev;
struct resource *res;
@@ -58,7 +58,6 @@ static void pbus_assign_resources_sorted(struct pci_bus *bus)
res = list->res;
idx = res - &list->dev->resource[0];
if (pci_assign_resource(list->dev, idx)) {
- /* FIXME: get rid of this */
res->start = 0;
res->end = 0;
res->flags = 0;
@@ -143,6 +142,10 @@ static void pci_setup_bridge(struct pci_bus *bus)
struct pci_dev *bridge = bus->self;
struct pci_bus_region region;
u32 l, bu, lu, io_upper16;
+ int pref_mem64;
+
+ if (pci_is_enabled(bridge))
+ return;
dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n",
pci_domain_nr(bus), bus->number);
@@ -195,16 +198,22 @@ static void pci_setup_bridge(struct pci_bus *bus)
pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
/* Set up PREF base/limit. */
+ pref_mem64 = 0;
bu = lu = 0;
pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
if (bus->resource[2]->flags & IORESOURCE_PREFETCH) {
+ int width = 8;
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
- bu = upper_32_bits(region.start);
- lu = upper_32_bits(region.end);
- dev_info(&bridge->dev, " PREFETCH window: %#016llx-%#016llx\n",
- (unsigned long long)region.start,
- (unsigned long long)region.end);
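+		/*
+		 * Only claim a 64-bit window (and print 16 hex digits) when
+		 * the prefetchable resource really is 64-bit capable; the
+		 * upper base/limit registers are left alone otherwise.
+		 */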
+ if (bus->resource[2]->flags & IORESOURCE_MEM_64) {
+ pref_mem64 = 1;
+ bu = upper_32_bits(region.start);
+ lu = upper_32_bits(region.end);
+ width = 16;
+ }
+ dev_info(&bridge->dev, " PREFETCH window: %#0*llx-%#0*llx\n",
+ width, (unsigned long long)region.start,
+ width, (unsigned long long)region.end);
}
else {
l = 0x0000fff0;
@@ -212,9 +221,11 @@ static void pci_setup_bridge(struct pci_bus *bus)
}
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
- /* Set the upper 32 bits of PREF base & limit. */
- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
- pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+ if (pref_mem64) {
+ /* Set the upper 32 bits of PREF base & limit. */
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
+ pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+ }
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
@@ -252,8 +263,25 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
}
- if (pmem)
+ if (pmem) {
b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64)
+ b_res[2].flags |= IORESOURCE_MEM_64;
+ }
+
+	/* Double-check that the bridge really supports a 64-bit prefetchable window */
+ if (b_res[2].flags & IORESOURCE_MEM_64) {
+ u32 mem_base_hi, tmp;
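+		/*
+		 * Write all ones to PCI_PREF_BASE_UPPER32 and read it back:
+		 * if the upper bits are hardwired to zero, the bridge can
+		 * only decode a 32-bit prefetchable window.
+		 */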
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ &mem_base_hi);
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ 0xffffffff);
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
+ if (!tmp)
+ b_res[2].flags &= ~IORESOURCE_MEM_64;
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ mem_base_hi);
+ }
}
/* Helper function for sizing routines: find first available
@@ -333,6 +361,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */
int order, max_order;
struct resource *b_res = find_free_bus_resource(bus, type);
+ unsigned int mem64_mask = 0;
if (!b_res)
return 0;
@@ -341,9 +370,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
max_order = 0;
size = 0;
+ mem64_mask = b_res->flags & IORESOURCE_MEM_64;
+ b_res->flags &= ~IORESOURCE_MEM_64;
+
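+	/*
+	 * The window can remain 64-bit only if the bridge supports it and
+	 * every resource that ends up in it is 64-bit; any 32-bit resource
+	 * clears mem64_mask below.
+	 */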
list_for_each_entry(dev, &bus->devices, bus_list) {
int i;
-
+
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct resource *r = &dev->resource[i];
resource_size_t r_size;
@@ -369,6 +401,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
aligns[order] += align;
if (order > max_order)
max_order = order;
+ mem64_mask &= r->flags & IORESOURCE_MEM_64;
}
}
@@ -393,6 +426,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
b_res->start = min_align;
b_res->end = size + min_align - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
+ b_res->flags |= mem64_mask;
return 1;
}
@@ -495,7 +529,7 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_size_bridges);
-void __ref pci_bus_assign_resources(struct pci_bus *bus)
+void __ref pci_bus_assign_resources(const struct pci_bus *bus)
{
struct pci_bus *b;
struct pci_dev *dev;
@@ -533,11 +567,13 @@ static void pci_bus_dump_res(struct pci_bus *bus)
for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
struct resource *res = bus->resource[i];
- if (!res)
+ if (!res || !res->end)
continue;
dev_printk(KERN_DEBUG, &bus->dev, "resource %d %s %pR\n", i,
- (res->flags & IORESOURCE_IO) ? "io: " : "mem:", res);
+ (res->flags & IORESOURCE_IO) ? "io: " :
+			((res->flags & IORESOURCE_PREFETCH) ? "pref mem:" : "mem:"),
+ res);
}
}
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 32e8d88a461..b711fb7181e 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -99,11 +99,11 @@ void pci_update_resource(struct pci_dev *dev, int resno)
int pci_claim_resource(struct pci_dev *dev, int resource)
{
struct resource *res = &dev->resource[resource];
- struct resource *root = NULL;
+ struct resource *root;
char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
int err;
- root = pcibios_select_root(dev, res);
+ root = pci_find_parent_resource(dev, res);
err = -EINVAL;
if (root != NULL)
@@ -120,23 +120,31 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
return err;
}
-int pci_assign_resource(struct pci_dev *dev, int resno)
+#ifdef CONFIG_PCI_QUIRKS
+void pci_disable_bridge_window(struct pci_dev *dev)
+{
+ dev_dbg(&dev->dev, "Disabling bridge window.\n");
+
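+	/* Programming each base above its limit disables the window. */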
+ /* MMIO Base/Limit */
+ pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0);
+
+ /* Prefetchable MMIO Base/Limit */
+ pci_write_config_dword(dev, PCI_PREF_LIMIT_UPPER32, 0);
+ pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0x0000fff0);
+ pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 0xffffffff);
+}
+#endif /* CONFIG_PCI_QUIRKS */
+
+static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
+ int resno)
{
- struct pci_bus *bus = dev->bus;
struct resource *res = dev->resource + resno;
resource_size_t size, min, align;
int ret;
size = resource_size(res);
min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
-
align = resource_alignment(res);
- if (!align) {
- dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
- "alignment) %pR flags %#lx\n",
- resno, res, res->flags);
- return -EINVAL;
- }
/* First, try exact prefetching match.. */
ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -154,10 +162,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
pcibios_align_resource, dev);
}
- if (ret) {
- dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
- resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
- } else {
+ if (!ret) {
res->flags &= ~IORESOURCE_STARTALIGN;
if (resno < PCI_BRIDGE_RESOURCES)
pci_update_resource(dev, resno);
@@ -166,6 +171,39 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
return ret;
}
+int pci_assign_resource(struct pci_dev *dev, int resno)
+{
+ struct resource *res = dev->resource + resno;
+ resource_size_t align;
+ struct pci_bus *bus;
+ int ret;
+
+ align = resource_alignment(res);
+ if (!align) {
+ dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
+ "alignment) %pR flags %#lx\n",
+ resno, res, res->flags);
+ return -EINVAL;
+ }
+
+ bus = dev->bus;
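+	/*
+	 * If allocation fails on the device's bus, walk up and retry on
+	 * each parent as long as the bridge in between is transparent
+	 * (subtractive decode), since such a bridge forwards anything its
+	 * windows do not claim.
+	 */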
+	while ((ret = __pci_assign_resource(bus, dev, resno))) {
+		if (!bus->parent || !bus->self->transparent)
+			break;
+		bus = bus->parent;
+	}
+
+ if (ret)
+ dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
+ resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
+
+ return ret;
+}
+
#if 0
int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
{
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 5a8ccb4f604..eddb0748b0e 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -1,8 +1,8 @@
/*
* drivers/pci/slot.c
* Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx>
- * Copyright (C) 2006-2008 Hewlett-Packard Development Company, L.P.
- * Alex Chiang <achiang@hp.com>
+ * Copyright (C) 2006-2009 Hewlett-Packard Development Company, L.P.
+ * Alex Chiang <achiang@hp.com>
*/
#include <linux/kobject.h>
@@ -52,8 +52,8 @@ static void pci_slot_release(struct kobject *kobj)
struct pci_dev *dev;
struct pci_slot *slot = to_pci_slot(kobj);
- pr_debug("%s: releasing pci_slot on %x:%d\n", __func__,
- slot->bus->number, slot->number);
+ dev_dbg(&slot->bus->dev, "dev %02x, released physical slot %s\n",
+ slot->number, pci_slot_name(slot));
list_for_each_entry(dev, &slot->bus->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot->number)
@@ -248,9 +248,8 @@ placeholder:
if (PCI_SLOT(dev->devfn) == slot_nr)
dev->slot = slot;
- /* Don't care if debug printk has a -1 for slot_nr */
- pr_debug("%s: created pci_slot on %04x:%02x:%02x\n",
- __func__, pci_domain_nr(parent), parent->number, slot_nr);
+ dev_dbg(&parent->dev, "dev %02x, created physical slot %s\n",
+ slot_nr, pci_slot_name(slot));
out:
kfree(slot_name);
@@ -265,8 +264,8 @@ EXPORT_SYMBOL_GPL(pci_create_slot);
/**
* pci_renumber_slot - update %struct pci_slot -> number
- * @slot - %struct pci_slot to update
- * @slot_nr - new number for slot
+ * @slot: &struct pci_slot to update
+ * @slot_nr: new number for slot
*
* The primary purpose of this interface is to allow callers who earlier
* created a placeholder slot in pci_create_slot() by passing a -1 as
@@ -299,9 +298,8 @@ EXPORT_SYMBOL_GPL(pci_renumber_slot);
*/
void pci_destroy_slot(struct pci_slot *slot)
{
- pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__,
- atomic_read(&slot->kobj.kref.refcount) - 1,
- pci_domain_nr(slot->bus), slot->bus->number, slot->number);
+ dev_dbg(&slot->bus->dev, "dev %02x, dec refcount to %d\n",
+ slot->number, atomic_read(&slot->kobj.kref.refcount) - 1);
down_write(&pci_bus_sem);
kobject_put(&slot->kobj);
@@ -309,6 +307,45 @@ void pci_destroy_slot(struct pci_slot *slot)
}
EXPORT_SYMBOL_GPL(pci_destroy_slot);
+#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
+#include <linux/pci_hotplug.h>
+/**
+ * pci_hp_create_module_link - create symbolic link to the hotplug driver module.
+ * @pci_slot: struct pci_slot to create the link for
+ *
+ * Helper function for pci_hotplug_core.c to create symbolic link to
+ * the hotplug driver module.
+ */
+void pci_hp_create_module_link(struct pci_slot *pci_slot)
+{
+ struct hotplug_slot *slot = pci_slot->hotplug;
+ struct kobject *kobj = NULL;
+ int no_warn;
+
+ if (!slot || !slot->ops)
+ return;
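+	/*
+	 * Find the /sys/module/<mod_name> kobject so the "module" symlink
+	 * points at the hotplug driver that registered this slot.
+	 */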
+ kobj = kset_find_obj(module_kset, slot->ops->mod_name);
+ if (!kobj)
+ return;
+ no_warn = sysfs_create_link(&pci_slot->kobj, kobj, "module");
+ kobject_put(kobj);
+}
+EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
+
+/**
+ * pci_hp_remove_module_link - remove symbolic link to the hotplug driver module.
+ * @pci_slot: struct pci_slot to remove the link from
+ *
+ * Helper function for pci_hotplug_core.c to remove symbolic link to
+ * the hotplug driver module.
+ */
+void pci_hp_remove_module_link(struct pci_slot *pci_slot)
+{
+ sysfs_remove_link(&pci_slot->kobj, "module");
+}
+EXPORT_SYMBOL_GPL(pci_hp_remove_module_link);
+#endif
+
static int pci_slot_init(void)
{
struct kset *pci_bus_kset;