Diffstat (limited to 'drivers/pci')
-rw-r--r--   drivers/pci/hotplug/cpci_hotplug_core.c   |   1
-rw-r--r--   drivers/pci/hotplug/cpqphp_ctrl.c          |   1
-rw-r--r--   drivers/pci/hotplug/cpqphp_sysfs.c         |   1
-rw-r--r--   drivers/pci/hotplug/pci_hotplug_core.c     |   2
-rw-r--r--   drivers/pci/hotplug/pciehp_ctrl.c          |   1
-rw-r--r--   drivers/pci/hotplug/sgi_hotplug.c          |   7
-rw-r--r--   drivers/pci/intel-iommu.c                  | 130
-rw-r--r--   drivers/pci/msi.c                          |  64
-rw-r--r--   drivers/pci/msi.h                          |  10
-rw-r--r--   drivers/pci/pci.c                          |  15
-rw-r--r--   drivers/pci/pcie/aer/ecrc.c                |   2
-rw-r--r--   drivers/pci/quirks.c                       |   5
-rw-r--r--   drivers/pci/setup-res.c                    |   4
-rw-r--r--   drivers/pci/slot.c                         |   4
-rw-r--r--   drivers/pci/syscall.c                      |   1
15 files changed, 175 insertions, 73 deletions
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index a5b9f6ae507..d703e73fffa 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -32,7 +32,6 @@
 #include <linux/pci_hotplug.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/smp_lock.h>
 #include <asm/atomic.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 2fa47af992a..0ff689afa75 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -34,7 +34,6 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/wait.h>
-#include <linux/smp_lock.h>
 #include <linux/pci.h>
 #include <linux/pci_hotplug.h>
 #include <linux/kthread.h>
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c
index 8450f4a6568..e6089bdb6e5 100644
--- a/drivers/pci/hotplug/cpqphp_sysfs.c
+++ b/drivers/pci/hotplug/cpqphp_sysfs.c
@@ -33,6 +33,7 @@
 #include <linux/workqueue.h>
 #include <linux/pci.h>
 #include <linux/pci_hotplug.h>
+#include <linux/smp_lock.h>
 #include <linux/debugfs.h>
 #include "cpqphp.h"
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 844580489d4..5c5043f239c 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -555,6 +555,8 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
  * @slot: pointer to the &struct hotplug_slot to register
  * @devnr: device number
  * @name: name registered with kobject core
+ * @owner: caller module owner
+ * @mod_name: caller module name
  *
  * Registers a hotplug slot with the pci hotplug subsystem, which will allow
  * userspace interaction to the slot.
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index ff4034502d2..8aab8edf123 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -30,7 +30,6 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
-#include <linux/smp_lock.h>
 #include <linux/pci.h>
 #include <linux/workqueue.h>
 #include "../pci.h"
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index a4494d78e7c..8aebe1e9d3d 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -90,11 +90,10 @@ static struct hotplug_slot_ops sn_hotplug_slot_ops = {
 
 static DEFINE_MUTEX(sn_hotplug_mutex);
 
-static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot,
-			  char *buf)
+static ssize_t path_show(struct pci_slot *pci_slot, char *buf)
 {
 	int retval = -ENOENT;
-	struct slot *slot = bss_hotplug_slot->private;
+	struct slot *slot = pci_slot->hotplug->private;
 
 	if (!slot)
 		return retval;
@@ -103,7 +102,7 @@ static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot,
 	return retval;
 }
 
-static struct hotplug_slot_attribute sn_slot_path_attr = __ATTR_RO(path);
+static struct pci_slot_attribute sn_slot_path_attr = __ATTR_RO(path);
 
 static int sn_pci_slot_valid(struct pci_bus *pci_bus, int device)
 {
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 53075424a43..2314ad7ee5f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1505,7 +1505,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 		}
 
 		set_bit(num, iommu->domain_ids);
-		set_bit(iommu->seq_id, &domain->iommu_bmp);
 		iommu->domains[num] = domain;
 		id = num;
 	}
@@ -1648,6 +1647,14 @@ static int domain_context_mapped(struct pci_dev *pdev)
 					     tmp->devfn);
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+					    size_t size)
+{
+	host_addr &= ~PAGE_MASK;
+	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+}
+
 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			    struct scatterlist *sg, unsigned long phys_pfn,
 			    unsigned long nr_pages, int prot)
@@ -1675,7 +1682,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 		uint64_t tmp;
 
 		if (!sg_res) {
-			sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 			sg->dma_length = sg->length;
 			pteval = page_to_phys(sg_page(sg)) | prot;
@@ -2117,6 +2124,47 @@ static int domain_add_dev_info(struct dmar_domain *domain,
 	return 0;
 }
 
+static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+{
+	if (iommu_identity_mapping == 2)
+		return IS_GFX_DEVICE(pdev);
+
+	/*
+	 * We want to start off with all devices in the 1:1 domain, and
+	 * take them out later if we find they can't access all of memory.
+	 *
+	 * However, we can't do this for PCI devices behind bridges,
+	 * because all PCI devices behind the same bridge will end up
+	 * with the same source-id on their transactions.
+	 *
+	 * Practically speaking, we can't change things around for these
+	 * devices at run-time, because we can't be sure there'll be no
+	 * DMA transactions in flight for any of their siblings.
+	 *
+	 * So PCI devices (unless they're on the root bus) as well as
+	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
+	 * the 1:1 domain, just in _case_ one of their siblings turns out
+	 * not to be able to map all of memory.
+	 */
+	if (!pdev->is_pcie) {
+		if (!pci_is_root_bus(pdev->bus))
+			return 0;
+		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+			return 0;
+	} else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+		return 0;
+
+	/*
+	 * At boot time, we don't yet know if devices will be 64-bit capable.
+	 * Assume that they will -- if they turn out not to be, then we can
+	 * take them out of the 1:1 domain later.
+	 */
+	if (!startup)
+		return pdev->dma_mask > DMA_BIT_MASK(32);
+
+	return 1;
+}
+
 static int iommu_prepare_static_identity_mapping(void)
 {
 	struct pci_dev *pdev = NULL;
@@ -2127,16 +2175,18 @@ static int iommu_prepare_static_identity_mapping(void)
 		return -EFAULT;
 
 	for_each_pci_dev(pdev) {
-		printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
-		       pci_name(pdev));
+		if (iommu_should_identity_map(pdev, 1)) {
+			printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
+			       pci_name(pdev));
 
-		ret = domain_context_mapping(si_domain, pdev,
-					     CONTEXT_TT_MULTI_LEVEL);
-		if (ret)
-			return ret;
-		ret = domain_add_dev_info(si_domain, pdev);
-		if (ret)
-			return ret;
+			ret = domain_context_mapping(si_domain, pdev,
+						     CONTEXT_TT_MULTI_LEVEL);
+			if (ret)
+				return ret;
+			ret = domain_add_dev_info(si_domain, pdev);
+			if (ret)
+				return ret;
+		}
 	}
 
 	return 0;
@@ -2291,6 +2341,10 @@ int __init init_dmars(void)
 	 * identity mapping if iommu_identity_mapping is set.
 	 */
 	if (!iommu_pass_through) {
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+		if (!iommu_identity_mapping)
+			iommu_identity_mapping = 2;
+#endif
 		if (iommu_identity_mapping)
 			iommu_prepare_static_identity_mapping();
 		/*
@@ -2368,15 +2422,7 @@ error:
 	return ret;
 }
 
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
-					    size_t size)
-{
-	host_addr &= ~PAGE_MASK;
-	host_addr += size + PAGE_SIZE - 1;
-
-	return host_addr >> VTD_PAGE_SHIFT;
-}
-
+/* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
 				     unsigned long nrpages, uint64_t dma_mask)
@@ -2443,16 +2489,24 @@ static int iommu_dummy(struct pci_dev *pdev)
 }
 
 /* Check if the pdev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct pci_dev *pdev)
+static int iommu_no_mapping(struct device *dev)
 {
+	struct pci_dev *pdev;
 	int found;
 
+	if (unlikely(dev->bus != &pci_bus_type))
+		return 1;
+
+	pdev = to_pci_dev(dev);
+	if (iommu_dummy(pdev))
+		return 1;
+
 	if (!iommu_identity_mapping)
-		return iommu_dummy(pdev);
+		return 0;
 
 	found = identity_mapping(pdev);
 	if (found) {
-		if (pdev->dma_mask > DMA_BIT_MASK(32))
+		if (iommu_should_identity_map(pdev, 0))
 			return 1;
 		else {
 			/*
@@ -2469,9 +2523,12 @@ static int iommu_no_mapping(struct pci_dev *pdev)
 		 * In case of a detached 64 bit DMA device from vm, the device
 		 * is put into si_domain for identity mapping.
 		 */
-		if (pdev->dma_mask > DMA_BIT_MASK(32)) {
+		if (iommu_should_identity_map(pdev, 0)) {
 			int ret;
 			ret = domain_add_dev_info(si_domain, pdev);
+			if (ret)
+				return 0;
+			ret = domain_context_mapping(si_domain, pdev,
+						     CONTEXT_TT_MULTI_LEVEL);
 			if (!ret) {
 				printk(KERN_INFO "64bit %s uses identity mapping\n",
 				       pci_name(pdev));
@@ -2480,7 +2537,7 @@ static int iommu_no_mapping(struct pci_dev *pdev)
 		}
 	}
 
-	return iommu_dummy(pdev);
+	return 0;
 }
 
 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
@@ -2493,10 +2550,11 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	int prot = 0;
 	int ret;
 	struct intel_iommu *iommu;
+	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(hwdev))
 		return paddr;
 
 	domain = get_valid_domain_for_dev(pdev);
@@ -2506,7 +2564,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	iommu = domain_get_iommu(domain);
 	size = aligned_nrpages(paddr, size);
 
-	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+				pdev->dma_mask);
 	if (!iova)
 		goto error;
@@ -2526,7 +2585,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	 * is not a big problem
 	 */
 	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
-				 paddr >> VTD_PAGE_SHIFT, size, prot);
+				 mm_to_dma_pfn(paddr_pfn), size, prot);
 	if (ret)
 		goto error;
@@ -2635,7 +2694,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	struct iova *iova;
 	struct intel_iommu *iommu;
 
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(dev))
 		return;
 
 	domain = find_domain(pdev);
@@ -2726,7 +2785,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	struct iova *iova;
 	struct intel_iommu *iommu;
 
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(hwdev))
 		return;
 
 	domain = find_domain(pdev);
@@ -2785,7 +2844,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
-	if (iommu_no_mapping(pdev))
+	if (iommu_no_mapping(hwdev))
 		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
 
 	domain = get_valid_domain_for_dev(pdev);
@@ -2797,7 +2856,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	for_each_sg(sglist, sg, nelems, i)
 		size += aligned_nrpages(sg->offset, sg->length);
 
-	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+				pdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;
@@ -2815,7 +2875,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 
 	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
 
-	ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
 	if (unlikely(ret)) {
 		/*  clear the page */
 		dma_pte_clear_range(domain, start_vpfn,
@@ -3348,6 +3408,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 
 	domain->iommu_count = 0;
 	domain->iommu_coherency = 0;
+	domain->iommu_snooping = 0;
 	domain->max_addr = 0;
 
 	/* always allocate the top pgd */
@@ -3540,6 +3601,9 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain,
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 
+	if (!size)
+		return;
+
 	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
 			    (iova + size - 1) >> VTD_PAGE_SHIFT);
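
A note on the intel-iommu changes above: the IOVA allocator counts MM pages while the page-table code counts VT-d pages, which is why the patch wraps the size in dma_to_mm_pfn() before calling intel_alloc_iova(). Below is a minimal standalone sketch of that arithmetic, assuming 4KiB for both VTD_PAGE_SIZE and PAGE_SIZE; the macros and helper names mirror the kernel's but are reimplemented here purely for illustration.

/* Illustrative only: standalone reimplementation of the page-count helpers
 * the patch relies on, assuming VTD_PAGE_SHIFT == 12 and PAGE_SHIFT == 12. */
#include <stdio.h>
#include <stddef.h>

#define VTD_PAGE_SHIFT	12
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Number of VT-d pages needed for a buffer, rounded up to MM page size. */
static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
	host_addr &= ~PAGE_MASK;	/* keep only the offset within the page */
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

/* With equal page sizes this conversion is the identity; it only shrinks the
 * count when MM pages are larger than the 4KiB VT-d pages. */
static unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

int main(void)
{
	/* A 6000-byte buffer starting 0x200 bytes into a page spans 2 pages. */
	unsigned long nr_vtd = aligned_nrpages(0x1200, 6000);

	printf("VTD pages: %lu, MM pages for the IOVA: %lu\n",
	       nr_vtd, dma_to_mm_pfn(nr_vtd));
	return 0;
}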
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d9f06fbfa0b..d986afb7032 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -127,17 +127,23 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
  * reliably as devices without an INTx disable bit will then generate a
  * level IRQ which will never be cleared.
  */
-static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 {
 	u32 mask_bits = desc->masked;
 
 	if (!desc->msi_attrib.maskbit)
-		return;
+		return 0;
 
 	mask_bits &= ~mask;
 	mask_bits |= flag;
 	pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
-	desc->masked = mask_bits;
+
+	return mask_bits;
+}
+
+static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+{
+	desc->masked = __msi_mask_irq(desc, mask, flag);
 }
 
 /*
@@ -147,15 +153,21 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
  * file.  This saves a few milliseconds when initialising devices with lots
  * of MSI-X interrupts.
  */
-static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
 {
 	u32 mask_bits = desc->masked;
 	unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
-					PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
+					PCI_MSIX_ENTRY_VECTOR_CTRL;
 	mask_bits &= ~1;
 	mask_bits |= flag;
 	writel(mask_bits, desc->mask_base + offset);
-	desc->masked = mask_bits;
+
+	return mask_bits;
+}
+
+static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+{
+	desc->masked = __msix_mask_irq(desc, flag);
 }
 
 static void msi_set_mask_bit(unsigned irq, u32 flag)
@@ -188,9 +200,9 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 		void __iomem *base = entry->mask_base +
 			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
 
-		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
-		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
-		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
+		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
+		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
+		msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
 	} else {
 		struct pci_dev *dev = entry->dev;
 		int pos = entry->msi_attrib.pos;
@@ -225,11 +237,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 		base = entry->mask_base +
 			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
 
-		writel(msg->address_lo,
-			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
-		writel(msg->address_hi,
-			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
-		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
+		writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
+		writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
+		writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
 	} else {
 		struct pci_dev *dev = entry->dev;
 		int pos = entry->msi_attrib.pos;
@@ -385,6 +395,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	/* Configure MSI capability structure */
 	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
 	if (ret) {
+		msi_mask_irq(entry, mask, ~mask);
 		msi_free_irqs(dev);
 		return ret;
 	}
@@ -439,8 +450,14 @@ static int msix_capability_init(struct pci_dev *dev,
 
 	for (i = 0; i < nvec; i++) {
 		entry = alloc_msi_entry(dev);
-		if (!entry)
-			break;
+		if (!entry) {
+			if (!i)
+				iounmap(base);
+			else
+				msi_free_irqs(dev);
+			/* No enough memory. Don't try again */
+			return -ENOMEM;
+		}
 
 		j = entries[i].entry;
 		entry->msi_attrib.is_msix = 1;
@@ -487,7 +504,7 @@ static int msix_capability_init(struct pci_dev *dev,
 		set_irq_msi(entry->irq, entry);
 		j = entries[i].entry;
 		entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
-					PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+					PCI_MSIX_ENTRY_VECTOR_CTRL);
 		msix_mask_irq(entry, 1);
 		i++;
 	}
@@ -611,9 +628,11 @@ void pci_msi_shutdown(struct pci_dev *dev)
 	pci_intx_for_msi(dev, 1);
 	dev->msi_enabled = 0;
 
+	/* Return the device with MSI unmasked as initial states */
 	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
 	mask = msi_capable_mask(ctrl);
-	msi_mask_irq(desc, mask, ~mask);
+	/* Keep cached state to be restored */
+	__msi_mask_irq(desc, mask, ~mask);
 
 	/* Restore dev->irq to its default pin-assertion irq */
 	dev->irq = desc->msi_attrib.default_irq;
@@ -653,7 +672,6 @@ static int msi_free_irqs(struct pci_dev* dev)
 
 	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
 		if (entry->msi_attrib.is_msix) {
-			msix_mask_irq(entry, 1);
 			if (list_is_last(&entry->list, &dev->msi_list))
 				iounmap(entry->mask_base);
 		}
@@ -741,9 +759,17 @@ static void msix_free_all_irqs(struct pci_dev *dev)
 
 void pci_msix_shutdown(struct pci_dev* dev)
 {
+	struct msi_desc *entry;
+
 	if (!pci_msi_enable || !dev || !dev->msix_enabled)
 		return;
 
+	/* Return the device with MSI-X masked as initial states */
+	list_for_each_entry(entry, &dev->msi_list, list) {
+		/* Keep cached states to be restored */
+		__msix_mask_irq(entry, 1);
+	}
+
 	msix_set_enable(dev, 0);
 	pci_intx_for_msi(dev, 1);
 	dev->msix_enabled = 0;
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index a0662842550..de27c1cb5a2 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -6,11 +6,11 @@
 #ifndef MSI_H
 #define MSI_H
 
-#define PCI_MSIX_ENTRY_SIZE			16
-#define PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET	0
-#define PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET	4
-#define PCI_MSIX_ENTRY_DATA_OFFSET		8
-#define PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET	12
+#define PCI_MSIX_ENTRY_SIZE		16
+#define PCI_MSIX_ENTRY_LOWER_ADDR	0
+#define PCI_MSIX_ENTRY_UPPER_ADDR	4
+#define PCI_MSIX_ENTRY_DATA		8
+#define PCI_MSIX_ENTRY_VECTOR_CTRL	12
 
 #define msi_control_reg(base)		(base + PCI_MSI_FLAGS)
 #define msi_lower_address_reg(base)	(base + PCI_MSI_ADDRESS_LO)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6c93af5ced1..dbd0f947f49 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1517,11 +1517,20 @@ void pci_enable_ari(struct pci_dev *dev)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
- * behind bridges on add-in cards.
+ * behind bridges on add-in cards.  For devices with ARI enabled, the slot
+ * number is always 0 (see the Implementation Note in section 2.2.8.1 of
+ * the PCI Express Base Specification, Revision 2.1)
 */
 u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
 {
-	return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
+	int slot;
+
+	if (pci_ari_enabled(dev->bus))
+		slot = 0;
+	else
+		slot = PCI_SLOT(dev->devfn);
+
+	return (((pin - 1) + slot) % 4) + 1;
 }
 
 int
@@ -2171,7 +2180,7 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
 	u16 ctrl;
 	struct pci_dev *pdev;
 
-	if (dev->subordinate)
+	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
 		return -ENOTTY;
 
 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
index ece97df4df6..a928d8ab6bd 100644
--- a/drivers/pci/pcie/aer/ecrc.c
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -106,7 +106,7 @@ void pcie_set_ecrc_checking(struct pci_dev *dev)
 		disable_ecrc_checking(dev);
 		break;
 	case ECRC_POLICY_ON:
-		enable_ecrc_checking(dev);;
+		enable_ecrc_checking(dev);
 		break;
 	default:
 		return;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 56552d74abe..06b96562396 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1058,6 +1058,11 @@ static void __devinit quirk_no_ata_d3(struct pci_dev *pdev)
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, quirk_no_ata_d3);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, quirk_no_ata_d3);
+/* ALi loses some register settings that we cannot then restore */
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, quirk_no_ata_d3);
+/* VIA comes back fine but we need to keep it alive or ACPI GTM failures
+   occur when mode detecting */
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_no_ata_d3);
 
 /* This was originally an Alpha specific thing, but it really fits here.
  * The i82375 PCI/EISA bridge appears as non-classified. Fix that.
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index b711fb7181e..1898c7b4790 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -100,16 +100,16 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
 {
 	struct resource *res = &dev->resource[resource];
 	struct resource *root;
-	char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
 	int err;
 
 	root = pci_find_parent_resource(dev, res);
 
 	err = -EINVAL;
 	if (root != NULL)
-		err = insert_resource(root, res);
+		err = request_resource(root, res);
 
 	if (err) {
+		const char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
 		dev_err(&dev->dev, "BAR %d: %s of %s %pR\n",
 			resource,
 			root ? "address space collision on" :
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index eddb0748b0e..8c02b6c53bd 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -311,7 +311,7 @@ EXPORT_SYMBOL_GPL(pci_destroy_slot);
 #include <linux/pci_hotplug.h>
 /**
  * pci_hp_create_link - create symbolic link to the hotplug driver module.
- * @slot: struct pci_slot
+ * @pci_slot: struct pci_slot
  *
 * Helper function for pci_hotplug_core.c to create symbolic link to
 * the hotplug driver module.
@@ -334,7 +334,7 @@ EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
 
 /**
  * pci_hp_remove_link - remove symbolic link to the hotplug driver module.
- * @slot: struct pci_slot
+ * @pci_slot: struct pci_slot
 *
 * Helper function for pci_hotplug_core.c to remove symbolic link to
 * the hotplug driver module.
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index ec22284eed3..e1c1ec54089 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -9,7 +9,6 @@
 #include <linux/errno.h>
 #include <linux/pci.h>
-#include <linux/smp_lock.h>
 #include <linux/syscalls.h>
 #include <asm/uaccess.h>
 #include "pci.h"
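
The pci_swizzle_interrupt_pin() change in pci.c above is plain modular arithmetic over the one-based INTx pin number. A small standalone sketch follows, with an ari flag standing in for the pci_ari_enabled() test used by the patch; it is illustrative only, not the kernel code.

/* Illustrative only: the INTx swizzle with the ARI case folded in.
 * pin is 1..4 (INTA..INTD), slot is the device's slot number. */
#include <stdio.h>

static unsigned char swizzle_pin(int slot, unsigned char pin, int ari)
{
	if (ari)	/* ARI devices always swizzle as slot 0 */
		slot = 0;
	return (((pin - 1) + slot) % 4) + 1;
}

int main(void)
{
	/* INTB (pin 2) on a device in slot 3 reaches the bridge as INTA:
	 * ((2 - 1) + 3) % 4 + 1 = 1. */
	printf("%u\n", swizzle_pin(3, 2, 0));	/* prints 1 */
	/* With ARI the slot is treated as 0, so INTB stays INTB. */
	printf("%u\n", swizzle_pin(3, 2, 1));	/* prints 2 */
	return 0;
}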