Diffstat (limited to 'drivers/pci')
40 files changed, 2091 insertions, 1433 deletions
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 7d63f8ced24..4b47f4ece5b 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -26,6 +26,8 @@ obj-$(CONFIG_HT_IRQ) += htirq.o # Build Intel IOMMU support obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o +obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o + # # Some architectures use the generic PCI setup functions # diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 529d9d7727b..999cc4088b5 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -151,6 +151,13 @@ void pci_bus_add_devices(struct pci_bus *bus) if (retval) dev_err(&dev->dev, "Error creating cpuaffinity" " file, continuing...\n"); + + retval = device_create_file(&child_bus->dev, + &dev_attr_cpulistaffinity); + if (retval) + dev_err(&dev->dev, + "Error creating cpulistaffinity" + " file, continuing...\n"); } } } diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 8bf86ae2333..691b3adeb87 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -19,15 +19,18 @@ * Author: Shaohua Li <shaohua.li@intel.com> * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> * - * This file implements early detection/parsing of DMA Remapping Devices + * This file implements early detection/parsing of Remapping Devices * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI * tables. + * + * These routines are used by both DMA-remapping and Interrupt-remapping */ #include <linux/pci.h> #include <linux/dmar.h> -#include "iova.h" -#include "intel-iommu.h" +#include <linux/iova.h> +#include <linux/intel-iommu.h> +#include <linux/timer.h> #undef PREFIX #define PREFIX "DMAR:" @@ -37,7 +40,6 @@ * these units are not supported by the architecture. */ LIST_HEAD(dmar_drhd_units); -LIST_HEAD(dmar_rmrr_units); static struct acpi_table_header * __initdata dmar_tbl; @@ -53,11 +55,6 @@ static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) list_add(&drhd->list, &dmar_drhd_units); } -static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr) -{ - list_add(&rmrr->list, &dmar_rmrr_units); -} - static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope, struct pci_dev **dev, u16 segment) { @@ -172,19 +169,36 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) struct acpi_dmar_hardware_unit *drhd; struct dmar_drhd_unit *dmaru; int ret = 0; - static int include_all; dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); if (!dmaru) return -ENOMEM; + dmaru->hdr = header; drhd = (struct acpi_dmar_hardware_unit *)header; dmaru->reg_base_addr = drhd->address; dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ + ret = alloc_iommu(dmaru); + if (ret) { + kfree(dmaru); + return ret; + } + dmar_register_drhd_unit(dmaru); + return 0; +} + +static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru) +{ + struct acpi_dmar_hardware_unit *drhd; + static int include_all; + int ret = 0; + + drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr; + if (!dmaru->include_all) ret = dmar_parse_dev_scope((void *)(drhd + 1), - ((void *)drhd) + header->length, + ((void *)drhd) + drhd->header.length, &dmaru->devices_cnt, &dmaru->devices, drhd->segment); else { @@ -197,37 +211,59 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) include_all = 1; } - if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all)) + if (ret) { + list_del(&dmaru->list); kfree(dmaru); - else - dmar_register_drhd_unit(dmaru); + } return ret; } +#ifdef CONFIG_DMAR +LIST_HEAD(dmar_rmrr_units); + +static void __init 
dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr) +{ + list_add(&rmrr->list, &dmar_rmrr_units); +} + + static int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) { struct acpi_dmar_reserved_memory *rmrr; struct dmar_rmrr_unit *rmrru; - int ret = 0; rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); if (!rmrru) return -ENOMEM; + rmrru->hdr = header; rmrr = (struct acpi_dmar_reserved_memory *)header; rmrru->base_address = rmrr->base_address; rmrru->end_address = rmrr->end_address; + + dmar_register_rmrr_unit(rmrru); + return 0; +} + +static int __init +rmrr_parse_dev(struct dmar_rmrr_unit *rmrru) +{ + struct acpi_dmar_reserved_memory *rmrr; + int ret; + + rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr; ret = dmar_parse_dev_scope((void *)(rmrr + 1), - ((void *)rmrr) + header->length, + ((void *)rmrr) + rmrr->header.length, &rmrru->devices_cnt, &rmrru->devices, rmrr->segment); - if (ret || (rmrru->devices_cnt == 0)) + if (ret || (rmrru->devices_cnt == 0)) { + list_del(&rmrru->list); kfree(rmrru); - else - dmar_register_rmrr_unit(rmrru); + } return ret; } +#endif static void __init dmar_table_print_dmar_entry(struct acpi_dmar_header *header) @@ -240,19 +276,39 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header) drhd = (struct acpi_dmar_hardware_unit *)header; printk (KERN_INFO PREFIX "DRHD (flags: 0x%08x)base: 0x%016Lx\n", - drhd->flags, drhd->address); + drhd->flags, (unsigned long long)drhd->address); break; case ACPI_DMAR_TYPE_RESERVED_MEMORY: rmrr = (struct acpi_dmar_reserved_memory *)header; printk (KERN_INFO PREFIX "RMRR base: 0x%016Lx end: 0x%016Lx\n", - rmrr->base_address, rmrr->end_address); + (unsigned long long)rmrr->base_address, + (unsigned long long)rmrr->end_address); break; } } /** + * dmar_table_detect - checks to see if the platform supports DMAR devices + */ +static int __init dmar_table_detect(void) +{ + acpi_status status = AE_OK; + + /* if we could find DMAR table, then there are DMAR devices */ + status = acpi_get_table(ACPI_SIG_DMAR, 0, + (struct acpi_table_header **)&dmar_tbl); + + if (ACPI_SUCCESS(status) && !dmar_tbl) { + printk (KERN_WARNING PREFIX "Unable to map DMAR\n"); + status = AE_NOT_FOUND; + } + + return (ACPI_SUCCESS(status) ? 1 : 0); +} + +/** * parse_dmar_table - parses the DMA reporting table */ static int __init @@ -262,11 +318,17 @@ parse_dmar_table(void) struct acpi_dmar_header *entry_header; int ret = 0; + /* + * Do it again, earlier dmar_tbl mapping could be mapped with + * fixed map. 
+ */ + dmar_table_detect(); + dmar = (struct acpi_table_dmar *)dmar_tbl; if (!dmar) return -ENODEV; - if (dmar->width < PAGE_SHIFT_4K - 1) { + if (dmar->width < PAGE_SHIFT - 1) { printk(KERN_WARNING PREFIX "Invalid DMAR haw\n"); return -EINVAL; } @@ -284,7 +346,9 @@ parse_dmar_table(void) ret = dmar_parse_one_drhd(entry_header); break; case ACPI_DMAR_TYPE_RESERVED_MEMORY: +#ifdef CONFIG_DMAR ret = dmar_parse_one_rmrr(entry_header); +#endif break; default: printk(KERN_WARNING PREFIX @@ -300,15 +364,77 @@ parse_dmar_table(void) return ret; } +int dmar_pci_device_match(struct pci_dev *devices[], int cnt, + struct pci_dev *dev) +{ + int index; -int __init dmar_table_init(void) + while (dev) { + for (index = 0; index < cnt; index++) + if (dev == devices[index]) + return 1; + + /* Check our parent */ + dev = dev->bus->self; + } + + return 0; +} + +struct dmar_drhd_unit * +dmar_find_matched_drhd_unit(struct pci_dev *dev) { + struct dmar_drhd_unit *drhd = NULL; + + list_for_each_entry(drhd, &dmar_drhd_units, list) { + if (drhd->include_all || dmar_pci_device_match(drhd->devices, + drhd->devices_cnt, dev)) + return drhd; + } + + return NULL; +} + +int __init dmar_dev_scope_init(void) +{ + struct dmar_drhd_unit *drhd, *drhd_n; + int ret = -ENODEV; + + list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) { + ret = dmar_parse_dev(drhd); + if (ret) + return ret; + } + +#ifdef CONFIG_DMAR + { + struct dmar_rmrr_unit *rmrr, *rmrr_n; + list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) { + ret = rmrr_parse_dev(rmrr); + if (ret) + return ret; + } + } +#endif + + return ret; +} + +int __init dmar_table_init(void) +{ + static int dmar_table_initialized; int ret; + if (dmar_table_initialized) + return 0; + + dmar_table_initialized = 1; + ret = parse_dmar_table(); if (ret) { - printk(KERN_INFO PREFIX "parse DMAR table failure.\n"); + if (ret != -ENODEV) + printk(KERN_INFO PREFIX "parse DMAR table failure.\n"); return ret; } @@ -317,27 +443,320 @@ int __init dmar_table_init(void) return -ENODEV; } +#ifdef CONFIG_DMAR if (list_empty(&dmar_rmrr_units)) printk(KERN_INFO PREFIX "No RMRR found\n"); +#endif +#ifdef CONFIG_INTR_REMAP + parse_ioapics_under_ir(); +#endif return 0; } -/** - * early_dmar_detect - checks to see if the platform supports DMAR devices +void __init detect_intel_iommu(void) +{ + int ret; + + ret = dmar_table_detect(); + + { +#ifdef CONFIG_INTR_REMAP + struct acpi_table_dmar *dmar; + /* + * for now we will disable dma-remapping when interrupt + * remapping is enabled. + * When support for queued invalidation for IOTLB invalidation + * is added, we will not need this any more. 
+ */ + dmar = (struct acpi_table_dmar *) dmar_tbl; + if (ret && cpu_has_x2apic && dmar->flags & 0x1) + printk(KERN_INFO + "Queued invalidation will be enabled to support " + "x2apic and Intr-remapping.\n"); +#endif +#ifdef CONFIG_DMAR + if (ret && !no_iommu && !iommu_detected && !swiotlb && + !dmar_disabled) + iommu_detected = 1; +#endif + } + dmar_tbl = NULL; +} + + +int alloc_iommu(struct dmar_drhd_unit *drhd) +{ + struct intel_iommu *iommu; + int map_size; + u32 ver; + static int iommu_allocated = 0; + + iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); + if (!iommu) + return -ENOMEM; + + iommu->seq_id = iommu_allocated++; + + iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE); + if (!iommu->reg) { + printk(KERN_ERR "IOMMU: can't map the region\n"); + goto error; + } + iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); + iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); + + /* the registers might be more than one page */ + map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), + cap_max_fault_reg_offset(iommu->cap)); + map_size = VTD_PAGE_ALIGN(map_size); + if (map_size > VTD_PAGE_SIZE) { + iounmap(iommu->reg); + iommu->reg = ioremap(drhd->reg_base_addr, map_size); + if (!iommu->reg) { + printk(KERN_ERR "IOMMU: can't map the region\n"); + goto error; + } + } + + ver = readl(iommu->reg + DMAR_VER_REG); + pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", + (unsigned long long)drhd->reg_base_addr, + DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), + (unsigned long long)iommu->cap, + (unsigned long long)iommu->ecap); + + spin_lock_init(&iommu->register_lock); + + drhd->iommu = iommu; + return 0; +error: + kfree(iommu); + return -1; +} + +void free_iommu(struct intel_iommu *iommu) +{ + if (!iommu) + return; + +#ifdef CONFIG_DMAR + free_dmar_iommu(iommu); +#endif + + if (iommu->reg) + iounmap(iommu->reg); + kfree(iommu); +} + +/* + * Reclaim all the submitted descriptors which have completed its work. */ -int __init early_dmar_detect(void) +static inline void reclaim_free_desc(struct q_inval *qi) { - acpi_status status = AE_OK; + while (qi->desc_status[qi->free_tail] == QI_DONE) { + qi->desc_status[qi->free_tail] = QI_FREE; + qi->free_tail = (qi->free_tail + 1) % QI_LENGTH; + qi->free_cnt++; + } +} - /* if we could find DMAR table, then there are DMAR devices */ - status = acpi_get_table(ACPI_SIG_DMAR, 0, - (struct acpi_table_header **)&dmar_tbl); +/* + * Submit the queued invalidation descriptor to the remapping + * hardware unit and wait for its completion. + */ +void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) +{ + struct q_inval *qi = iommu->qi; + struct qi_desc *hw, wait_desc; + int wait_index, index; + unsigned long flags; - if (ACPI_SUCCESS(status) && !dmar_tbl) { - printk (KERN_WARNING PREFIX "Unable to map DMAR\n"); - status = AE_NOT_FOUND; + if (!qi) + return; + + hw = qi->desc; + + spin_lock_irqsave(&qi->q_lock, flags); + while (qi->free_cnt < 3) { + spin_unlock_irqrestore(&qi->q_lock, flags); + cpu_relax(); + spin_lock_irqsave(&qi->q_lock, flags); } - return (ACPI_SUCCESS(status) ? 
1 : 0); + index = qi->free_head; + wait_index = (index + 1) % QI_LENGTH; + + qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE; + + hw[index] = *desc; + + wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE; + wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]); + + hw[wait_index] = wait_desc; + + __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc)); + __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc)); + + qi->free_head = (qi->free_head + 2) % QI_LENGTH; + qi->free_cnt -= 2; + + spin_lock(&iommu->register_lock); + /* + * update the HW tail register indicating the presence of + * new descriptors. + */ + writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG); + spin_unlock(&iommu->register_lock); + + while (qi->desc_status[wait_index] != QI_DONE) { + /* + * We will leave the interrupts disabled, to prevent interrupt + * context to queue another cmd while a cmd is already submitted + * and waiting for completion on this cpu. This is to avoid + * a deadlock where the interrupt context can wait indefinitely + * for free slots in the queue. + */ + spin_unlock(&qi->q_lock); + cpu_relax(); + spin_lock(&qi->q_lock); + } + + qi->desc_status[index] = QI_DONE; + + reclaim_free_desc(qi); + spin_unlock_irqrestore(&qi->q_lock, flags); +} + +/* + * Flush the global interrupt entry cache. + */ +void qi_global_iec(struct intel_iommu *iommu) +{ + struct qi_desc desc; + + desc.low = QI_IEC_TYPE; + desc.high = 0; + + qi_submit_sync(&desc, iommu); +} + +int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, + u64 type, int non_present_entry_flush) +{ + + struct qi_desc desc; + + if (non_present_entry_flush) { + if (!cap_caching_mode(iommu->cap)) + return 1; + else + did = 0; + } + + desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did) + | QI_CC_GRAN(type) | QI_CC_TYPE; + desc.high = 0; + + qi_submit_sync(&desc, iommu); + + return 0; + +} + +int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, + unsigned int size_order, u64 type, + int non_present_entry_flush) +{ + u8 dw = 0, dr = 0; + + struct qi_desc desc; + int ih = 0; + + if (non_present_entry_flush) { + if (!cap_caching_mode(iommu->cap)) + return 1; + else + did = 0; + } + + if (cap_write_drain(iommu->cap)) + dw = 1; + + if (cap_read_drain(iommu->cap)) + dr = 1; + + desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw) + | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE; + desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih) + | QI_IOTLB_AM(size_order); + + qi_submit_sync(&desc, iommu); + + return 0; + +} + +/* + * Enable Queued Invalidation interface. This is a must to support + * interrupt-remapping. Also used by DMA-remapping, which replaces + * register based IOTLB invalidation. + */ +int dmar_enable_qi(struct intel_iommu *iommu) +{ + u32 cmd, sts; + unsigned long flags; + struct q_inval *qi; + + if (!ecap_qis(iommu->ecap)) + return -ENOENT; + + /* + * queued invalidation is already setup and enabled. 
+ */ + if (iommu->qi) + return 0; + + iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL); + if (!iommu->qi) + return -ENOMEM; + + qi = iommu->qi; + + qi->desc = (void *)(get_zeroed_page(GFP_KERNEL)); + if (!qi->desc) { + kfree(qi); + iommu->qi = 0; + return -ENOMEM; + } + + qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL); + if (!qi->desc_status) { + free_page((unsigned long) qi->desc); + kfree(qi); + iommu->qi = 0; + return -ENOMEM; + } + + qi->free_head = qi->free_tail = 0; + qi->free_cnt = QI_LENGTH; + + spin_lock_init(&qi->q_lock); + + spin_lock_irqsave(&iommu->register_lock, flags); + /* write zero to the tail reg */ + writel(0, iommu->reg + DMAR_IQT_REG); + + dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc)); + + cmd = iommu->gcmd | DMA_GCMD_QIE; + iommu->gcmd |= DMA_GCMD_QIE; + writel(cmd, iommu->reg + DMAR_GCMD_REG); + + /* Make sure hardware complete it */ + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); + spin_unlock_irqrestore(&iommu->register_lock, flags); + + return 0; } diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index 8467d028732..8cfd1c4926c 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c @@ -123,10 +123,8 @@ static struct ebda_pci_rsrc *alloc_ebda_pci_rsrc (void) static void __init print_bus_info (void) { struct bus_info *ptr; - struct list_head *ptr1; - list_for_each (ptr1, &bus_info_head) { - ptr = list_entry (ptr1, struct bus_info, bus_info_list); + list_for_each_entry(ptr, &bus_info_head, bus_info_list) { debug ("%s - slot_min = %x\n", __func__, ptr->slot_min); debug ("%s - slot_max = %x\n", __func__, ptr->slot_max); debug ("%s - slot_count = %x\n", __func__, ptr->slot_count); @@ -146,10 +144,8 @@ static void __init print_bus_info (void) static void print_lo_info (void) { struct rio_detail *ptr; - struct list_head *ptr1; debug ("print_lo_info ----\n"); - list_for_each (ptr1, &rio_lo_head) { - ptr = list_entry (ptr1, struct rio_detail, rio_detail_list); + list_for_each_entry(ptr, &rio_lo_head, rio_detail_list) { debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id); debug ("%s - rio_type = %x\n", __func__, ptr->rio_type); debug ("%s - owner_id = %x\n", __func__, ptr->owner_id); @@ -163,10 +159,8 @@ static void print_lo_info (void) static void print_vg_info (void) { struct rio_detail *ptr; - struct list_head *ptr1; debug ("%s ---\n", __func__); - list_for_each (ptr1, &rio_vg_head) { - ptr = list_entry (ptr1, struct rio_detail, rio_detail_list); + list_for_each_entry(ptr, &rio_vg_head, rio_detail_list) { debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id); debug ("%s - rio_type = %x\n", __func__, ptr->rio_type); debug ("%s - owner_id = %x\n", __func__, ptr->owner_id); @@ -180,10 +174,8 @@ static void print_vg_info (void) static void __init print_ebda_pci_rsrc (void) { struct ebda_pci_rsrc *ptr; - struct list_head *ptr1; - list_for_each (ptr1, &ibmphp_ebda_pci_rsrc_head) { - ptr = list_entry (ptr1, struct ebda_pci_rsrc, ebda_pci_rsrc_list); + list_for_each_entry(ptr, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) { debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", __func__, ptr->rsrc_type ,ptr->bus_num, ptr->dev_fun,ptr->start_addr, ptr->end_addr); } @@ -192,10 +184,8 @@ static void __init print_ebda_pci_rsrc (void) static void __init print_ibm_slot (void) { struct slot *ptr; - struct list_head *ptr1; - list_for_each (ptr1, &ibmphp_slot_head) { - ptr = list_entry (ptr1, struct slot, ibm_slot_list); + 
list_for_each_entry(ptr, &ibmphp_slot_head, ibm_slot_list) { debug ("%s - slot_number: %x\n", __func__, ptr->number); } } @@ -203,10 +193,8 @@ static void __init print_ibm_slot (void) static void __init print_opt_vg (void) { struct opt_rio *ptr; - struct list_head *ptr1; debug ("%s ---\n", __func__); - list_for_each (ptr1, &opt_vg_head) { - ptr = list_entry (ptr1, struct opt_rio, opt_rio_list); + list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) { debug ("%s - rio_type %x\n", __func__, ptr->rio_type); debug ("%s - chassis_num: %x\n", __func__, ptr->chassis_num); debug ("%s - first_slot_num: %x\n", __func__, ptr->first_slot_num); @@ -217,13 +205,9 @@ static void __init print_opt_vg (void) static void __init print_ebda_hpc (void) { struct controller *hpc_ptr; - struct list_head *ptr1; u16 index; - list_for_each (ptr1, &ebda_hpc_head) { - - hpc_ptr = list_entry (ptr1, struct controller, ebda_hpc_list); - + list_for_each_entry(hpc_ptr, &ebda_hpc_head, ebda_hpc_list) { for (index = 0; index < hpc_ptr->slot_count; index++) { debug ("%s - physical slot#: %x\n", __func__, hpc_ptr->slots[index].slot_num); debug ("%s - pci bus# of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_bus_num); @@ -276,7 +260,7 @@ int __init ibmphp_access_ebda (void) iounmap (io_mem); debug ("returned ebda segment: %x\n", ebda_seg); - io_mem = ioremap (ebda_seg<<4, 65000); + io_mem = ioremap(ebda_seg<<4, 1024); if (!io_mem ) return -ENOMEM; next_offset = 0x180; @@ -460,9 +444,7 @@ static int __init ebda_rio_table (void) static struct opt_rio *search_opt_vg (u8 chassis_num) { struct opt_rio *ptr; - struct list_head *ptr1; - list_for_each (ptr1, &opt_vg_head) { - ptr = list_entry (ptr1, struct opt_rio, opt_rio_list); + list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) { if (ptr->chassis_num == chassis_num) return ptr; } @@ -473,10 +455,8 @@ static int __init combine_wpg_for_chassis (void) { struct opt_rio *opt_rio_ptr = NULL; struct rio_detail *rio_detail_ptr = NULL; - struct list_head *list_head_ptr = NULL; - list_for_each (list_head_ptr, &rio_vg_head) { - rio_detail_ptr = list_entry (list_head_ptr, struct rio_detail, rio_detail_list); + list_for_each_entry(rio_detail_ptr, &rio_vg_head, rio_detail_list) { opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num); if (!opt_rio_ptr) { opt_rio_ptr = kzalloc(sizeof(struct opt_rio), GFP_KERNEL); @@ -497,14 +477,12 @@ static int __init combine_wpg_for_chassis (void) } /* - * reorgnizing linked list of expansion box + * reorganizing linked list of expansion box */ static struct opt_rio_lo *search_opt_lo (u8 chassis_num) { struct opt_rio_lo *ptr; - struct list_head *ptr1; - list_for_each (ptr1, &opt_lo_head) { - ptr = list_entry (ptr1, struct opt_rio_lo, opt_rio_lo_list); + list_for_each_entry(ptr, &opt_lo_head, opt_rio_lo_list) { if (ptr->chassis_num == chassis_num) return ptr; } @@ -515,10 +493,8 @@ static int combine_wpg_for_expansion (void) { struct opt_rio_lo *opt_rio_lo_ptr = NULL; struct rio_detail *rio_detail_ptr = NULL; - struct list_head *list_head_ptr = NULL; - list_for_each (list_head_ptr, &rio_lo_head) { - rio_detail_ptr = list_entry (list_head_ptr, struct rio_detail, rio_detail_list); + list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) { opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num); if (!opt_rio_lo_ptr) { opt_rio_lo_ptr = kzalloc(sizeof(struct opt_rio_lo), GFP_KERNEL); @@ -550,20 +526,17 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var) { struct opt_rio *opt_vg_ptr = NULL; struct opt_rio_lo *opt_lo_ptr = 
NULL; - struct list_head *ptr = NULL; int rc = 0; if (!var) { - list_for_each (ptr, &opt_vg_head) { - opt_vg_ptr = list_entry (ptr, struct opt_rio, opt_rio_list); + list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) { if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) { rc = -ENODEV; break; } } } else { - list_for_each (ptr, &opt_lo_head) { - opt_lo_ptr = list_entry (ptr, struct opt_rio_lo, opt_rio_lo_list); + list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) { if ((first_slot < opt_lo_ptr->first_slot_num) && (slot_num >= opt_lo_ptr->first_slot_num)) { rc = -ENODEV; break; @@ -576,10 +549,8 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var) static struct opt_rio_lo * find_rxe_num (u8 slot_num) { struct opt_rio_lo *opt_lo_ptr; - struct list_head *ptr; - list_for_each (ptr, &opt_lo_head) { - opt_lo_ptr = list_entry (ptr, struct opt_rio_lo, opt_rio_lo_list); + list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) { //check to see if this slot_num belongs to expansion box if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1))) return opt_lo_ptr; @@ -590,10 +561,8 @@ static struct opt_rio_lo * find_rxe_num (u8 slot_num) static struct opt_rio * find_chassis_num (u8 slot_num) { struct opt_rio *opt_vg_ptr; - struct list_head *ptr; - list_for_each (ptr, &opt_vg_head) { - opt_vg_ptr = list_entry (ptr, struct opt_rio, opt_rio_list); + list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) { //check to see if this slot_num belongs to chassis if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0))) return opt_vg_ptr; @@ -607,11 +576,9 @@ static struct opt_rio * find_chassis_num (u8 slot_num) static u8 calculate_first_slot (u8 slot_num) { u8 first_slot = 1; - struct list_head * list; struct slot * slot_cur; - list_for_each (list, &ibmphp_slot_head) { - slot_cur = list_entry (list, struct slot, ibm_slot_list); + list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) { if (slot_cur->ctrl) { if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num)) first_slot = slot_cur->ctrl->ending_slot_num; @@ -767,7 +734,6 @@ static int __init ebda_rsrc_controller (void) struct bus_info *bus_info_ptr1, *bus_info_ptr2; int rc; struct slot *tmp_slot; - struct list_head *list; addr = hpc_list_ptr->phys_addr; for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) { @@ -997,9 +963,7 @@ static int __init ebda_rsrc_controller (void) } /* each hpc */ - list_for_each (list, &ibmphp_slot_head) { - tmp_slot = list_entry (list, struct slot, ibm_slot_list); - + list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) { snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot)); pci_hp_register(tmp_slot->hotplug_slot, pci_find_bus(0, tmp_slot->bus), tmp_slot->device); @@ -1101,10 +1065,8 @@ u16 ibmphp_get_total_controllers (void) struct slot *ibmphp_get_slot_from_physical_num (u8 physical_num) { struct slot *slot; - struct list_head *list; - list_for_each (list, &ibmphp_slot_head) { - slot = list_entry (list, struct slot, ibm_slot_list); + list_for_each_entry(slot, &ibmphp_slot_head, ibm_slot_list) { if (slot->number == physical_num) return slot; } @@ -1120,10 +1082,8 @@ struct slot *ibmphp_get_slot_from_physical_num (u8 physical_num) struct bus_info *ibmphp_find_same_bus_num (u32 num) { struct bus_info *ptr; - struct list_head 
*ptr1; - list_for_each (ptr1, &bus_info_head) { - ptr = list_entry (ptr1, struct bus_info, bus_info_list); + list_for_each_entry(ptr, &bus_info_head, bus_info_list) { if (ptr->busno == num) return ptr; } @@ -1136,10 +1096,8 @@ struct bus_info *ibmphp_find_same_bus_num (u32 num) int ibmphp_get_bus_index (u8 num) { struct bus_info *ptr; - struct list_head *ptr1; - list_for_each (ptr1, &bus_info_head) { - ptr = list_entry (ptr1, struct bus_info, bus_info_list); + list_for_each_entry(ptr, &bus_info_head, bus_info_list) { if (ptr->busno == num) return ptr->index; } @@ -1212,11 +1170,9 @@ static struct pci_driver ibmphp_driver = { int ibmphp_register_pci (void) { struct controller *ctrl; - struct list_head *tmp; int rc = 0; - list_for_each (tmp, &ebda_hpc_head) { - ctrl = list_entry (tmp, struct controller, ebda_hpc_list); + list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) { if (ctrl->ctlr_type == 1) { rc = pci_register_driver(&ibmphp_driver); break; @@ -1227,12 +1183,10 @@ int ibmphp_register_pci (void) static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids) { struct controller *ctrl; - struct list_head *tmp; debug ("inside ibmphp_probe\n"); - list_for_each (tmp, &ebda_hpc_head) { - ctrl = list_entry (tmp, struct controller, ebda_hpc_list); + list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) { if (ctrl->ctlr_type == 1) { if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) { ctrl->ctrl_dev = dev; diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index 5f85b1b120e..2e6c4474644 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c @@ -102,13 +102,13 @@ static int get_##name (struct hotplug_slot *slot, type *value) \ { \ struct hotplug_slot_ops *ops = slot->ops; \ int retval = 0; \ - if (try_module_get(ops->owner)) { \ - if (ops->get_##name) \ - retval = ops->get_##name(slot, value); \ - else \ - *value = slot->info->name; \ - module_put(ops->owner); \ - } \ + if (!try_module_get(ops->owner)) \ + return -ENODEV; \ + if (ops->get_##name) \ + retval = ops->get_##name(slot, value); \ + else \ + *value = slot->info->name; \ + module_put(ops->owner); \ return retval; \ } diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 9e6cec67e1c..c367978bd7f 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -57,6 +57,19 @@ extern struct workqueue_struct *pciehp_wq; #define warn(format, arg...) \ printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) +#define ctrl_dbg(ctrl, format, arg...) \ + do { \ + if (pciehp_debug) \ + dev_printk(KERN_DEBUG, &ctrl->pcie->device, \ + format, ## arg); \ + } while (0) +#define ctrl_err(ctrl, format, arg...) \ + dev_err(&ctrl->pcie->device, format, ## arg) +#define ctrl_info(ctrl, format, arg...) \ + dev_info(&ctrl->pcie->device, format, ## arg) +#define ctrl_warn(ctrl, format, arg...) 
\ + dev_warn(&ctrl->pcie->device, format, ## arg) + #define SLOT_NAME_SIZE 10 struct slot { u8 bus; @@ -87,6 +100,7 @@ struct controller { int num_slots; /* Number of slots on ctlr */ int slot_num_inc; /* 1 or -1 */ struct pci_dev *pci_dev; + struct pcie_device *pcie; /* PCI Express port service */ struct list_head slot_list; struct hpc_ops *hpc_ops; wait_queue_head_t queue; /* sleep & wake process */ @@ -170,7 +184,7 @@ static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device) return slot; } - err("%s: slot (device=0x%x) not found\n", __func__, device); + ctrl_err(ctrl, "%s: slot (device=0x%x) not found\n", __func__, device); return NULL; } diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 4fd5355bc3b..c748a19db89 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -144,9 +144,10 @@ set_lock_exit: * sysfs interface which allows the user to toggle the Electro Mechanical * Interlock. Valid values are either 0 or 1. 0 == unlock, 1 == lock */ -static ssize_t lock_write_file(struct hotplug_slot *slot, const char *buf, - size_t count) +static ssize_t lock_write_file(struct hotplug_slot *hotplug_slot, + const char *buf, size_t count) { + struct slot *slot = hotplug_slot->private; unsigned long llock; u8 lock; int retval = 0; @@ -157,10 +158,11 @@ static ssize_t lock_write_file(struct hotplug_slot *slot, const char *buf, switch (lock) { case 0: case 1: - retval = set_lock_status(slot, lock); + retval = set_lock_status(hotplug_slot, lock); break; default: - err ("%d is an invalid lock value\n", lock); + ctrl_err(slot->ctrl, "%d is an invalid lock value\n", + lock); retval = -EINVAL; } if (retval) @@ -180,7 +182,10 @@ static struct hotplug_slot_attribute hotplug_slot_attr_lock = { */ static void release_slot(struct hotplug_slot *hotplug_slot) { - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + struct slot *slot = hotplug_slot->private; + + ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", + __func__, hotplug_slot->name); kfree(hotplug_slot->info); kfree(hotplug_slot); @@ -215,9 +220,9 @@ static int init_slots(struct controller *ctrl) get_adapter_status(hotplug_slot, &info->adapter_status); slot->hotplug_slot = hotplug_slot; - dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " - "slot_device_offset=%x\n", slot->bus, slot->device, - slot->hp_slot, slot->number, ctrl->slot_device_offset); + ctrl_dbg(ctrl, "Registering bus=%x dev=%x hp_slot=%x sun=%x " + "slot_device_offset=%x\n", slot->bus, slot->device, + slot->hp_slot, slot->number, ctrl->slot_device_offset); duplicate_name: retval = pci_hp_register(hotplug_slot, ctrl->pci_dev->subordinate, @@ -233,9 +238,11 @@ duplicate_name: if (len < SLOT_NAME_SIZE) goto duplicate_name; else - err("duplicate slot name overflow\n"); + ctrl_err(ctrl, "duplicate slot name " + "overflow\n"); } - err("pci_hp_register failed with error %d\n", retval); + ctrl_err(ctrl, "pci_hp_register failed with error %d\n", + retval); goto error_info; } /* create additional sysfs entries */ @@ -244,7 +251,8 @@ duplicate_name: &hotplug_slot_attr_lock.attr); if (retval) { pci_hp_deregister(hotplug_slot); - err("cannot create additional sysfs entries\n"); + ctrl_err(ctrl, "cannot create additional sysfs " + "entries\n"); goto error_info; } } @@ -278,7 +286,8 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) { struct slot *slot = hotplug_slot->private; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, 
"%s - physical_slot = %s\n", + __func__, hotplug_slot->name); hotplug_slot->info->attention_status = status; @@ -293,7 +302,8 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", + __func__, hotplug_slot->name); return pciehp_sysfs_enable_slot(slot); } @@ -303,7 +313,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", + __func__, hotplug_slot->name); return pciehp_sysfs_disable_slot(slot); } @@ -313,7 +324,8 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) struct slot *slot = hotplug_slot->private; int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", + __func__, hotplug_slot->name); retval = slot->hpc_ops->get_power_status(slot, value); if (retval < 0) @@ -327,7 +339,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) struct slot *slot = hotplug_slot->private; int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", + __func__, hotplug_slot->name); retval = slot->hpc_ops->get_attention_status(slot, value); if (retval < 0) @@ -341,7 +354,8 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) struct slot *slot = hotplug_slot->private; int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", + __func__, hotplug_slot->name); retval = slot->hpc_ops->get_latch_status(slot, value); if (retval < 0) @@ -355,7 +369,8 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) struct slot *slot = hotplug_slot->private; int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", + __func__, hotplug_slot->name); retval = slot->hpc_ops->get_adapter_status(slot, value); if (retval < 0) @@ -370,7 +385,8 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, struct slot *slot = hotplug_slot->private; int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", + __func__, hotplug_slot->name); retval = slot->hpc_ops->get_max_bus_speed(slot, value); if (retval < 0) @@ -384,7 +400,8 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe struct slot *slot = hotplug_slot->private; int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", + __func__, hotplug_slot->name); retval = slot->hpc_ops->get_cur_bus_speed(slot, value); if (retval < 0) @@ -402,14 +419,15 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_ struct pci_dev *pdev = dev->port; if (pciehp_force) - dbg("Bypassing BIOS check for pciehp use on %s\n", - pci_name(pdev)); + dev_info(&dev->device, + "Bypassing BIOS check for pciehp use on %s\n", + pci_name(pdev)); else if (pciehp_get_hp_hw_control_from_firmware(pdev)) goto err_out_none; ctrl = pcie_init(dev); if (!ctrl) { - dbg("%s: controller initialization failed\n", PCIE_MODULE_NAME); + dev_err(&dev->device, "controller initialization failed\n"); goto 
err_out_none; } set_service_data(dev, ctrl); @@ -418,11 +436,10 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_ rc = init_slots(ctrl); if (rc) { if (rc == -EBUSY) - warn("%s: slot already registered by another " - "hotplug driver\n", PCIE_MODULE_NAME); + ctrl_warn(ctrl, "slot already registered by another " + "hotplug driver\n"); else - err("%s: slot initialization failed\n", - PCIE_MODULE_NAME); + ctrl_err(ctrl, "slot initialization failed\n"); goto err_out_release_ctlr; } @@ -461,13 +478,13 @@ static void pciehp_remove (struct pcie_device *dev) #ifdef CONFIG_PM static int pciehp_suspend (struct pcie_device *dev, pm_message_t state) { - printk("%s ENTRY\n", __func__); + dev_info(&dev->device, "%s ENTRY\n", __func__); return 0; } static int pciehp_resume (struct pcie_device *dev) { - printk("%s ENTRY\n", __func__); + dev_info(&dev->device, "%s ENTRY\n", __func__); if (pciehp_force) { struct controller *ctrl = get_service_data(dev); struct slot *t_slot; @@ -497,10 +514,9 @@ static struct pcie_port_service_id port_pci_ids[] = { { .driver_data = 0, }, { /* end: all zeroes */ } }; -static const char device_name[] = "hpdriver"; static struct pcie_port_service_driver hpdriver_portdrv = { - .name = (char *)device_name, + .name = PCIE_MODULE_NAME, .id_table = &port_pci_ids[0], .probe = pciehp_probe, diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 96a5d55a498..acb7f9efd18 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -58,14 +58,15 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type) u8 pciehp_handle_attention_button(struct slot *p_slot) { u32 event_type; + struct controller *ctrl = p_slot->ctrl; /* Attention Button Change */ - dbg("pciehp: Attention button interrupt received.\n"); + ctrl_dbg(ctrl, "Attention button interrupt received.\n"); /* * Button pressed - See if need to TAKE ACTION!!! 
*/ - info("Button pressed on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Button pressed on Slot(%s)\n", p_slot->name); event_type = INT_BUTTON_PRESS; queue_interrupt_event(p_slot, event_type); @@ -77,22 +78,23 @@ u8 pciehp_handle_switch_change(struct slot *p_slot) { u8 getstatus; u32 event_type; + struct controller *ctrl = p_slot->ctrl; /* Switch Change */ - dbg("pciehp: Switch interrupt received.\n"); + ctrl_dbg(ctrl, "Switch interrupt received.\n"); p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if (getstatus) { /* * Switch opened */ - info("Latch open on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Latch open on Slot(%s)\n", p_slot->name); event_type = INT_SWITCH_OPEN; } else { /* * Switch closed */ - info("Latch close on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Latch close on Slot(%s)\n", p_slot->name); event_type = INT_SWITCH_CLOSE; } @@ -105,9 +107,10 @@ u8 pciehp_handle_presence_change(struct slot *p_slot) { u32 event_type; u8 presence_save; + struct controller *ctrl = p_slot->ctrl; /* Presence Change */ - dbg("pciehp: Presence/Notify input change.\n"); + ctrl_dbg(ctrl, "Presence/Notify input change.\n"); /* Switch is open, assume a presence change * Save the presence state @@ -117,13 +120,13 @@ u8 pciehp_handle_presence_change(struct slot *p_slot) /* * Card Present */ - info("Card present on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Card present on Slot(%s)\n", p_slot->name); event_type = INT_PRESENCE_ON; } else { /* * Not Present */ - info("Card not present on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Card not present on Slot(%s)\n", p_slot->name); event_type = INT_PRESENCE_OFF; } @@ -135,23 +138,25 @@ u8 pciehp_handle_presence_change(struct slot *p_slot) u8 pciehp_handle_power_fault(struct slot *p_slot) { u32 event_type; + struct controller *ctrl = p_slot->ctrl; /* power fault */ - dbg("pciehp: Power fault interrupt received.\n"); + ctrl_dbg(ctrl, "Power fault interrupt received.\n"); if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) { /* * power fault Cleared */ - info("Power fault cleared on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n", + p_slot->name); event_type = INT_POWER_FAULT_CLEAR; } else { /* * power fault */ - info("Power fault on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Power fault on Slot(%s)\n", p_slot->name); event_type = INT_POWER_FAULT; - info("power fault bit %x set\n", 0); + ctrl_info(ctrl, "power fault bit %x set\n", 0); } queue_interrupt_event(p_slot, event_type); @@ -168,8 +173,9 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot) /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ if (POWER_CTRL(ctrl)) { if (pslot->hpc_ops->power_off_slot(pslot)) { - err("%s: Issue of Slot Power Off command failed\n", - __func__); + ctrl_err(ctrl, + "%s: Issue of Slot Power Off command failed\n", + __func__); return; } } @@ -186,8 +192,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot) if (ATTN_LED(ctrl)) { if (pslot->hpc_ops->set_attention_status(pslot, 1)) { - err("%s: Issue of Set Attention Led command failed\n", - __func__); + ctrl_err(ctrl, "%s: Issue of Set Attention " + "Led command failed\n", __func__); return; } } @@ -205,9 +211,9 @@ static int board_added(struct slot *p_slot) int retval = 0; struct controller *ctrl = p_slot->ctrl; - dbg("%s: slot device, slot offset, hp slot = %d, %d ,%d\n", - __func__, p_slot->device, - ctrl->slot_device_offset, p_slot->hp_slot); + ctrl_dbg(ctrl, "%s: slot device, slot offset, hp slot = %d, 
%d ,%d\n", + __func__, p_slot->device, ctrl->slot_device_offset, + p_slot->hp_slot); if (POWER_CTRL(ctrl)) { /* Power on slot */ @@ -225,22 +231,22 @@ static int board_added(struct slot *p_slot) /* Check link training status */ retval = p_slot->hpc_ops->check_lnk_status(ctrl); if (retval) { - err("%s: Failed to check link status\n", __func__); + ctrl_err(ctrl, "%s: Failed to check link status\n", __func__); set_slot_off(ctrl, p_slot); return retval; } /* Check for a power fault */ if (p_slot->hpc_ops->query_power_fault(p_slot)) { - dbg("%s: power fault detected\n", __func__); + ctrl_dbg(ctrl, "%s: power fault detected\n", __func__); retval = POWER_FAILURE; goto err_exit; } retval = pciehp_configure_device(p_slot); if (retval) { - err("Cannot add device 0x%x:%x\n", p_slot->bus, - p_slot->device); + ctrl_err(ctrl, "Cannot add device 0x%x:%x\n", + p_slot->bus, p_slot->device); goto err_exit; } @@ -272,14 +278,14 @@ static int remove_board(struct slot *p_slot) if (retval) return retval; - dbg("In %s, hp_slot = %d\n", __func__, p_slot->hp_slot); + ctrl_dbg(ctrl, "In %s, hp_slot = %d\n", __func__, p_slot->hp_slot); if (POWER_CTRL(ctrl)) { /* power off slot */ retval = p_slot->hpc_ops->power_off_slot(p_slot); if (retval) { - err("%s: Issue of Slot Disable command failed\n", - __func__); + ctrl_err(ctrl, "%s: Issue of Slot Disable command " + "failed\n", __func__); return retval; } } @@ -320,8 +326,8 @@ static void pciehp_power_thread(struct work_struct *work) switch (p_slot->state) { case POWEROFF_STATE: mutex_unlock(&p_slot->lock); - dbg("%s: disabling bus:device(%x:%x)\n", - __func__, p_slot->bus, p_slot->device); + ctrl_dbg(p_slot->ctrl, "%s: disabling bus:device(%x:%x)\n", + __func__, p_slot->bus, p_slot->device); pciehp_disable_slot(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; @@ -349,7 +355,8 @@ void pciehp_queue_pushbutton_work(struct work_struct *work) info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) { - err("%s: Cannot allocate memory\n", __func__); + ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n", + __func__); return; } info->p_slot = p_slot; @@ -403,12 +410,14 @@ static void handle_button_press_event(struct slot *p_slot) p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (getstatus) { p_slot->state = BLINKINGOFF_STATE; - info("PCI slot #%s - powering off due to button " - "press.\n", p_slot->name); + ctrl_info(ctrl, + "PCI slot #%s - powering off due to button " + "press.\n", p_slot->name); } else { p_slot->state = BLINKINGON_STATE; - info("PCI slot #%s - powering on due to button " - "press.\n", p_slot->name); + ctrl_info(ctrl, + "PCI slot #%s - powering on due to button " + "press.\n", p_slot->name); } /* blink green LED and turn off amber */ if (PWR_LED(ctrl)) @@ -425,8 +434,8 @@ static void handle_button_press_event(struct slot *p_slot) * press the attention again before the 5 sec. 
limit * expires to cancel hot-add or hot-remove */ - info("Button cancel on Slot(%s)\n", p_slot->name); - dbg("%s: button cancel\n", __func__); + ctrl_info(ctrl, "Button cancel on Slot(%s)\n", p_slot->name); + ctrl_dbg(ctrl, "%s: button cancel\n", __func__); cancel_delayed_work(&p_slot->work); if (p_slot->state == BLINKINGOFF_STATE) { if (PWR_LED(ctrl)) @@ -437,8 +446,8 @@ static void handle_button_press_event(struct slot *p_slot) } if (ATTN_LED(ctrl)) p_slot->hpc_ops->set_attention_status(p_slot, 0); - info("PCI slot #%s - action canceled due to button press\n", - p_slot->name); + ctrl_info(ctrl, "PCI slot #%s - action canceled " + "due to button press\n", p_slot->name); p_slot->state = STATIC_STATE; break; case POWEROFF_STATE: @@ -448,11 +457,11 @@ static void handle_button_press_event(struct slot *p_slot) * this means that the previous attention button action * to hot-add or hot-remove is undergoing */ - info("Button ignore on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Button ignore on Slot(%s)\n", p_slot->name); update_slot_info(p_slot); break; default: - warn("Not a valid state\n"); + ctrl_warn(ctrl, "Not a valid state\n"); break; } } @@ -467,7 +476,8 @@ static void handle_surprise_event(struct slot *p_slot) info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) { - err("%s: Cannot allocate memory\n", __func__); + ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n", + __func__); return; } info->p_slot = p_slot; @@ -505,7 +515,7 @@ static void interrupt_event_handler(struct work_struct *work) case INT_PRESENCE_OFF: if (!HP_SUPR_RM(ctrl)) break; - dbg("Surprise Removal\n"); + ctrl_dbg(ctrl, "Surprise Removal\n"); update_slot_info(p_slot); handle_surprise_event(p_slot); break; @@ -522,22 +532,23 @@ int pciehp_enable_slot(struct slot *p_slot) { u8 getstatus = 0; int rc; + struct controller *ctrl = p_slot->ctrl; /* Check to see if (latch closed, card present, power off) */ mutex_lock(&p_slot->ctrl->crit_sect); rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); if (rc || !getstatus) { - info("%s: no adapter on slot(%s)\n", __func__, - p_slot->name); + ctrl_info(ctrl, "%s: no adapter on slot(%s)\n", + __func__, p_slot->name); mutex_unlock(&p_slot->ctrl->crit_sect); return -ENODEV; } if (MRL_SENS(p_slot->ctrl)) { rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if (rc || getstatus) { - info("%s: latch open on slot(%s)\n", __func__, - p_slot->name); + ctrl_info(ctrl, "%s: latch open on slot(%s)\n", + __func__, p_slot->name); mutex_unlock(&p_slot->ctrl->crit_sect); return -ENODEV; } @@ -546,8 +557,8 @@ int pciehp_enable_slot(struct slot *p_slot) if (POWER_CTRL(p_slot->ctrl)) { rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (rc || getstatus) { - info("%s: already enabled on slot(%s)\n", __func__, - p_slot->name); + ctrl_info(ctrl, "%s: already enabled on slot(%s)\n", + __func__, p_slot->name); mutex_unlock(&p_slot->ctrl->crit_sect); return -EINVAL; } @@ -571,6 +582,7 @@ int pciehp_disable_slot(struct slot *p_slot) { u8 getstatus = 0; int ret = 0; + struct controller *ctrl = p_slot->ctrl; if (!p_slot->ctrl) return 1; @@ -581,8 +593,8 @@ int pciehp_disable_slot(struct slot *p_slot) if (!HP_SUPR_RM(p_slot->ctrl)) { ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); if (ret || !getstatus) { - info("%s: no adapter on slot(%s)\n", __func__, - p_slot->name); + ctrl_info(ctrl, "%s: no adapter on slot(%s)\n", + __func__, p_slot->name); mutex_unlock(&p_slot->ctrl->crit_sect); return -ENODEV; } @@ -591,8 +603,8 @@ int pciehp_disable_slot(struct slot 
*p_slot) if (MRL_SENS(p_slot->ctrl)) { ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if (ret || getstatus) { - info("%s: latch open on slot(%s)\n", __func__, - p_slot->name); + ctrl_info(ctrl, "%s: latch open on slot(%s)\n", + __func__, p_slot->name); mutex_unlock(&p_slot->ctrl->crit_sect); return -ENODEV; } @@ -601,8 +613,8 @@ int pciehp_disable_slot(struct slot *p_slot) if (POWER_CTRL(p_slot->ctrl)) { ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (ret || !getstatus) { - info("%s: already disabled slot(%s)\n", __func__, - p_slot->name); + ctrl_info(ctrl, "%s: already disabled slot(%s)\n", + __func__, p_slot->name); mutex_unlock(&p_slot->ctrl->crit_sect); return -EINVAL; } @@ -618,6 +630,7 @@ int pciehp_disable_slot(struct slot *p_slot) int pciehp_sysfs_enable_slot(struct slot *p_slot) { int retval = -ENODEV; + struct controller *ctrl = p_slot->ctrl; mutex_lock(&p_slot->lock); switch (p_slot->state) { @@ -631,15 +644,15 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot) p_slot->state = STATIC_STATE; break; case POWERON_STATE: - info("Slot %s is already in powering on state\n", - p_slot->name); + ctrl_info(ctrl, "Slot %s is already in powering on state\n", + p_slot->name); break; case BLINKINGOFF_STATE: case POWEROFF_STATE: - info("Already enabled on slot %s\n", p_slot->name); + ctrl_info(ctrl, "Already enabled on slot %s\n", p_slot->name); break; default: - err("Not a valid state on slot %s\n", p_slot->name); + ctrl_err(ctrl, "Not a valid state on slot %s\n", p_slot->name); break; } mutex_unlock(&p_slot->lock); @@ -650,6 +663,7 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot) int pciehp_sysfs_disable_slot(struct slot *p_slot) { int retval = -ENODEV; + struct controller *ctrl = p_slot->ctrl; mutex_lock(&p_slot->lock); switch (p_slot->state) { @@ -663,15 +677,15 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot) p_slot->state = STATIC_STATE; break; case POWEROFF_STATE: - info("Slot %s is already in powering off state\n", - p_slot->name); + ctrl_info(ctrl, "Slot %s is already in powering off state\n", + p_slot->name); break; case BLINKINGON_STATE: case POWERON_STATE: - info("Already disabled on slot %s\n", p_slot->name); + ctrl_info(ctrl, "Already disabled on slot %s\n", p_slot->name); break; default: - err("Not a valid state on slot %s\n", p_slot->name); + ctrl_err(ctrl, "Not a valid state on slot %s\n", p_slot->name); break; } mutex_unlock(&p_slot->lock); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 9d934ddee95..8e9530c4c36 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -223,7 +223,7 @@ static void start_int_poll_timer(struct controller *ctrl, int sec) static inline int pciehp_request_irq(struct controller *ctrl) { - int retval, irq = ctrl->pci_dev->irq; + int retval, irq = ctrl->pcie->irq; /* Install interrupt polling timer. 
Start with 10 sec delay */ if (pciehp_poll_mode) { @@ -235,7 +235,8 @@ static inline int pciehp_request_irq(struct controller *ctrl) /* Installs the interrupt handler */ retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl); if (retval) - err("Cannot get irq %d for the hotplug controller\n", irq); + ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n", + irq); return retval; } @@ -244,7 +245,7 @@ static inline void pciehp_free_irq(struct controller *ctrl) if (pciehp_poll_mode) del_timer_sync(&ctrl->poll_timer); else - free_irq(ctrl->pci_dev->irq, ctrl); + free_irq(ctrl->pcie->irq, ctrl); } static int pcie_poll_cmd(struct controller *ctrl) @@ -282,7 +283,7 @@ static void pcie_wait_cmd(struct controller *ctrl, int poll) else rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout); if (!rc) - dbg("Command not completed in 1000 msec\n"); + ctrl_dbg(ctrl, "Command not completed in 1000 msec\n"); } /** @@ -301,7 +302,8 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); if (retval) { - err("%s: Cannot read SLOTSTATUS register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", + __func__); goto out; } @@ -312,26 +314,28 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) * proceed forward to issue the next command according * to spec. Just print out the error message. */ - dbg("%s: CMD_COMPLETED not clear after 1 sec.\n", - __func__); + ctrl_dbg(ctrl, + "%s: CMD_COMPLETED not clear after 1 sec.\n", + __func__); } else if (!NO_CMD_CMPL(ctrl)) { /* * This controller semms to notify of command completed * event even though it supports none of power * controller, attention led, power led and EMI. */ - dbg("%s: Unexpected CMD_COMPLETED. Need to wait for " - "command completed event.\n", __func__); + ctrl_dbg(ctrl, "%s: Unexpected CMD_COMPLETED. Need to " + "wait for command completed event.\n", + __func__); ctrl->no_cmd_complete = 0; } else { - dbg("%s: Unexpected CMD_COMPLETED. Maybe the " - "controller is broken.\n", __func__); + ctrl_dbg(ctrl, "%s: Unexpected CMD_COMPLETED. Maybe " + "the controller is broken.\n", __func__); } } retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); if (retval) { - err("%s: Cannot read SLOTCTRL register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); goto out; } @@ -341,7 +345,8 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) smp_mb(); retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl); if (retval) - err("%s: Cannot write to SLOTCTRL register\n", __func__); + ctrl_err(ctrl, "%s: Cannot write to SLOTCTRL register\n", + __func__); /* * Wait for command completion. 
@@ -370,14 +375,15 @@ static int hpc_check_lnk_status(struct controller *ctrl) retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); if (retval) { - err("%s: Cannot read LNKSTATUS register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", + __func__); return retval; } - dbg("%s: lnk_status = %x\n", __func__, lnk_status); + ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) || !(lnk_status & NEG_LINK_WD)) { - err("%s : Link Training Error occurs \n", __func__); + ctrl_err(ctrl, "%s : Link Training Error occurs \n", __func__); retval = -1; return retval; } @@ -394,12 +400,12 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status) retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); if (retval) { - err("%s: Cannot read SLOTCTRL register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); return retval; } - dbg("%s: SLOTCTRL %x, value read %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", + __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6; @@ -433,11 +439,11 @@ static int hpc_get_power_status(struct slot *slot, u8 *status) retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); if (retval) { - err("%s: Cannot read SLOTCTRL register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); return retval; } - dbg("%s: SLOTCTRL %x value read %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", + __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); pwr_state = (slot_ctrl & PWR_CTRL) >> 10; @@ -464,7 +470,8 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status) retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); if (retval) { - err("%s: Cannot read SLOTSTATUS register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", + __func__); return retval; } @@ -482,7 +489,8 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status) retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); if (retval) { - err("%s: Cannot read SLOTSTATUS register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", + __func__); return retval; } card_state = (u8)((slot_status & PRSN_STATE) >> 6); @@ -500,7 +508,7 @@ static int hpc_query_power_fault(struct slot *slot) retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); if (retval) { - err("%s: Cannot check for power fault\n", __func__); + ctrl_err(ctrl, "%s: Cannot check for power fault\n", __func__); return retval; } pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1); @@ -516,7 +524,7 @@ static int hpc_get_emi_status(struct slot *slot, u8 *status) retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); if (retval) { - err("%s : Cannot check EMI status\n", __func__); + ctrl_err(ctrl, "%s : Cannot check EMI status\n", __func__); return retval; } *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT; @@ -560,8 +568,8 @@ static int hpc_set_attention_status(struct slot *slot, u8 value) return -1; } rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); - dbg("%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", + __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); return rc; } @@ -575,8 +583,8 @@ static void hpc_set_green_led_on(struct slot *slot) slot_cmd = 0x0100; cmd_mask = PWR_LED_CTRL; pcie_write_cmd(ctrl, slot_cmd, cmd_mask); - 
dbg("%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", + __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); } static void hpc_set_green_led_off(struct slot *slot) @@ -588,8 +596,8 @@ static void hpc_set_green_led_off(struct slot *slot) slot_cmd = 0x0300; cmd_mask = PWR_LED_CTRL; pcie_write_cmd(ctrl, slot_cmd, cmd_mask); - dbg("%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", + __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); } static void hpc_set_green_led_blink(struct slot *slot) @@ -601,8 +609,8 @@ static void hpc_set_green_led_blink(struct slot *slot) slot_cmd = 0x0200; cmd_mask = PWR_LED_CTRL; pcie_write_cmd(ctrl, slot_cmd, cmd_mask); - dbg("%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", + __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); } static int hpc_power_on_slot(struct slot * slot) @@ -613,20 +621,22 @@ static int hpc_power_on_slot(struct slot * slot) u16 slot_status; int retval = 0; - dbg("%s: slot->hp_slot %x\n", __func__, slot->hp_slot); + ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot); /* Clear sticky power-fault bit from previous power failures */ retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); if (retval) { - err("%s: Cannot read SLOTSTATUS register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", + __func__); return retval; } slot_status &= PWR_FAULT_DETECTED; if (slot_status) { retval = pciehp_writew(ctrl, SLOTSTATUS, slot_status); if (retval) { - err("%s: Cannot write to SLOTSTATUS register\n", - __func__); + ctrl_err(ctrl, + "%s: Cannot write to SLOTSTATUS register\n", + __func__); return retval; } } @@ -644,11 +654,12 @@ static int hpc_power_on_slot(struct slot * slot) retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); if (retval) { - err("%s: Write %x command failed!\n", __func__, slot_cmd); + ctrl_err(ctrl, "%s: Write %x command failed!\n", + __func__, slot_cmd); return -1; } - dbg("%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", + __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); return retval; } @@ -694,7 +705,7 @@ static int hpc_power_off_slot(struct slot * slot) int retval = 0; int changed; - dbg("%s: slot->hp_slot %x\n", __func__, slot->hp_slot); + ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot); /* * Set Bad DLLP Mask bit in Correctable Error Mask @@ -722,12 +733,12 @@ static int hpc_power_off_slot(struct slot * slot) retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); if (retval) { - err("%s: Write command failed!\n", __func__); + ctrl_err(ctrl, "%s: Write command failed!\n", __func__); retval = -1; goto out; } - dbg("%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", + __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); out: if (changed) pcie_unmask_bad_dllp(ctrl); @@ -749,7 +760,8 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) intr_loc = 0; do { if (pciehp_readw(ctrl, SLOTSTATUS, &detected)) { - err("%s: Cannot read SLOTSTATUS\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n", + __func__); return IRQ_NONE; } @@ -760,12 +772,13 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) if (!intr_loc) return IRQ_NONE; if (detected && pciehp_writew(ctrl, SLOTSTATUS, 
detected)) { - err("%s: Cannot write to SLOTSTATUS\n", __func__); + ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n", + __func__); return IRQ_NONE; } } while (detected); - dbg("%s: intr_loc %x\n", __FUNCTION__, intr_loc); + ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc); /* Check Command Complete Interrupt Pending */ if (intr_loc & CMD_COMPLETED) { @@ -807,7 +820,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value) retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); if (retval) { - err("%s: Cannot read LNKCAP register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); return retval; } @@ -821,7 +834,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value) } *value = lnk_speed; - dbg("Max link speed = %d\n", lnk_speed); + ctrl_dbg(ctrl, "Max link speed = %d\n", lnk_speed); return retval; } @@ -836,7 +849,7 @@ static int hpc_get_max_lnk_width(struct slot *slot, retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); if (retval) { - err("%s: Cannot read LNKCAP register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); return retval; } @@ -871,7 +884,7 @@ static int hpc_get_max_lnk_width(struct slot *slot, } *value = lnk_wdth; - dbg("Max link width = %d\n", lnk_wdth); + ctrl_dbg(ctrl, "Max link width = %d\n", lnk_wdth); return retval; } @@ -885,7 +898,8 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value) retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); if (retval) { - err("%s: Cannot read LNKSTATUS register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", + __func__); return retval; } @@ -899,7 +913,7 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value) } *value = lnk_speed; - dbg("Current link speed = %d\n", lnk_speed); + ctrl_dbg(ctrl, "Current link speed = %d\n", lnk_speed); return retval; } @@ -914,7 +928,8 @@ static int hpc_get_cur_lnk_width(struct slot *slot, retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); if (retval) { - err("%s: Cannot read LNKSTATUS register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", + __func__); return retval; } @@ -949,7 +964,7 @@ static int hpc_get_cur_lnk_width(struct slot *slot, } *value = lnk_wdth; - dbg("Current link width = %d\n", lnk_wdth); + ctrl_dbg(ctrl, "Current link width = %d\n", lnk_wdth); return retval; } @@ -998,7 +1013,8 @@ int pcie_enable_notification(struct controller *ctrl) PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; if (pcie_write_cmd(ctrl, cmd, mask)) { - err("%s: Cannot enable software notification\n", __func__); + ctrl_err(ctrl, "%s: Cannot enable software notification\n", + __func__); return -1; } return 0; @@ -1010,7 +1026,8 @@ static void pcie_disable_notification(struct controller *ctrl) mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; if (pcie_write_cmd(ctrl, 0, mask)) - warn("%s: Cannot disable software notification\n", __func__); + ctrl_warn(ctrl, "%s: Cannot disable software notification\n", + __func__); } static int pcie_init_notification(struct controller *ctrl) @@ -1071,34 +1088,45 @@ static inline void dbg_ctrl(struct controller *ctrl) if (!pciehp_debug) return; - dbg("Hotplug Controller:\n"); - dbg(" Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n", pci_name(pdev), pdev->irq); - dbg(" Vendor ID : 0x%04x\n", pdev->vendor); - dbg(" Device ID : 0x%04x\n", pdev->device); - dbg(" Subsystem ID : 0x%04x\n", 
pdev->subsystem_device); - dbg(" Subsystem Vendor ID : 0x%04x\n", pdev->subsystem_vendor); - dbg(" PCIe Cap offset : 0x%02x\n", ctrl->cap_base); + ctrl_info(ctrl, "Hotplug Controller:\n"); + ctrl_info(ctrl, " Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n", + pci_name(pdev), pdev->irq); + ctrl_info(ctrl, " Vendor ID : 0x%04x\n", pdev->vendor); + ctrl_info(ctrl, " Device ID : 0x%04x\n", pdev->device); + ctrl_info(ctrl, " Subsystem ID : 0x%04x\n", + pdev->subsystem_device); + ctrl_info(ctrl, " Subsystem Vendor ID : 0x%04x\n", + pdev->subsystem_vendor); + ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n", ctrl->cap_base); for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { if (!pci_resource_len(pdev, i)) continue; - dbg(" PCI resource [%d] : 0x%llx@0x%llx\n", i, - (unsigned long long)pci_resource_len(pdev, i), - (unsigned long long)pci_resource_start(pdev, i)); + ctrl_info(ctrl, " PCI resource [%d] : 0x%llx@0x%llx\n", + i, (unsigned long long)pci_resource_len(pdev, i), + (unsigned long long)pci_resource_start(pdev, i)); } - dbg("Slot Capabilities : 0x%08x\n", ctrl->slot_cap); - dbg(" Physical Slot Number : %d\n", ctrl->first_slot); - dbg(" Attention Button : %3s\n", ATTN_BUTTN(ctrl) ? "yes" : "no"); - dbg(" Power Controller : %3s\n", POWER_CTRL(ctrl) ? "yes" : "no"); - dbg(" MRL Sensor : %3s\n", MRL_SENS(ctrl) ? "yes" : "no"); - dbg(" Attention Indicator : %3s\n", ATTN_LED(ctrl) ? "yes" : "no"); - dbg(" Power Indicator : %3s\n", PWR_LED(ctrl) ? "yes" : "no"); - dbg(" Hot-Plug Surprise : %3s\n", HP_SUPR_RM(ctrl) ? "yes" : "no"); - dbg(" EMI Present : %3s\n", EMI(ctrl) ? "yes" : "no"); - dbg(" Command Completed : %3s\n", NO_CMD_CMPL(ctrl)? "no" : "yes"); + ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); + ctrl_info(ctrl, " Physical Slot Number : %d\n", ctrl->first_slot); + ctrl_info(ctrl, " Attention Button : %3s\n", + ATTN_BUTTN(ctrl) ? "yes" : "no"); + ctrl_info(ctrl, " Power Controller : %3s\n", + POWER_CTRL(ctrl) ? "yes" : "no"); + ctrl_info(ctrl, " MRL Sensor : %3s\n", + MRL_SENS(ctrl) ? "yes" : "no"); + ctrl_info(ctrl, " Attention Indicator : %3s\n", + ATTN_LED(ctrl) ? "yes" : "no"); + ctrl_info(ctrl, " Power Indicator : %3s\n", + PWR_LED(ctrl) ? "yes" : "no"); + ctrl_info(ctrl, " Hot-Plug Surprise : %3s\n", + HP_SUPR_RM(ctrl) ? "yes" : "no"); + ctrl_info(ctrl, " EMI Present : %3s\n", + EMI(ctrl) ? "yes" : "no"); + ctrl_info(ctrl, " Command Completed : %3s\n", + NO_CMD_CMPL(ctrl) ? 
"no" : "yes"); pciehp_readw(ctrl, SLOTSTATUS, ®16); - dbg("Slot Status : 0x%04x\n", reg16); + ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16); pciehp_readw(ctrl, SLOTCTRL, ®16); - dbg("Slot Control : 0x%04x\n", reg16); + ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16); } struct controller *pcie_init(struct pcie_device *dev) @@ -1109,19 +1137,21 @@ struct controller *pcie_init(struct pcie_device *dev) ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) { - err("%s : out of memory\n", __func__); + dev_err(&dev->device, "%s : out of memory\n", __func__); goto abort; } INIT_LIST_HEAD(&ctrl->slot_list); + ctrl->pcie = dev; ctrl->pci_dev = pdev; ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (!ctrl->cap_base) { - err("%s: Cannot find PCI Express capability\n", __func__); + ctrl_err(ctrl, "%s: Cannot find PCI Express capability\n", + __func__); goto abort; } if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) { - err("%s: Cannot read SLOTCAP register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTCAP register\n", __func__); goto abort; } @@ -1161,9 +1191,9 @@ struct controller *pcie_init(struct pcie_device *dev) goto abort_ctrl; } - info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", - pdev->vendor, pdev->device, - pdev->subsystem_vendor, pdev->subsystem_device); + ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", + pdev->vendor, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device); if (pcie_init_slot(ctrl)) goto abort_ctrl; diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index 6040dcceb25..ffd11148fbe 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c @@ -198,18 +198,20 @@ int pciehp_configure_device(struct slot *p_slot) struct pci_dev *dev; struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; int num, fn; + struct controller *ctrl = p_slot->ctrl; dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0)); if (dev) { - err("Device %s already exists at %x:%x, cannot hot-add\n", - pci_name(dev), p_slot->bus, p_slot->device); + ctrl_err(ctrl, + "Device %s already exists at %x:%x, cannot hot-add\n", + pci_name(dev), p_slot->bus, p_slot->device); pci_dev_put(dev); return -EINVAL; } num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0)); if (num == 0) { - err("No new device found\n"); + ctrl_err(ctrl, "No new device found\n"); return -ENODEV; } @@ -218,8 +220,8 @@ int pciehp_configure_device(struct slot *p_slot) if (!dev) continue; if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { - err("Cannot hot-add display device %s\n", - pci_name(dev)); + ctrl_err(ctrl, "Cannot hot-add display device %s\n", + pci_name(dev)); pci_dev_put(dev); continue; } @@ -244,9 +246,10 @@ int pciehp_unconfigure_device(struct slot *p_slot) u8 presence = 0; struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; u16 command; + struct controller *ctrl = p_slot->ctrl; - dbg("%s: bus/dev = %x/%x\n", __func__, p_slot->bus, - p_slot->device); + ctrl_dbg(ctrl, "%s: bus/dev = %x/%x\n", __func__, + p_slot->bus, p_slot->device); ret = p_slot->hpc_ops->get_adapter_status(p_slot, &presence); if (ret) presence = 0; @@ -257,16 +260,17 @@ int pciehp_unconfigure_device(struct slot *p_slot) if (!temp) continue; if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) { - err("Cannot remove display device %s\n", - pci_name(temp)); + ctrl_err(ctrl, "Cannot remove display device %s\n", + pci_name(temp)); pci_dev_put(temp); continue; } if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) { 
pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl); if (bctl & PCI_BRIDGE_CTL_VGA) { - err("Cannot remove display device %s\n", - pci_name(temp)); + ctrl_err(ctrl, + "Cannot remove display device %s\n", + pci_name(temp)); pci_dev_put(temp); continue; } diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h index 7d5921b1ee7..419919a87b0 100644 --- a/drivers/pci/hotplug/rpaphp.h +++ b/drivers/pci/hotplug/rpaphp.h @@ -46,10 +46,10 @@ #define PRESENT 1 /* Card in slot */ #define MY_NAME "rpaphp" -extern int debug; +extern int rpaphp_debug; #define dbg(format, arg...) \ do { \ - if (debug) \ + if (rpaphp_debug) \ printk(KERN_DEBUG "%s: " format, \ MY_NAME , ## arg); \ } while (0) diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index 1f84f402acd..95d02a08fdc 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c @@ -37,7 +37,7 @@ /* and pci_do_scan_bus */ #include "rpaphp.h" -int debug; +int rpaphp_debug; LIST_HEAD(rpaphp_slot_head); #define DRIVER_VERSION "0.1" @@ -50,7 +50,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); -module_param(debug, bool, 0644); +module_param_named(debug, rpaphp_debug, bool, 0644); /** * set_attention_status - set attention LED diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c index 5acfd4f3d4c..513e1e28239 100644 --- a/drivers/pci/hotplug/rpaphp_pci.c +++ b/drivers/pci/hotplug/rpaphp_pci.c @@ -123,7 +123,7 @@ int rpaphp_enable_slot(struct slot *slot) slot->state = CONFIGURED; } - if (debug) { + if (rpaphp_debug) { struct pci_dev *dev; dbg("%s: pci_devs of slot[%s]\n", __func__, slot->dn->full_name); list_for_each_entry (dev, &bus->devices, bus_list) diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c index 9b714ea93d2..50884507b8b 100644 --- a/drivers/pci/hotplug/rpaphp_slot.c +++ b/drivers/pci/hotplug/rpaphp_slot.c @@ -147,9 +147,5 @@ int rpaphp_register_slot(struct slot *slot) list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head); info("Slot [%s] registered\n", slot->name); return 0; - -sysfs_fail: - pci_hp_deregister(php_slot); - return retval; } diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c index 279c940a003..bf7d6ce9bbb 100644 --- a/drivers/pci/htirq.c +++ b/drivers/pci/htirq.c @@ -126,7 +126,8 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update) cfg->msg.address_hi = 0xffffffff; irq = create_irq(); - if (irq < 0) { + + if (irq <= 0) { kfree(cfg); return -EBUSY; } diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index c3edcdc08e7..a2692724b68 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -18,6 +18,7 @@ * Author: Ashok Raj <ashok.raj@intel.com> * Author: Shaohua Li <shaohua.li@intel.com> * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> + * Author: Fenghua Yu <fenghua.yu@intel.com> */ #include <linux/init.h> @@ -33,13 +34,15 @@ #include <linux/dma-mapping.h> #include <linux/mempool.h> #include <linux/timer.h> -#include "iova.h" -#include "intel-iommu.h" -#include <asm/proto.h> /* force_iommu in this header in x86-64*/ +#include <linux/iova.h> +#include <linux/intel-iommu.h> #include <asm/cacheflush.h> #include <asm/iommu.h> #include "pci.h" +#define ROOT_SIZE VTD_PAGE_SIZE +#define CONTEXT_SIZE VTD_PAGE_SIZE + #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) @@ -49,8 +52,6 @@ #define 
DEFAULT_DOMAIN_ADDRESS_WIDTH 48 -#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */ - #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) @@ -58,8 +59,6 @@ static void flush_unmaps_timeout(unsigned long data); DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); -static struct intel_iommu *g_iommus; - #define HIGH_WATER_MARK 250 struct deferred_flush_tables { int next; @@ -80,7 +79,7 @@ static long list_size; static void domain_remove_dev_info(struct dmar_domain *domain); -static int dmar_disabled; +int dmar_disabled; static int __initdata dmar_map_gfx = 1; static int dmar_forcedac; static int intel_iommu_strict; @@ -160,7 +159,7 @@ static inline void *alloc_domain_mem(void) return iommu_kmem_cache_alloc(iommu_domain_cache); } -static inline void free_domain_mem(void *vaddr) +static void free_domain_mem(void *vaddr) { kmem_cache_free(iommu_domain_cache, vaddr); } @@ -185,13 +184,6 @@ void free_iova_mem(struct iova *iova) kmem_cache_free(iommu_iova_cache, iova); } -static inline void __iommu_flush_cache( - struct intel_iommu *iommu, void *addr, int size) -{ - if (!ecap_coherent(iommu->ecap)) - clflush_cache_range(addr, size); -} - /* Gets context entry for a given bus and devfn */ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, u8 bus, u8 devfn) @@ -210,7 +202,7 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, spin_unlock_irqrestore(&iommu->lock, flags); return NULL; } - __iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K); + __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); phy_addr = virt_to_phys((void *)context); set_root_value(root, phy_addr); set_root_present(root); @@ -356,7 +348,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) return NULL; } __iommu_flush_cache(domain->iommu, tmp_page, - PAGE_SIZE_4K); + PAGE_SIZE); dma_set_pte_addr(*pte, virt_to_phys(tmp_page)); /* * high level table always sets r/w, last level page @@ -419,13 +411,13 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) start &= (((u64)1) << addr_width) - 1; end &= (((u64)1) << addr_width) - 1; /* in case it's partial page */ - start = PAGE_ALIGN_4K(start); - end &= PAGE_MASK_4K; + start = PAGE_ALIGN(start); + end &= PAGE_MASK; /* we don't need lock here, nobody else touches the iova range */ while (start < end) { dma_pte_clear_one(domain, start); - start += PAGE_SIZE_4K; + start += VTD_PAGE_SIZE; } } @@ -479,7 +471,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) if (!root) return -ENOMEM; - __iommu_flush_cache(iommu, root, PAGE_SIZE_4K); + __iommu_flush_cache(iommu, root, ROOT_SIZE); spin_lock_irqsave(&iommu->lock, flags); iommu->root_entry = root; @@ -488,19 +480,6 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) return 0; } -#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ -{\ - cycles_t start_time = get_cycles();\ - while (1) {\ - sts = op (iommu->reg + offset);\ - if (cond)\ - break;\ - if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ - panic("DMAR hardware is malfunctioning\n");\ - cpu_relax();\ - }\ -} - static void iommu_set_root_entry(struct intel_iommu *iommu) { void *addr; @@ -587,31 +566,10 @@ static int __iommu_flush_context(struct intel_iommu *iommu, spin_unlock_irqrestore(&iommu->register_lock, flag); - /* flush context entry will implictly flush write buffer */ + /* flush context entry will implicitly flush write buffer */ return 0; } -static int inline iommu_flush_context_global(struct 
intel_iommu *iommu, - int non_present_entry_flush) -{ - return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, - non_present_entry_flush); -} - -static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did, - int non_present_entry_flush) -{ - return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL, - non_present_entry_flush); -} - -static int inline iommu_flush_context_device(struct intel_iommu *iommu, - u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush) -{ - return __iommu_flush_context(iommu, did, source_id, function_mask, - DMA_CCMD_DEVICE_INVL, non_present_entry_flush); -} - /* return value determine if we need a write buffer flush */ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, unsigned int size_order, u64 type, @@ -679,37 +637,25 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, printk(KERN_ERR"IOMMU: flush IOTLB failed\n"); if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", - DMA_TLB_IIRG(type), DMA_TLB_IAIG(val)); - /* flush context entry will implictly flush write buffer */ + (unsigned long long)DMA_TLB_IIRG(type), + (unsigned long long)DMA_TLB_IAIG(val)); + /* flush iotlb entry will implicitly flush write buffer */ return 0; } -static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu, - int non_present_entry_flush) -{ - return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, - non_present_entry_flush); -} - -static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did, - int non_present_entry_flush) -{ - return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, - non_present_entry_flush); -} - static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, u64 addr, unsigned int pages, int non_present_entry_flush) { unsigned int mask; - BUG_ON(addr & (~PAGE_MASK_4K)); + BUG_ON(addr & (~VTD_PAGE_MASK)); BUG_ON(pages == 0); /* Fallback to domain selective flush if no PSI support */ if (!cap_pgsel_inv(iommu->cap)) - return iommu_flush_iotlb_dsi(iommu, did, - non_present_entry_flush); + return iommu->flush.flush_iotlb(iommu, did, 0, 0, + DMA_TLB_DSI_FLUSH, + non_present_entry_flush); /* * PSI requires page size to be 2 ^ x, and the base address is naturally @@ -718,11 +664,12 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, mask = ilog2(__roundup_pow_of_two(pages)); /* Fallback to domain selective flush if size is too big */ if (mask > cap_max_amask_val(iommu->cap)) - return iommu_flush_iotlb_dsi(iommu, did, - non_present_entry_flush); + return iommu->flush.flush_iotlb(iommu, did, 0, 0, + DMA_TLB_DSI_FLUSH, non_present_entry_flush); - return __iommu_flush_iotlb(iommu, did, addr, mask, - DMA_TLB_PSI_FLUSH, non_present_entry_flush); + return iommu->flush.flush_iotlb(iommu, did, addr, mask, + DMA_TLB_PSI_FLUSH, + non_present_entry_flush); } static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) @@ -855,7 +802,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg) } static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type, - u8 fault_reason, u16 source_id, u64 addr) + u8 fault_reason, u16 source_id, unsigned long long addr) { const char *reason; @@ -990,6 +937,8 @@ static int iommu_init_domains(struct intel_iommu *iommu) return -ENOMEM; } + spin_lock_init(&iommu->lock); + /* * if Caching mode is set, then invalid translations are tagged * with domainid 0. Hence we need to pre-allocate it. 
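The flush rework above replaces direct calls to __iommu_flush_context()/__iommu_flush_iotlb() with indirect calls through iommu->flush, which lets init_dmars() (further down) choose register-based or queued invalidation per IOMMU at boot. The declaration now lives in linux/intel-iommu.h; its shape, inferred from the call sites in this file:

/* Inferred from usage such as iommu->flush.flush_iotlb(iommu, did, addr,
 * mask, DMA_TLB_PSI_FLUSH, flag) above; the authoritative declaration
 * is in linux/intel-iommu.h. */
struct iommu_flush {
	int (*flush_context)(struct intel_iommu *iommu, u16 did,
			     u16 source_id, u8 function_mask, u64 type,
			     int non_present_entry_flush);
	int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
			   unsigned int size_order, u64 type,
			   int non_present_entry_flush);
};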
@@ -998,62 +947,15 @@ static int iommu_init_domains(struct intel_iommu *iommu) set_bit(0, iommu->domain_ids); return 0; } -static struct intel_iommu *alloc_iommu(struct intel_iommu *iommu, - struct dmar_drhd_unit *drhd) -{ - int ret; - int map_size; - u32 ver; - - iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K); - if (!iommu->reg) { - printk(KERN_ERR "IOMMU: can't map the region\n"); - goto error; - } - iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); - iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); - - /* the registers might be more than one page */ - map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), - cap_max_fault_reg_offset(iommu->cap)); - map_size = PAGE_ALIGN_4K(map_size); - if (map_size > PAGE_SIZE_4K) { - iounmap(iommu->reg); - iommu->reg = ioremap(drhd->reg_base_addr, map_size); - if (!iommu->reg) { - printk(KERN_ERR "IOMMU: can't map the region\n"); - goto error; - } - } - - ver = readl(iommu->reg + DMAR_VER_REG); - pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", - drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), - iommu->cap, iommu->ecap); - ret = iommu_init_domains(iommu); - if (ret) - goto error_unmap; - spin_lock_init(&iommu->lock); - spin_lock_init(&iommu->register_lock); - drhd->iommu = iommu; - return iommu; -error_unmap: - iounmap(iommu->reg); -error: - kfree(iommu); - return NULL; -} static void domain_exit(struct dmar_domain *domain); -static void free_iommu(struct intel_iommu *iommu) + +void free_dmar_iommu(struct intel_iommu *iommu) { struct dmar_domain *domain; int i; - if (!iommu) - return; - i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); for (; i < cap_ndoms(iommu->cap); ) { domain = iommu->domains[i]; @@ -1078,10 +980,6 @@ static void free_iommu(struct intel_iommu *iommu) /* free context mapping */ free_context_table(iommu); - - if (iommu->reg) - iounmap(iommu->reg); - kfree(iommu); } static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) @@ -1157,9 +1055,9 @@ static void dmar_init_reserved_ranges(void) if (!r->flags || !(r->flags & IORESOURCE_MEM)) continue; addr = r->start; - addr &= PAGE_MASK_4K; + addr &= PAGE_MASK; size = r->end - addr; - size = PAGE_ALIGN_4K(size); + size = PAGE_ALIGN(size); iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), IOVA_PFN(size + addr) - 1); if (!iova) @@ -1221,7 +1119,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width) domain->pgd = (struct dma_pte *)alloc_pgtable_page(); if (!domain->pgd) return -ENOMEM; - __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K); + __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); return 0; } @@ -1237,7 +1135,7 @@ static void domain_exit(struct dmar_domain *domain) /* destroy iovas */ put_iova_domain(&domain->iovad); end = DOMAIN_MAX_ADDR(domain->gaw); - end = end & (~PAGE_MASK_4K); + end = end & (~PAGE_MASK); /* clear ptes */ dma_pte_clear_range(domain, 0, end); @@ -1277,11 +1175,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain, __iommu_flush_cache(iommu, context, sizeof(*context)); /* it's a non-present to present mapping */ - if (iommu_flush_context_device(iommu, domain->id, - (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1)) + if (iommu->flush.flush_context(iommu, domain->id, + (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, + DMA_CCMD_DEVICE_INVL, 1)) iommu_flush_write_buffer(iommu); else - iommu_flush_iotlb_dsi(iommu, 0, 0); + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0); + spin_unlock_irqrestore(&iommu->lock, flags); return 0; } @@ -1356,22 
+1256,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, u64 start_pfn, end_pfn; struct dma_pte *pte; int index; + int addr_width = agaw_to_width(domain->agaw); + + hpa &= (((u64)1) << addr_width) - 1; if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) return -EINVAL; - iova &= PAGE_MASK_4K; - start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K; - end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K; + iova &= PAGE_MASK; + start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT; + end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT; index = 0; while (start_pfn < end_pfn) { - pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index); + pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index); if (!pte) return -ENOMEM; /* We don't need lock here, nobody else * touches the iova range */ BUG_ON(dma_pte_addr(*pte)); - dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K); + dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT); dma_set_pte_prot(*pte, prot); __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); start_pfn++; @@ -1383,8 +1286,10 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn) { clear_context_table(domain->iommu, bus, devfn); - iommu_flush_context_global(domain->iommu, 0); - iommu_flush_iotlb_global(domain->iommu, 0); + domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0, + DMA_CCMD_GLOBAL_INVL, 0); + domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0, + DMA_TLB_GLOBAL_FLUSH, 0); } static void domain_remove_dev_info(struct dmar_domain *domain) @@ -1414,7 +1319,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain) * find_domain * Note: we use struct pci_dev->dev.archdata.iommu stores the info */ -struct dmar_domain * +static struct dmar_domain * find_domain(struct pci_dev *pdev) { struct device_domain_info *info; @@ -1426,37 +1331,6 @@ find_domain(struct pci_dev *pdev) return NULL; } -static int dmar_pci_device_match(struct pci_dev *devices[], int cnt, - struct pci_dev *dev) -{ - int index; - - while (dev) { - for (index = 0; index < cnt; index++) - if (dev == devices[index]) - return 1; - - /* Check our parent */ - dev = dev->bus->self; - } - - return 0; -} - -static struct dmar_drhd_unit * -dmar_find_matched_drhd_unit(struct pci_dev *dev) -{ - struct dmar_drhd_unit *drhd = NULL; - - list_for_each_entry(drhd, &dmar_drhd_units, list) { - if (drhd->include_all || dmar_pci_device_match(drhd->devices, - drhd->devices_cnt, dev)) - return drhd; - } - - return NULL; -} - /* domain is initialized */ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) { @@ -1578,11 +1452,13 @@ error: return find_domain(pdev); } -static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end) +static int iommu_prepare_identity_map(struct pci_dev *pdev, + unsigned long long start, + unsigned long long end) { struct dmar_domain *domain; unsigned long size; - u64 base; + unsigned long long base; int ret; printk(KERN_INFO @@ -1594,9 +1470,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end) return -ENOMEM; /* The address might not be aligned */ - base = start & PAGE_MASK_4K; + base = start & PAGE_MASK; size = end - base; - size = PAGE_ALIGN_4K(size); + size = PAGE_ALIGN(size); if (!reserve_iova(&domain->iovad, IOVA_PFN(base), IOVA_PFN(base + size) - 1)) { printk(KERN_ERR "IOMMU: reserve iova failed\n"); @@ -1729,8 +1605,6 @@ int __init init_dmars(void) * endfor */ for_each_drhd_unit(drhd) { - if 
(drhd->ignored) - continue; g_num_of_iommus++; /* * lock not needed as this is only incremented in the single @@ -1739,12 +1613,6 @@ int __init init_dmars(void) */ } - g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL); - if (!g_iommus) { - ret = -ENOMEM; - goto error; - } - deferred_flush = kzalloc(g_num_of_iommus * sizeof(struct deferred_flush_tables), GFP_KERNEL); if (!deferred_flush) { @@ -1752,16 +1620,15 @@ int __init init_dmars(void) goto error; } - i = 0; for_each_drhd_unit(drhd) { if (drhd->ignored) continue; - iommu = alloc_iommu(&g_iommus[i], drhd); - i++; - if (!iommu) { - ret = -ENOMEM; + + iommu = drhd->iommu; + + ret = iommu_init_domains(iommu); + if (ret) goto error; - } /* * TBD: @@ -1775,6 +1642,28 @@ int __init init_dmars(void) } } + for_each_drhd_unit(drhd) { + if (drhd->ignored) + continue; + + iommu = drhd->iommu; + if (dmar_enable_qi(iommu)) { + /* + * Queued Invalidate not enabled, use Register Based + * Invalidate + */ + iommu->flush.flush_context = __iommu_flush_context; + iommu->flush.flush_iotlb = __iommu_flush_iotlb; + printk(KERN_INFO "IOMMU 0x%Lx: using Register based " + "invalidation\n", drhd->reg_base_addr); + } else { + iommu->flush.flush_context = qi_flush_context; + iommu->flush.flush_iotlb = qi_flush_iotlb; + printk(KERN_INFO "IOMMU 0x%Lx: using Queued " + "invalidation\n", drhd->reg_base_addr); + } + } + /* * For each rmrr * for each dev attached to rmrr @@ -1827,9 +1716,10 @@ int __init init_dmars(void) iommu_set_root_entry(iommu); - iommu_flush_context_global(iommu, 0); - iommu_flush_iotlb_global(iommu, 0); - + iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, + 0); + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, + 0); iommu_disable_protect_mem_regions(iommu); ret = iommu_enable_translation(iommu); @@ -1845,15 +1735,14 @@ error: iommu = drhd->iommu; free_iommu(iommu); } - kfree(g_iommus); return ret; } static inline u64 aligned_size(u64 host_addr, size_t size) { u64 addr; - addr = (host_addr & (~PAGE_MASK_4K)) + size; - return PAGE_ALIGN_4K(addr); + addr = (host_addr & (~PAGE_MASK)) + size; + return PAGE_ALIGN(addr); } struct iova * @@ -1867,20 +1756,20 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end) return NULL; piova = alloc_iova(&domain->iovad, - size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1); + size >> PAGE_SHIFT, IOVA_PFN(end), 1); return piova; } static struct iova * __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, - size_t size) + size_t size, u64 dma_mask) { struct pci_dev *pdev = to_pci_dev(dev); struct iova *iova = NULL; - if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) { - iova = iommu_alloc_iova(domain, size, pdev->dma_mask); - } else { + if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac) + iova = iommu_alloc_iova(domain, size, dma_mask); + else { /* * First try to allocate an io virtual address in * DMA_32BIT_MASK and if that fails then try allocating @@ -1888,7 +1777,7 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, */ iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK); if (!iova) - iova = iommu_alloc_iova(domain, size, pdev->dma_mask); + iova = iommu_alloc_iova(domain, size, dma_mask); } if (!iova) { @@ -1927,12 +1816,12 @@ get_valid_domain_for_dev(struct pci_dev *pdev) return domain; } -static dma_addr_t -intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) +static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, + size_t size, int dir, u64 dma_mask) { struct pci_dev *pdev 
= to_pci_dev(hwdev); struct dmar_domain *domain; - unsigned long start_paddr; + phys_addr_t start_paddr; struct iova *iova; int prot = 0; int ret; @@ -1947,11 +1836,11 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) size = aligned_size((u64)paddr, size); - iova = __intel_alloc_iova(hwdev, domain, size); + iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); if (!iova) goto error; - start_paddr = iova->pfn_lo << PAGE_SHIFT_4K; + start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; /* * Check if DMAR supports zero-length reads on write only @@ -1969,30 +1858,33 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) * is not a big problem */ ret = domain_page_mapping(domain, start_paddr, - ((u64)paddr) & PAGE_MASK_4K, size, prot); + ((u64)paddr) & PAGE_MASK, size, prot); if (ret) goto error; - pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n", - pci_name(pdev), size, (u64)paddr, - size, (u64)start_paddr, dir); - /* it's a non-present to present mapping */ ret = iommu_flush_iotlb_psi(domain->iommu, domain->id, - start_paddr, size >> PAGE_SHIFT_4K, 1); + start_paddr, size >> VTD_PAGE_SHIFT, 1); if (ret) iommu_flush_write_buffer(domain->iommu); - return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K))); + return start_paddr + ((u64)paddr & (~PAGE_MASK)); error: if (iova) __free_iova(&domain->iovad, iova); printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n", - pci_name(pdev), size, (u64)paddr, dir); + pci_name(pdev), size, (unsigned long long)paddr, dir); return 0; } +dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr, + size_t size, int dir) +{ + return __intel_map_single(hwdev, paddr, size, dir, + to_pci_dev(hwdev)->dma_mask); +} + static void flush_unmaps(void) { int i, j; @@ -2002,7 +1894,11 @@ static void flush_unmaps(void) /* just flush them all */ for (i = 0; i < g_num_of_iommus; i++) { if (deferred_flush[i].next) { - iommu_flush_iotlb_global(&g_iommus[i], 0); + struct intel_iommu *iommu = + deferred_flush[i].domain[0]->iommu; + + iommu->flush.flush_iotlb(iommu, 0, 0, 0, + DMA_TLB_GLOBAL_FLUSH, 0); for (j = 0; j < deferred_flush[i].next; j++) { __free_iova(&deferred_flush[i].domain[j]->iovad, deferred_flush[i].iova[j]); @@ -2032,7 +1928,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) if (list_size == HIGH_WATER_MARK) flush_unmaps(); - iommu_id = dom->iommu - g_iommus; + iommu_id = dom->iommu->seq_id; + next = deferred_flush[iommu_id].next; deferred_flush[iommu_id].domain[next] = dom; deferred_flush[iommu_id].iova[next] = iova; @@ -2046,8 +1943,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) spin_unlock_irqrestore(&async_umap_flush_lock, flags); } -static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, - size_t size, int dir) +void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, + int dir) { struct pci_dev *pdev = to_pci_dev(dev); struct dmar_domain *domain; @@ -2063,11 +1960,11 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, if (!iova) return; - start_addr = iova->pfn_lo << PAGE_SHIFT_4K; + start_addr = iova->pfn_lo << PAGE_SHIFT; size = aligned_size((u64)dev_addr, size); pr_debug("Device %s unmapping: %lx@%llx\n", - pci_name(pdev), size, (u64)start_addr); + pci_name(pdev), size, (unsigned long long)start_addr); /* clear the whole page */ dma_pte_clear_range(domain, start_addr, start_addr + size); @@ -2075,7 +1972,7 @@ static void intel_unmap_single(struct 
device *dev, dma_addr_t dev_addr, dma_pte_free_pagetable(domain, start_addr, start_addr + size); if (intel_iommu_strict) { if (iommu_flush_iotlb_psi(domain->iommu, - domain->id, start_addr, size >> PAGE_SHIFT_4K, 0)) + domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0)) iommu_flush_write_buffer(domain->iommu); /* free iova */ __free_iova(&domain->iovad, iova); @@ -2088,13 +1985,13 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, } } -static void * intel_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags) +void *intel_alloc_coherent(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags) { void *vaddr; int order; - size = PAGE_ALIGN_4K(size); + size = PAGE_ALIGN(size); order = get_order(size); flags &= ~(GFP_DMA | GFP_DMA32); @@ -2103,19 +2000,21 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size, return NULL; memset(vaddr, 0, size); - *dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL); + *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size, + DMA_BIDIRECTIONAL, + hwdev->coherent_dma_mask); if (*dma_handle) return vaddr; free_pages((unsigned long)vaddr, order); return NULL; } -static void intel_free_coherent(struct device *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle) +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, + dma_addr_t dma_handle) { int order; - size = PAGE_ALIGN_4K(size); + size = PAGE_ALIGN(size); order = get_order(size); intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL); @@ -2123,8 +2022,9 @@ static void intel_free_coherent(struct device *hwdev, size_t size, } #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, - int nelems, int dir) + +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, + int nelems, int dir) { int i; struct pci_dev *pdev = to_pci_dev(hwdev); @@ -2148,7 +2048,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, size += aligned_size((u64)addr, sg->length); } - start_addr = iova->pfn_lo << PAGE_SHIFT_4K; + start_addr = iova->pfn_lo << PAGE_SHIFT; /* clear the whole page */ dma_pte_clear_range(domain, start_addr, start_addr + size); @@ -2156,7 +2056,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, dma_pte_free_pagetable(domain, start_addr, start_addr + size); if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr, - size >> PAGE_SHIFT_4K, 0)) + size >> VTD_PAGE_SHIFT, 0)) iommu_flush_write_buffer(domain->iommu); /* free iova */ @@ -2177,8 +2077,8 @@ static int intel_nontranslate_map_sg(struct device *hddev, return nelems; } -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, - int nelems, int dir) +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, + int dir) { void *addr; int i; @@ -2206,7 +2106,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, size += aligned_size((u64)addr, sg->length); } - iova = __intel_alloc_iova(hwdev, domain, size); + iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); if (!iova) { sglist->dma_length = 0; return 0; @@ -2222,14 +2122,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) prot |= DMA_PTE_WRITE; - start_addr = iova->pfn_lo << PAGE_SHIFT_4K; + start_addr = iova->pfn_lo << PAGE_SHIFT; offset = 0; 
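aligned_size(), used above when sizing both single mappings and scatterlist segments, widens a byte count so the IOVA allocation covers every page the buffer touches: the start address's sub-page offset is added before rounding up. A quick userspace check of the arithmetic, assuming 4K pages:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE	4096ULL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)

/* Userspace mirror of aligned_size() from the hunk above. */
static uint64_t aligned_size(uint64_t host_addr, uint64_t size)
{
	return PAGE_ALIGN((host_addr & ~PAGE_MASK) + size);
}

int main(void)
{
	/* 0x100 bytes starting at offset 0xf80 spill into a second page. */
	assert(aligned_size(0x1f80, 0x100) == 2 * PAGE_SIZE);
	/* A page-aligned, page-sized request stays one page. */
	assert(aligned_size(0x2000, 0x1000) == PAGE_SIZE);
	return 0;
}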
for_each_sg(sglist, sg, nelems, i) { addr = SG_ENT_VIRT_ADDRESS(sg); addr = (void *)virt_to_phys(addr); size = aligned_size((u64)addr, sg->length); ret = domain_page_mapping(domain, start_addr + offset, - ((u64)addr) & PAGE_MASK_4K, + ((u64)addr) & PAGE_MASK, size, prot); if (ret) { /* clear the page */ @@ -2243,14 +2143,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, return 0; } sg->dma_address = start_addr + offset + - ((u64)addr & (~PAGE_MASK_4K)); + ((u64)addr & (~PAGE_MASK)); sg->dma_length = sg->length; offset += size; } /* it's a non-present to present mapping */ if (iommu_flush_iotlb_psi(domain->iommu, domain->id, - start_addr, offset >> PAGE_SHIFT_4K, 1)) + start_addr, offset >> VTD_PAGE_SHIFT, 1)) iommu_flush_write_buffer(domain->iommu); return nelems; } @@ -2290,7 +2190,6 @@ static inline int iommu_devinfo_cache_init(void) sizeof(struct device_domain_info), 0, SLAB_HWCACHE_ALIGN, - NULL); if (!iommu_devinfo_cache) { printk(KERN_ERR "Couldn't create devinfo cache\n"); @@ -2308,7 +2207,6 @@ static inline int iommu_iova_cache_init(void) sizeof(struct iova), 0, SLAB_HWCACHE_ALIGN, - NULL); if (!iommu_iova_cache) { printk(KERN_ERR "Couldn't create iova cache\n"); @@ -2348,38 +2246,6 @@ static void __init iommu_exit_mempool(void) } -static int blacklist_iommu(const struct dmi_system_id *id) -{ - printk(KERN_INFO "%s detected; disabling IOMMU\n", - id->ident); - dmar_disabled = 1; - return 0; -} - -static struct dmi_system_id __initdata intel_iommu_dmi_table[] = { - { /* Some DG33BU BIOS revisions advertised non-existent VT-d */ - .callback = blacklist_iommu, - .ident = "Intel DG33BU", - { DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), - DMI_MATCH(DMI_BOARD_NAME, "DG33BU"), - } - }, - { } -}; - - -void __init detect_intel_iommu(void) -{ - if (swiotlb || no_iommu || iommu_detected || dmar_disabled) - return; - if (early_dmar_detect()) { - dmi_check_system(intel_iommu_dmi_table); - if (dmar_disabled) - return; - iommu_detected = 1; - } -} - static void __init init_no_remapping_devices(void) { struct dmar_drhd_unit *drhd; @@ -2426,12 +2292,19 @@ int __init intel_iommu_init(void) { int ret = 0; - if (no_iommu || swiotlb || dmar_disabled) - return -ENODEV; - if (dmar_table_init()) return -ENODEV; + if (dmar_dev_scope_init()) + return -ENODEV; + + /* + * Check the need for DMA-remapping initialization now. + * Above initialization will also be used by Interrupt-remapping. 
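Note also intel_alloc_coherent() in the hunks above: it now feeds hwdev->coherent_dma_mask into __intel_map_single() instead of reusing the streaming dma_mask, since a device can be capable of 64-bit streaming DMA while still needing its coherent buffers below 4G. A hypothetical probe routine showing the two masks being set independently with the standard helpers:

/* Hypothetical device: 64-bit streaming DMA, 32-bit coherent DMA. */
static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (err)
		return err;

	/* Coherent allocations must stay below 4G for this device;
	 * intel_alloc_coherent() honors this mask, not the streaming
	 * one set above. */
	return pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
}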
+ */ + if (no_iommu || swiotlb || dmar_disabled) + return -ENODEV; + iommu_init_mempool(); dmar_init_reserved_ranges(); @@ -2453,3 +2326,111 @@ int __init intel_iommu_init(void) return 0; } +void intel_iommu_domain_exit(struct dmar_domain *domain) +{ + u64 end; + + /* Domain 0 is reserved, so dont process it */ + if (!domain) + return; + + end = DOMAIN_MAX_ADDR(domain->gaw); + end = end & (~VTD_PAGE_MASK); + + /* clear ptes */ + dma_pte_clear_range(domain, 0, end); + + /* free page tables */ + dma_pte_free_pagetable(domain, 0, end); + + iommu_free_domain(domain); + free_domain_mem(domain); +} +EXPORT_SYMBOL_GPL(intel_iommu_domain_exit); + +struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev) +{ + struct dmar_drhd_unit *drhd; + struct dmar_domain *domain; + struct intel_iommu *iommu; + + drhd = dmar_find_matched_drhd_unit(pdev); + if (!drhd) { + printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n"); + return NULL; + } + + iommu = drhd->iommu; + if (!iommu) { + printk(KERN_ERR + "intel_iommu_domain_alloc: iommu == NULL\n"); + return NULL; + } + domain = iommu_alloc_domain(iommu); + if (!domain) { + printk(KERN_ERR + "intel_iommu_domain_alloc: domain == NULL\n"); + return NULL; + } + if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { + printk(KERN_ERR + "intel_iommu_domain_alloc: domain_init() failed\n"); + intel_iommu_domain_exit(domain); + return NULL; + } + return domain; +} +EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc); + +int intel_iommu_context_mapping( + struct dmar_domain *domain, struct pci_dev *pdev) +{ + int rc; + rc = domain_context_mapping(domain, pdev); + return rc; +} +EXPORT_SYMBOL_GPL(intel_iommu_context_mapping); + +int intel_iommu_page_mapping( + struct dmar_domain *domain, dma_addr_t iova, + u64 hpa, size_t size, int prot) +{ + int rc; + rc = domain_page_mapping(domain, iova, hpa, size, prot); + return rc; +} +EXPORT_SYMBOL_GPL(intel_iommu_page_mapping); + +void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn) +{ + detach_domain_for_dev(domain, bus, devfn); +} +EXPORT_SYMBOL_GPL(intel_iommu_detach_dev); + +struct dmar_domain * +intel_iommu_find_domain(struct pci_dev *pdev) +{ + return find_domain(pdev); +} +EXPORT_SYMBOL_GPL(intel_iommu_find_domain); + +int intel_iommu_found(void) +{ + return g_num_of_iommus; +} +EXPORT_SYMBOL_GPL(intel_iommu_found); + +u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova) +{ + struct dma_pte *pte; + u64 pfn; + + pfn = 0; + pte = addr_to_dma_pte(domain, iova); + + if (pte) + pfn = dma_pte_addr(*pte); + + return pfn >> VTD_PAGE_SHIFT; +} +EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn); diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h deleted file mode 100644 index afc0ad96122..00000000000 --- a/drivers/pci/intel-iommu.h +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Copyright (c) 2006, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. 
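The EXPORT_SYMBOL_GPL() block above gives external code (device assignment in hypervisors is the obvious consumer) a way to build and populate VT-d domains directly. A hypothetical caller mapping one page for a device, using only the exported entry points; VTD_PAGE_SIZE and the DMA_PTE_* flags are assumed to come from the relocated linux/intel-iommu.h:

/* Hypothetical consumer of the newly exported VT-d domain API. */
static int example_assign_device(struct pci_dev *pdev, dma_addr_t iova,
				 u64 hpa)
{
	struct dmar_domain *domain;
	int ret;

	if (!intel_iommu_found())
		return -ENODEV;

	domain = intel_iommu_domain_alloc(pdev);
	if (!domain)
		return -ENOMEM;

	/* Map one 4K page, then point the device's context entry
	 * at the domain. */
	ret = intel_iommu_page_mapping(domain, iova, hpa, VTD_PAGE_SIZE,
				       DMA_PTE_READ | DMA_PTE_WRITE);
	if (!ret)
		ret = intel_iommu_context_mapping(domain, pdev);
	if (ret) {
		intel_iommu_domain_exit(domain);
		return ret;
	}

	/* Read back: the host pfn behind the iova just mapped. */
	pr_debug("iova %llx -> pfn %llx\n", (unsigned long long)iova,
		 (unsigned long long)intel_iommu_iova_to_pfn(domain, iova));
	return 0;
}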
- * - * Copyright (C) 2006-2008 Intel Corporation - * Author: Ashok Raj <ashok.raj@intel.com> - * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> - */ - -#ifndef _INTEL_IOMMU_H_ -#define _INTEL_IOMMU_H_ - -#include <linux/types.h> -#include <linux/msi.h> -#include <linux/sysdev.h> -#include "iova.h" -#include <linux/io.h> - -/* - * We need a fixed PAGE_SIZE of 4K irrespective of - * arch PAGE_SIZE for IOMMU page tables. - */ -#define PAGE_SHIFT_4K (12) -#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K) -#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K) -#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K) - -#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K) -#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) -#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) - -/* - * Intel IOMMU register specification per version 1.0 public spec. - */ - -#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */ -#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */ -#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */ -#define DMAR_GCMD_REG 0x18 /* Global command register */ -#define DMAR_GSTS_REG 0x1c /* Global status register */ -#define DMAR_RTADDR_REG 0x20 /* Root entry table */ -#define DMAR_CCMD_REG 0x28 /* Context command reg */ -#define DMAR_FSTS_REG 0x34 /* Fault Status register */ -#define DMAR_FECTL_REG 0x38 /* Fault control register */ -#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */ -#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */ -#define DMAR_FEUADDR_REG 0x44 /* Upper address register */ -#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */ -#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */ -#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */ -#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */ -#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */ -#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */ - -#define OFFSET_STRIDE (9) -/* -#define dmar_readl(dmar, reg) readl(dmar + reg) -#define dmar_readq(dmar, reg) ({ \ - u32 lo, hi; \ - lo = readl(dmar + reg); \ - hi = readl(dmar + reg + 4); \ - (((u64) hi) << 32) + lo; }) -*/ -static inline u64 dmar_readq(void __iomem *addr) -{ - u32 lo, hi; - lo = readl(addr); - hi = readl(addr + 4); - return (((u64) hi) << 32) + lo; -} - -static inline void dmar_writeq(void __iomem *addr, u64 val) -{ - writel((u32)val, addr); - writel((u32)(val >> 32), addr + 4); -} - -#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) -#define DMAR_VER_MINOR(v) ((v) & 0x0f) - -/* - * Decoding Capability Register - */ -#define cap_read_drain(c) (((c) >> 55) & 1) -#define cap_write_drain(c) (((c) >> 54) & 1) -#define cap_max_amask_val(c) (((c) >> 48) & 0x3f) -#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1) -#define cap_pgsel_inv(c) (((c) >> 39) & 1) - -#define cap_super_page_val(c) (((c) >> 34) & 0xf) -#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \ - * OFFSET_STRIDE) + 21) - -#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16) -#define cap_max_fault_reg_offset(c) \ - (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16) - -#define cap_zlr(c) (((c) >> 22) & 1) -#define cap_isoch(c) (((c) >> 23) & 1) -#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1) -#define cap_sagaw(c) (((c) >> 8) & 0x1f) -#define cap_caching_mode(c) (((c) >> 7) & 1) -#define cap_phmr(c) (((c) >> 6) & 1) -#define cap_plmr(c) (((c) >> 5) & 1) -#define cap_rwbf(c) (((c) >> 4) & 1) -#define cap_afl(c) (((c) >> 3) & 1) -#define cap_ndoms(c) (((unsigned 
long)1) << (4 + 2 * ((c) & 0x7))) -/* - * Extended Capability Register - */ - -#define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1) -#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16) -#define ecap_max_iotlb_offset(e) \ - (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16) -#define ecap_coherent(e) ((e) & 0x1) - - -/* IOTLB_REG */ -#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) -#define DMA_TLB_DSI_FLUSH (((u64)2) << 60) -#define DMA_TLB_PSI_FLUSH (((u64)3) << 60) -#define DMA_TLB_IIRG(type) ((type >> 60) & 7) -#define DMA_TLB_IAIG(val) (((val) >> 57) & 7) -#define DMA_TLB_READ_DRAIN (((u64)1) << 49) -#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48) -#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32) -#define DMA_TLB_IVT (((u64)1) << 63) -#define DMA_TLB_IH_NONLEAF (((u64)1) << 6) -#define DMA_TLB_MAX_SIZE (0x3f) - -/* PMEN_REG */ -#define DMA_PMEN_EPM (((u32)1)<<31) -#define DMA_PMEN_PRS (((u32)1)<<0) - -/* GCMD_REG */ -#define DMA_GCMD_TE (((u32)1) << 31) -#define DMA_GCMD_SRTP (((u32)1) << 30) -#define DMA_GCMD_SFL (((u32)1) << 29) -#define DMA_GCMD_EAFL (((u32)1) << 28) -#define DMA_GCMD_WBF (((u32)1) << 27) - -/* GSTS_REG */ -#define DMA_GSTS_TES (((u32)1) << 31) -#define DMA_GSTS_RTPS (((u32)1) << 30) -#define DMA_GSTS_FLS (((u32)1) << 29) -#define DMA_GSTS_AFLS (((u32)1) << 28) -#define DMA_GSTS_WBFS (((u32)1) << 27) - -/* CCMD_REG */ -#define DMA_CCMD_ICC (((u64)1) << 63) -#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61) -#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61) -#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61) -#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32) -#define DMA_CCMD_MASK_NOBIT 0 -#define DMA_CCMD_MASK_1BIT 1 -#define DMA_CCMD_MASK_2BIT 2 -#define DMA_CCMD_MASK_3BIT 3 -#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16) -#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff)) - -/* FECTL_REG */ -#define DMA_FECTL_IM (((u32)1) << 31) - -/* FSTS_REG */ -#define DMA_FSTS_PPF ((u32)2) -#define DMA_FSTS_PFO ((u32)1) -#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff) - -/* FRCD_REG, 32 bits access */ -#define DMA_FRCD_F (((u32)1) << 31) -#define dma_frcd_type(d) ((d >> 30) & 1) -#define dma_frcd_fault_reason(c) (c & 0xff) -#define dma_frcd_source_id(c) (c & 0xffff) -#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */ - -/* - * 0: Present - * 1-11: Reserved - * 12-63: Context Ptr (12 - (haw-1)) - * 64-127: Reserved - */ -struct root_entry { - u64 val; - u64 rsvd1; -}; -#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry)) -static inline bool root_present(struct root_entry *root) -{ - return (root->val & 1); -} -static inline void set_root_present(struct root_entry *root) -{ - root->val |= 1; -} -static inline void set_root_value(struct root_entry *root, unsigned long value) -{ - root->val |= value & PAGE_MASK_4K; -} - -struct context_entry; -static inline struct context_entry * -get_context_addr_from_root(struct root_entry *root) -{ - return (struct context_entry *) - (root_present(root)?phys_to_virt( - root->val & PAGE_MASK_4K): - NULL); -} - -/* - * low 64 bits: - * 0: present - * 1: fault processing disable - * 2-3: translation type - * 12-63: address space root - * high 64 bits: - * 0-2: address width - * 3-6: aval - * 8-23: domain id - */ -struct context_entry { - u64 lo; - u64 hi; -}; -#define context_present(c) ((c).lo & 1) -#define context_fault_disable(c) (((c).lo >> 1) & 1) -#define context_translation_type(c) (((c).lo >> 2) & 3) -#define context_address_root(c) ((c).lo & PAGE_MASK_4K) -#define context_address_width(c) ((c).hi & 
7) -#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1)) - -#define context_set_present(c) do {(c).lo |= 1;} while (0) -#define context_set_fault_enable(c) \ - do {(c).lo &= (((u64)-1) << 2) | 1;} while (0) -#define context_set_translation_type(c, val) \ - do { \ - (c).lo &= (((u64)-1) << 4) | 3; \ - (c).lo |= ((val) & 3) << 2; \ - } while (0) -#define CONTEXT_TT_MULTI_LEVEL 0 -#define context_set_address_root(c, val) \ - do {(c).lo |= (val) & PAGE_MASK_4K;} while (0) -#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0) -#define context_set_domain_id(c, val) \ - do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0) -#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0) - -/* - * 0: readable - * 1: writable - * 2-6: reserved - * 7: super page - * 8-11: available - * 12-63: Host physcial address - */ -struct dma_pte { - u64 val; -}; -#define dma_clear_pte(p) do {(p).val = 0;} while (0) - -#define DMA_PTE_READ (1) -#define DMA_PTE_WRITE (2) - -#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0) -#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0) -#define dma_set_pte_prot(p, prot) \ - do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0) -#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K) -#define dma_set_pte_addr(p, addr) do {\ - (p).val |= ((addr) & PAGE_MASK_4K); } while (0) -#define dma_pte_present(p) (((p).val & 3) != 0) - -struct intel_iommu; - -struct dmar_domain { - int id; /* domain id */ - struct intel_iommu *iommu; /* back pointer to owning iommu */ - - struct list_head devices; /* all devices' list */ - struct iova_domain iovad; /* iova's that belong to this domain */ - - struct dma_pte *pgd; /* virtual address */ - spinlock_t mapping_lock; /* page table lock */ - int gaw; /* max guest address width */ - - /* adjusted guest address width, 0 is level 2 30-bit */ - int agaw; - -#define DOMAIN_FLAG_MULTIPLE_DEVICES 1 - int flags; -}; - -/* PCI domain-device relationship */ -struct device_domain_info { - struct list_head link; /* link to domain siblings */ - struct list_head global; /* link to global list */ - u8 bus; /* PCI bus numer */ - u8 devfn; /* PCI devfn number */ - struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ - struct dmar_domain *domain; /* pointer to domain */ -}; - -extern int init_dmars(void); - -struct intel_iommu { - void __iomem *reg; /* Pointer to hardware regs, virtual addr */ - u64 cap; - u64 ecap; - unsigned long *domain_ids; /* bitmap of domains */ - struct dmar_domain **domains; /* ptr to domains */ - int seg; - u32 gcmd; /* Holds TE, EAFL. 
Don't need SRTP, SFL, WBF */ - spinlock_t lock; /* protect context, domain ids */ - spinlock_t register_lock; /* protect register handling */ - struct root_entry *root_entry; /* virtual address */ - - unsigned int irq; - unsigned char name[7]; /* Device Name */ - struct msi_msg saved_msg; - struct sys_device sysdev; -}; - -#ifndef CONFIG_DMAR_GFX_WA -static inline void iommu_prepare_gfx_mapping(void) -{ - return; -} -#endif /* !CONFIG_DMAR_GFX_WA */ - -#endif diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c new file mode 100644 index 00000000000..2de5a3238c9 --- /dev/null +++ b/drivers/pci/intr_remapping.c @@ -0,0 +1,512 @@ +#include <linux/interrupt.h> +#include <linux/dmar.h> +#include <linux/spinlock.h> +#include <linux/jiffies.h> +#include <linux/pci.h> +#include <linux/irq.h> +#include <asm/io_apic.h> +#include <linux/intel-iommu.h> +#include "intr_remapping.h" + +static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; +static int ir_ioapic_num; +int intr_remapping_enabled; + +struct irq_2_iommu { + struct intel_iommu *iommu; + u16 irte_index; + u16 sub_handle; + u8 irte_mask; +}; + +static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; + +static struct irq_2_iommu *irq_2_iommu(unsigned int irq) +{ + return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL; +} + +static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) +{ + return irq_2_iommu(irq); +} + +static DEFINE_SPINLOCK(irq_2_ir_lock); + +static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq) +{ + struct irq_2_iommu *irq_iommu; + + irq_iommu = irq_2_iommu(irq); + + if (!irq_iommu) + return NULL; + + if (!irq_iommu->iommu) + return NULL; + + return irq_iommu; +} + +int irq_remapped(int irq) +{ + return valid_irq_2_iommu(irq) != NULL; +} + +int get_irte(int irq, struct irte *entry) +{ + int index; + struct irq_2_iommu *irq_iommu; + + if (!entry) + return -1; + + spin_lock(&irq_2_ir_lock); + irq_iommu = valid_irq_2_iommu(irq); + if (!irq_iommu) { + spin_unlock(&irq_2_ir_lock); + return -1; + } + + index = irq_iommu->irte_index + irq_iommu->sub_handle; + *entry = *(irq_iommu->iommu->ir_table->base + index); + + spin_unlock(&irq_2_ir_lock); + return 0; +} + +int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) +{ + struct ir_table *table = iommu->ir_table; + struct irq_2_iommu *irq_iommu; + u16 index, start_index; + unsigned int mask = 0; + int i; + + if (!count) + return -1; + + /* protect irq_2_iommu_alloc later */ + if (irq >= nr_irqs) + return -1; + + /* + * start the IRTE search from index 0. 
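alloc_irte(), whose body continues after this comment, reserves a power-of-two block of IRTEs for multi-vector MSI: the requested count is rounded up with __roundup_pow_of_two() and the block's low ilog2(count) index bits become the sub-handle mask. A userspace check of that arithmetic for a three-vector request, with local stand-ins for the kernel helpers:

#include <assert.h>

static unsigned int roundup_pow_of_two(unsigned int x)
{
	unsigned int r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

static unsigned int ilog2(unsigned int x)
{
	unsigned int l = 0;

	while (x >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned int count = 3, mask = 0;

	if (count > 1) {		/* mirrors alloc_irte() */
		count = roundup_pow_of_two(count);
		mask = ilog2(count);
	}
	/* Three vectors occupy a four-entry block; mask 2 means the
	 * low two bits of the IRTE index select the vector. */
	assert(count == 4 && mask == 2);
	return 0;
}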
+ */ + index = start_index = 0; + + if (count > 1) { + count = __roundup_pow_of_two(count); + mask = ilog2(count); + } + + if (mask > ecap_max_handle_mask(iommu->ecap)) { + printk(KERN_ERR + "Requested mask %x exceeds the max invalidation handle" + " mask value %Lx\n", mask, + ecap_max_handle_mask(iommu->ecap)); + return -1; + } + + spin_lock(&irq_2_ir_lock); + do { + for (i = index; i < index + count; i++) + if (table->base[i].present) + break; + /* empty index found */ + if (i == index + count) + break; + + index = (index + count) % INTR_REMAP_TABLE_ENTRIES; + + if (index == start_index) { + spin_unlock(&irq_2_ir_lock); + printk(KERN_ERR "can't allocate an IRTE\n"); + return -1; + } + } while (1); + + for (i = index; i < index + count; i++) + table->base[i].present = 1; + + irq_iommu = irq_2_iommu_alloc(irq); + irq_iommu->iommu = iommu; + irq_iommu->irte_index = index; + irq_iommu->sub_handle = 0; + irq_iommu->irte_mask = mask; + + spin_unlock(&irq_2_ir_lock); + + return index; +} + +static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask) +{ + struct qi_desc desc; + + desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask) + | QI_IEC_SELECTIVE; + desc.high = 0; + + qi_submit_sync(&desc, iommu); +} + +int map_irq_to_irte_handle(int irq, u16 *sub_handle) +{ + int index; + struct irq_2_iommu *irq_iommu; + + spin_lock(&irq_2_ir_lock); + irq_iommu = valid_irq_2_iommu(irq); + if (!irq_iommu) { + spin_unlock(&irq_2_ir_lock); + return -1; + } + + *sub_handle = irq_iommu->sub_handle; + index = irq_iommu->irte_index; + spin_unlock(&irq_2_ir_lock); + return index; +} + +int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) +{ + struct irq_2_iommu *irq_iommu; + + spin_lock(&irq_2_ir_lock); + + irq_iommu = irq_2_iommu_alloc(irq); + + irq_iommu->iommu = iommu; + irq_iommu->irte_index = index; + irq_iommu->sub_handle = subhandle; + irq_iommu->irte_mask = 0; + + spin_unlock(&irq_2_ir_lock); + + return 0; +} + +int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) +{ + struct irq_2_iommu *irq_iommu; + + spin_lock(&irq_2_ir_lock); + irq_iommu = valid_irq_2_iommu(irq); + if (!irq_iommu) { + spin_unlock(&irq_2_ir_lock); + return -1; + } + + irq_iommu->iommu = NULL; + irq_iommu->irte_index = 0; + irq_iommu->sub_handle = 0; + irq_2_iommu(irq)->irte_mask = 0; + + spin_unlock(&irq_2_ir_lock); + + return 0; +} + +int modify_irte(int irq, struct irte *irte_modified) +{ + int index; + struct irte *irte; + struct intel_iommu *iommu; + struct irq_2_iommu *irq_iommu; + + spin_lock(&irq_2_ir_lock); + irq_iommu = valid_irq_2_iommu(irq); + if (!irq_iommu) { + spin_unlock(&irq_2_ir_lock); + return -1; + } + + iommu = irq_iommu->iommu; + + index = irq_iommu->irte_index + irq_iommu->sub_handle; + irte = &iommu->ir_table->base[index]; + + set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1)); + __iommu_flush_cache(iommu, irte, sizeof(*irte)); + + qi_flush_iec(iommu, index, 0); + + spin_unlock(&irq_2_ir_lock); + return 0; +} + +int flush_irte(int irq) +{ + int index; + struct intel_iommu *iommu; + struct irq_2_iommu *irq_iommu; + + spin_lock(&irq_2_ir_lock); + irq_iommu = valid_irq_2_iommu(irq); + if (!irq_iommu) { + spin_unlock(&irq_2_ir_lock); + return -1; + } + + iommu = irq_iommu->iommu; + + index = irq_iommu->irte_index + irq_iommu->sub_handle; + + qi_flush_iec(iommu, index, irq_iommu->irte_mask); + spin_unlock(&irq_2_ir_lock); + + return 0; +} + +struct intel_iommu *map_ioapic_to_ir(int apic) +{ + int i; + + for (i = 0; i < MAX_IO_APICS; i++) + if 
(ir_ioapic[i].id == apic) + return ir_ioapic[i].iommu; + return NULL; +} + +struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) +{ + struct dmar_drhd_unit *drhd; + + drhd = dmar_find_matched_drhd_unit(dev); + if (!drhd) + return NULL; + + return drhd->iommu; +} + +int free_irte(int irq) +{ + int index, i; + struct irte *irte; + struct intel_iommu *iommu; + struct irq_2_iommu *irq_iommu; + + spin_lock(&irq_2_ir_lock); + irq_iommu = valid_irq_2_iommu(irq); + if (!irq_iommu) { + spin_unlock(&irq_2_ir_lock); + return -1; + } + + iommu = irq_iommu->iommu; + + index = irq_iommu->irte_index + irq_iommu->sub_handle; + irte = &iommu->ir_table->base[index]; + + if (!irq_iommu->sub_handle) { + for (i = 0; i < (1 << irq_iommu->irte_mask); i++) + set_64bit((unsigned long *)irte, 0); + qi_flush_iec(iommu, index, irq_iommu->irte_mask); + } + + irq_iommu->iommu = NULL; + irq_iommu->irte_index = 0; + irq_iommu->sub_handle = 0; + irq_iommu->irte_mask = 0; + + spin_unlock(&irq_2_ir_lock); + + return 0; +} + +static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) +{ + u64 addr; + u32 cmd, sts; + unsigned long flags; + + addr = virt_to_phys((void *)iommu->ir_table->base); + + spin_lock_irqsave(&iommu->register_lock, flags); + + dmar_writeq(iommu->reg + DMAR_IRTA_REG, + (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); + + /* Set interrupt-remapping table pointer */ + cmd = iommu->gcmd | DMA_GCMD_SIRTP; + writel(cmd, iommu->reg + DMAR_GCMD_REG); + + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, (sts & DMA_GSTS_IRTPS), sts); + spin_unlock_irqrestore(&iommu->register_lock, flags); + + /* + * global invalidation of interrupt entry cache before enabling + * interrupt-remapping. + */ + qi_global_iec(iommu); + + spin_lock_irqsave(&iommu->register_lock, flags); + + /* Enable interrupt-remapping */ + cmd = iommu->gcmd | DMA_GCMD_IRE; + iommu->gcmd |= DMA_GCMD_IRE; + writel(cmd, iommu->reg + DMAR_GCMD_REG); + + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, (sts & DMA_GSTS_IRES), sts); + + spin_unlock_irqrestore(&iommu->register_lock, flags); +} + + +static int setup_intr_remapping(struct intel_iommu *iommu, int mode) +{ + struct ir_table *ir_table; + struct page *pages; + + ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), + GFP_KERNEL); + + if (!iommu->ir_table) + return -ENOMEM; + + pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER); + + if (!pages) { + printk(KERN_ERR "failed to allocate pages of order %d\n", + INTR_REMAP_PAGE_ORDER); + kfree(iommu->ir_table); + return -ENOMEM; + } + + ir_table->base = page_address(pages); + + iommu_set_intr_remapping(iommu, mode); + return 0; +} + +int __init enable_intr_remapping(int eim) +{ + struct dmar_drhd_unit *drhd; + int setup = 0; + + /* + * check for the Interrupt-remapping support + */ + for_each_drhd_unit(drhd) { + struct intel_iommu *iommu = drhd->iommu; + + if (!ecap_ir_support(iommu->ecap)) + continue; + + if (eim && !ecap_eim_support(iommu->ecap)) { + printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, " + " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap); + return -1; + } + } + + /* + * Enable queued invalidation for all the DRHD's. + */ + for_each_drhd_unit(drhd) { + int ret; + struct intel_iommu *iommu = drhd->iommu; + ret = dmar_enable_qi(iommu); + + if (ret) { + printk(KERN_ERR "DRHD %Lx: failed to enable queued, " + " invalidation, ecap %Lx, ret %d\n", + drhd->reg_base_addr, iommu->ecap, ret); + return -1; + } + } + + /* + * Setup Interrupt-remapping for all the DRHD's now. 
+ */ + for_each_drhd_unit(drhd) { + struct intel_iommu *iommu = drhd->iommu; + + if (!ecap_ir_support(iommu->ecap)) + continue; + + if (setup_intr_remapping(iommu, eim)) + goto error; + + setup = 1; + } + + if (!setup) + goto error; + + intr_remapping_enabled = 1; + + return 0; + +error: + /* + * handle error condition gracefully here! + */ + return -1; +} + +static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, + struct intel_iommu *iommu) +{ + struct acpi_dmar_hardware_unit *drhd; + struct acpi_dmar_device_scope *scope; + void *start, *end; + + drhd = (struct acpi_dmar_hardware_unit *)header; + + start = (void *)(drhd + 1); + end = ((void *)drhd) + header->length; + + while (start < end) { + scope = start; + if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) { + if (ir_ioapic_num == MAX_IO_APICS) { + printk(KERN_WARNING "Exceeded Max IO APICS\n"); + return -1; + } + + printk(KERN_INFO "IOAPIC id %d under DRHD base" + " 0x%Lx\n", scope->enumeration_id, + drhd->address); + + ir_ioapic[ir_ioapic_num].iommu = iommu; + ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; + ir_ioapic_num++; + } + start += scope->length; + } + + return 0; +} + +/* + * Finds the association between IOAPICs and their Interrupt-remapping + * hardware unit. + */ +int __init parse_ioapics_under_ir(void) +{ + struct dmar_drhd_unit *drhd; + int ir_supported = 0; + + for_each_drhd_unit(drhd) { + struct intel_iommu *iommu = drhd->iommu; + + if (ecap_ir_support(iommu->ecap)) { + if (ir_parse_ioapic_scope(drhd->hdr, iommu)) + return -1; + + ir_supported = 1; + } + } + + if (ir_supported && ir_ioapic_num != nr_ioapics) { + printk(KERN_WARNING + "Not all IO-APICs listed under remapping hardware\n"); + return -1; + } + + return ir_supported; +}
diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h new file mode 100644 index 00000000000..ca48f0df8ac --- /dev/null +++ b/drivers/pci/intr_remapping.h @@ -0,0 +1,8 @@ +#include <linux/intel-iommu.h> + +struct ioapic_scope { + struct intel_iommu *iommu; + unsigned int id; +}; + +#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c index 3ef4ac06431..2287116e982 100644 --- a/drivers/pci/iova.c +++ b/drivers/pci/iova.c @@ -7,7 +7,7 @@ * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> */ -#include "iova.h" +#include <linux/iova.h> void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit) diff --git a/drivers/pci/iova.h b/drivers/pci/iova.h deleted file mode 100644 index 228f6c94b69..00000000000 --- a/drivers/pci/iova.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2006, Intel Corporation. - * - * This file is released under the GPLv2.
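ir_parse_ioapic_scope() above walks the variable-length device-scope records that follow a DRHD header: each record carries its own length field, so the cursor advances by scope->length until it reaches the end of the structure. The same pattern reduced to a standalone sketch (the two-byte header is an illustrative stand-in for struct acpi_dmar_device_scope):

#include <stdio.h>
#include <stdint.h>

struct scope_hdr {		/* illustrative stand-in, not the ACPI layout */
	uint8_t entry_type;
	uint8_t length;		/* total length of this entry, in bytes */
};

static void walk_scopes(const uint8_t *start, const uint8_t *end)
{
	while (start < end) {
		const struct scope_hdr *s = (const struct scope_hdr *)start;

		printf("type %u, len %u\n", s->entry_type, s->length);
		start += s->length;	/* entries are variable-length */
	}
}

int main(void)
{
	/* two fake entries: (type 3, len 8) and (type 1, len 6) */
	uint8_t table[14] = { 3, 8, 0, 0, 0, 0, 0, 0,  1, 6, 0, 0, 0, 0 };

	walk_scopes(table, table + sizeof(table));
	return 0;
}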
- * - * Copyright (C) 2006-2008 Intel Corporation - * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> - * - */ - -#ifndef _IOVA_H_ -#define _IOVA_H_ - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/rbtree.h> -#include <linux/dma-mapping.h> - -/* IO virtual address start page frame number */ -#define IOVA_START_PFN (1) - -/* iova structure */ -struct iova { - struct rb_node node; - unsigned long pfn_hi; /* IOMMU dish out addr hi */ - unsigned long pfn_lo; /* IOMMU dish out addr lo */ -}; - -/* holds all the iova translations for a domain */ -struct iova_domain { - spinlock_t iova_alloc_lock;/* Lock to protect iova allocation */ - spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ - struct rb_root rbroot; /* iova domain rbtree root */ - struct rb_node *cached32_node; /* Save last alloced node */ - unsigned long dma_32bit_pfn; -}; - -struct iova *alloc_iova_mem(void); -void free_iova_mem(struct iova *iova); -void free_iova(struct iova_domain *iovad, unsigned long pfn); -void __free_iova(struct iova_domain *iovad, struct iova *iova); -struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, - unsigned long limit_pfn, - bool size_aligned); -struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, - unsigned long pfn_hi); -void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); -void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit); -struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); -void put_iova_domain(struct iova_domain *iovad); - -#endif diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 4a10b5624f7..d2812013fd2 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -378,23 +378,21 @@ static int msi_capability_init(struct pci_dev *dev) entry->msi_attrib.masked = 1; entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ entry->msi_attrib.pos = pos; - if (is_mask_bit_support(control)) { + if (entry->msi_attrib.maskbit) { entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos, - is_64bit_address(control)); + entry->msi_attrib.is_64); } entry->dev = dev; if (entry->msi_attrib.maskbit) { unsigned int maskbits, temp; /* All MSIs are unmasked by default, Mask them all */ pci_read_config_dword(dev, - msi_mask_bits_reg(pos, is_64bit_address(control)), + msi_mask_bits_reg(pos, entry->msi_attrib.is_64), &maskbits); temp = (1 << multi_msi_capable(control)); temp = ((temp - 1) & ~temp); maskbits |= temp; - pci_write_config_dword(dev, - msi_mask_bits_reg(pos, is_64bit_address(control)), - maskbits); + pci_write_config_dword(dev, entry->msi_attrib.is_64, maskbits); entry->msi_attrib.maskbits_mask = temp; } list_add_tail(&entry->list, &dev->msi_list); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index a13f5348611..b4cdd690ae7 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -43,18 +43,32 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count) { struct pci_dynid *dynid; struct pci_driver *pdrv = to_pci_driver(driver); + const struct pci_device_id *ids = pdrv->id_table; __u32 vendor, device, subvendor=PCI_ANY_ID, subdevice=PCI_ANY_ID, class=0, class_mask=0; unsigned long driver_data=0; int fields=0; - int retval = 0; + int retval; - fields = sscanf(buf, "%x %x %x %x %x %x %lux", + fields = sscanf(buf, "%x %x %x %x %x %x %lx", &vendor, &device, &subvendor, &subdevice, &class, &class_mask, &driver_data); if (fields < 2) return -EINVAL; + /* Only accept driver_data values that 
match an existing id_table + entry */ + retval = -EINVAL; + while (ids->vendor || ids->subvendor || ids->class_mask) { + if (driver_data == ids->driver_data) { + retval = 0; + break; + } + ids++; + } + if (retval) /* No match */ + return retval; + dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); if (!dynid) return -ENOMEM; @@ -65,8 +79,7 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count) dynid->id.subdevice = subdevice; dynid->id.class = class; dynid->id.class_mask = class_mask; - dynid->id.driver_data = pdrv->dynids.use_driver_data ? - driver_data : 0UL; + dynid->id.driver_data = driver_data; spin_lock(&pdrv->dynids.lock); list_add_tail(&dynid->node, &pdrv->dynids.list); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 9c718583a23..110022d7868 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -16,6 +16,7 @@ #include <linux/kernel.h> +#include <linux/sched.h> #include <linux/pci.h> #include <linux/stat.h> #include <linux/topology.h> @@ -422,7 +423,7 @@ pci_write_vpd(struct kobject *kobj, struct bin_attribute *bin_attr, * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific * callback routine (pci_legacy_read). */ -ssize_t +static ssize_t pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { @@ -447,7 +448,7 @@ pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, * Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific * callback routine (pci_legacy_write). */ -ssize_t +static ssize_t pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { @@ -467,11 +468,11 @@ pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, * @attr: struct bin_attribute for this file * @vma: struct vm_area_struct passed to mmap * - * Uses an arch specific callback, pci_mmap_legacy_page_range, to mmap + * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap * legacy memory space (first meg of bus space) into application virtual * memory space. */ -int +static int pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) { @@ -479,11 +480,109 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr, struct device, kobj)); - return pci_mmap_legacy_page_range(bus, vma); + return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem); +} + +/** + * pci_mmap_legacy_io - map legacy PCI IO into user memory space + * @kobj: kobject corresponding to device to be mapped + * @attr: struct bin_attribute for this file + * @vma: struct vm_area_struct passed to mmap + * + * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap + * legacy IO space (first meg of bus space) into application virtual + * memory space. Returns -ENOSYS if the operation isn't supported + */ +static int +pci_mmap_legacy_io(struct kobject *kobj, struct bin_attribute *attr, + struct vm_area_struct *vma) +{ + struct pci_bus *bus = to_pci_bus(container_of(kobj, + struct device, + kobj)); + + return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io); +} + +/** + * pci_create_legacy_files - create legacy I/O port and memory files + * @b: bus to create files under + * + * Some platforms allow access to legacy I/O port and ISA memory space on + * a per-bus basis. 
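For the store_new_id() hunk above: the sscanf() format (with the bogus %lux corrected to %lx) accepts between two and seven hex fields, and unspecified fields keep their PCI_ANY_ID/zero defaults. A user-space sketch of the same parse (illustrative, not the kernel function):

#include <stdio.h>

int main(void)
{
	const char *buf = "8086 10f5 ffffffff ffffffff 0 0 1";
	unsigned int vendor, device, subvendor = ~0u, subdevice = ~0u;
	unsigned int class = 0, class_mask = 0;
	unsigned long driver_data = 0;

	int fields = sscanf(buf, "%x %x %x %x %x %x %lx",
			    &vendor, &device, &subvendor, &subdevice,
			    &class, &class_mask, &driver_data);

	/* the kernel rejects writes that supply fewer than two fields */
	printf("fields %d vendor %04x device %04x data %lx\n",
	       fields, vendor, device, driver_data);
	return 0;
}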
This routine creates the files and ties them into + their associated read, write and mmap files from pci-sysfs.c + * + * On error unwind, but don't propagate the error to the caller + as it is ok to set up the PCI bus without these files. + */ +void pci_create_legacy_files(struct pci_bus *b) +{ + int error; + + b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2, + GFP_ATOMIC); + if (!b->legacy_io) + goto kzalloc_err; + + b->legacy_io->attr.name = "legacy_io"; + b->legacy_io->size = 0xffff; + b->legacy_io->attr.mode = S_IRUSR | S_IWUSR; + b->legacy_io->read = pci_read_legacy_io; + b->legacy_io->write = pci_write_legacy_io; + b->legacy_io->mmap = pci_mmap_legacy_io; + error = device_create_bin_file(&b->dev, b->legacy_io); + if (error) + goto legacy_io_err; + + /* Allocated above after the legacy_io struct */ + b->legacy_mem = b->legacy_io + 1; + b->legacy_mem->attr.name = "legacy_mem"; + b->legacy_mem->size = 1024*1024; + b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; + b->legacy_mem->mmap = pci_mmap_legacy_mem; + error = device_create_bin_file(&b->dev, b->legacy_mem); + if (error) + goto legacy_mem_err; + + return; + +legacy_mem_err: + device_remove_bin_file(&b->dev, b->legacy_io); +legacy_io_err: + kfree(b->legacy_io); + b->legacy_io = NULL; +kzalloc_err: + printk(KERN_WARNING "pci: warning: could not create legacy I/O port " + "and ISA memory resources to sysfs\n"); + return; +} + +void pci_remove_legacy_files(struct pci_bus *b) +{ + if (b->legacy_io) { + device_remove_bin_file(&b->dev, b->legacy_io); + device_remove_bin_file(&b->dev, b->legacy_mem); + kfree(b->legacy_io); /* both are allocated here */ + } } #endif /* HAVE_PCI_LEGACY */ #ifdef HAVE_PCI_MMAP + +static int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) +{ + unsigned long nr, start, size; + + nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + start = vma->vm_pgoff; + size = pci_resource_len(pdev, resno) >> PAGE_SHIFT; + if (start < size && size - start >= nr) + return 1; + WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n", + current->comm, start, start+nr, pci_name(pdev), resno, size); + return 0; +} + /** * pci_mmap_resource - map a PCI resource into user memory space * @kobj: kobject for mapping @@ -510,6 +609,9 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, if (i >= PCI_ROM_RESOURCE) return -ENODEV; + if (!pci_mmap_fits(pdev, i, vma)) + return -EINVAL; + /* pci_mmap_page_range() expects the same kind of entry as coming * from /proc/bus/pci/ which is a "user visible" value. If this is * different from the resource itself, arch will do necessary fixup. @@ -696,7 +798,7 @@ static struct bin_attribute pci_config_attr = { .name = "config", .mode = S_IRUGO | S_IWUSR, }, - .size = 256, + .size = PCI_CFG_SPACE_SIZE, .read = pci_read_config, .write = pci_write_config, }; @@ -706,7 +808,7 @@ static struct bin_attribute pcie_config_attr = { .name = "config", .mode = S_IRUGO | S_IWUSR, }, - .size = 4096, + .size = PCI_CFG_SPACE_EXP_SIZE, .read = pci_read_config, .write = pci_write_config, }; @@ -716,86 +818,103 @@ int __attribute__ ((weak)) pcibios_add_platform_entries(struct pci_dev *dev) return 0; } +static int pci_create_capabilities_sysfs(struct pci_dev *dev) +{ + int retval; + struct bin_attribute *attr; + + /* If the device has VPD, try to expose it in sysfs.
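pci_mmap_fits(), added above, compares the mmap() request against the BAR entirely in pages: vma->vm_pgoff is the starting page offset into the resource and (vm_end - vm_start) >> PAGE_SHIFT the requested length. The same bounds test as a standalone sketch (names are illustrative):

#include <stdio.h>

/* returns 1 if [start, start + nr) pages fit in a resource of 'size' pages */
static int mmap_fits(unsigned long start, unsigned long nr, unsigned long size)
{
	return start < size && size - start >= nr;
}

int main(void)
{
	unsigned long bar_pages = 16;	/* a 64 KiB BAR with 4 KiB pages */

	printf("%d\n", mmap_fits(0, 16, bar_pages));	/* 1: exact fit */
	printf("%d\n", mmap_fits(8, 16, bar_pages));	/* 0: runs past the end */
	return 0;
}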
*/ + if (dev->vpd) { + attr = kzalloc(sizeof(*attr), GFP_ATOMIC); + if (!attr) + return -ENOMEM; + + attr->size = dev->vpd->len; + attr->attr.name = "vpd"; + attr->attr.mode = S_IRUSR | S_IWUSR; + attr->read = pci_read_vpd; + attr->write = pci_write_vpd; + retval = sysfs_create_bin_file(&dev->dev.kobj, attr); + if (retval) { + kfree(dev->vpd->attr); + return retval; + } + dev->vpd->attr = attr; + } + + /* Active State Power Management */ + pcie_aspm_create_sysfs_dev_files(dev); + + return 0; +} + int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) { - struct bin_attribute *attr = NULL; int retval; + int rom_size = 0; + struct bin_attribute *attr; if (!sysfs_initialized) return -EACCES; - if (pdev->cfg_size < 4096) + if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE) retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr); else retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr); if (retval) goto err; - /* If the device has VPD, try to expose it in sysfs. */ - if (pdev->vpd) { - attr = kzalloc(sizeof(*attr), GFP_ATOMIC); - if (attr) { - pdev->vpd->attr = attr; - attr->size = pdev->vpd->len; - attr->attr.name = "vpd"; - attr->attr.mode = S_IRUSR | S_IWUSR; - attr->read = pci_read_vpd; - attr->write = pci_write_vpd; - retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); - if (retval) - goto err_vpd; - } else { - retval = -ENOMEM; - goto err_config_file; - } - } - retval = pci_create_resource_files(pdev); if (retval) - goto err_vpd_file; + goto err_config_file; + + if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) + rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE); + else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) + rom_size = 0x20000; /* If the device has a ROM, try to expose it in sysfs. */ - if (pci_resource_len(pdev, PCI_ROM_RESOURCE) || - (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)) { + if (rom_size) { attr = kzalloc(sizeof(*attr), GFP_ATOMIC); - if (attr) { - pdev->rom_attr = attr; - attr->size = pci_resource_len(pdev, PCI_ROM_RESOURCE); - attr->attr.name = "rom"; - attr->attr.mode = S_IRUSR; - attr->read = pci_read_rom; - attr->write = pci_write_rom; - retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); - if (retval) - goto err_rom; - } else { + if (!attr) { retval = -ENOMEM; goto err_resource_files; } + attr->size = rom_size; + attr->attr.name = "rom"; + attr->attr.mode = S_IRUSR; + attr->read = pci_read_rom; + attr->write = pci_write_rom; + retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); + if (retval) { + kfree(attr); + goto err_resource_files; + } + pdev->rom_attr = attr; } + /* add platform-specific attributes */ - if (pcibios_add_platform_entries(pdev)) + retval = pcibios_add_platform_entries(pdev); + if (retval) goto err_rom_file; - pcie_aspm_create_sysfs_dev_files(pdev); + /* add sysfs entries for various capabilities */ + retval = pci_create_capabilities_sysfs(pdev); + if (retval) + goto err_rom_file; return 0; err_rom_file: - if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) + if (rom_size) { sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); -err_rom: - kfree(pdev->rom_attr); + kfree(pdev->rom_attr); + pdev->rom_attr = NULL; + } err_resource_files: pci_remove_resource_files(pdev); -err_vpd_file: - if (pdev->vpd) { - sysfs_remove_bin_file(&pdev->dev.kobj, pdev->vpd->attr); -err_vpd: - kfree(pdev->vpd->attr); - } err_config_file: - if (pdev->cfg_size < 4096) + if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE) sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); else 
sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr); @@ -803,6 +922,16 @@ err: return retval; } +static void pci_remove_capabilities_sysfs(struct pci_dev *dev) +{ + if (dev->vpd && dev->vpd->attr) { + sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr); + kfree(dev->vpd->attr); + } + + pcie_aspm_remove_sysfs_dev_files(dev); +} + /** * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files * @pdev: device whose entries we should free @@ -811,27 +940,28 @@ err: */ void pci_remove_sysfs_dev_files(struct pci_dev *pdev) { + int rom_size = 0; + if (!sysfs_initialized) return; - pcie_aspm_remove_sysfs_dev_files(pdev); + pci_remove_capabilities_sysfs(pdev); - if (pdev->vpd) { - sysfs_remove_bin_file(&pdev->dev.kobj, pdev->vpd->attr); - kfree(pdev->vpd->attr); - } - if (pdev->cfg_size < 4096) + if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE) sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); else sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr); pci_remove_resource_files(pdev); - if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) { - if (pdev->rom_attr) { - sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); - kfree(pdev->rom_attr); - } + if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) + rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE); + else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) + rom_size = 0x20000; + + if (rom_size && pdev->rom_attr) { + sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); + kfree(pdev->rom_attr); } } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index c9884bba22d..4db261e13e6 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -213,10 +213,13 @@ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) int pci_find_ext_capability(struct pci_dev *dev, int cap) { u32 header; - int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */ - int pos = 0x100; + int ttl; + int pos = PCI_CFG_SPACE_SIZE; - if (dev->cfg_size <= 256) + /* minimum 8 bytes per capability */ + ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; + + if (dev->cfg_size <= PCI_CFG_SPACE_SIZE) return 0; if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) @@ -234,7 +237,7 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap) return pos; pos = PCI_EXT_CAP_NEXT(header); - if (pos < 0x100) + if (pos < PCI_CFG_SPACE_SIZE) break; if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) @@ -1127,6 +1130,27 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) } /** + * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold + * @dev: PCI device to prepare + * @enable: True to enable wake-up event generation; false to disable + * + * Many drivers want the device to wake up the system from D3_hot or D3_cold + * and this function allows them to set that up cleanly - pci_enable_wake() + * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI + * ordering constraints. + * + * This function only returns error code if the device is not capable of + * generating PME# from both D3_hot and D3_cold, and the platform is unable to + * enable wake-up power for it. + */ +int pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ + return pci_pme_capable(dev, PCI_D3cold) ? 
+ pci_enable_wake(dev, PCI_D3cold, enable) : + pci_enable_wake(dev, PCI_D3hot, enable); +} + +/** * pci_target_state - find an appropriate low power state for a given PCI dev * @dev: PCI device * @@ -1242,25 +1266,25 @@ void pci_pm_init(struct pci_dev *dev) dev->d1_support = false; dev->d2_support = false; if (!pci_no_d1d2(dev)) { - if (pmc & PCI_PM_CAP_D1) { - dev_printk(KERN_DEBUG, &dev->dev, "supports D1\n"); + if (pmc & PCI_PM_CAP_D1) dev->d1_support = true; - } - if (pmc & PCI_PM_CAP_D2) { - dev_printk(KERN_DEBUG, &dev->dev, "supports D2\n"); + if (pmc & PCI_PM_CAP_D2) dev->d2_support = true; - } + + if (dev->d1_support || dev->d2_support) + dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n", + dev->d1_support ? " D1" : "", + dev->d2_support ? " D2" : ""); } pmc &= PCI_PM_CAP_PME_MASK; if (pmc) { - dev_printk(KERN_INFO, &dev->dev, - "PME# supported from%s%s%s%s%s\n", - (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", - (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", - (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", - (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", - (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); + dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n", + (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", + (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", + (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", + (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", + (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; /* * Make device's PM flags reflect the wake-up capability, but @@ -1275,6 +1299,38 @@ void pci_pm_init(struct pci_dev *dev) } } +/** + * pci_enable_ari - enable ARI forwarding if hardware supports it + * @dev: the PCI device + */ +void pci_enable_ari(struct pci_dev *dev) +{ + int pos; + u32 cap; + u16 ctrl; + + if (!dev->is_pcie) + return; + + if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && + dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) + return; + + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (!pos) + return; + + pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap); + if (!(cap & PCI_EXP_DEVCAP2_ARI)) + return; + + pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); + ctrl |= PCI_EXP_DEVCTL2_ARI; + pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); + + dev->ari_enabled = 1; +} + int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) { @@ -1358,11 +1414,10 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) return 0; err_out: - dev_warn(&pdev->dev, "BAR %d: can't reserve %s region [%#llx-%#llx]\n", + dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n", bar, pci_resource_flags(pdev, bar) & IORESOURCE_IO ?
"I/O" : "mem", - (unsigned long long)pci_resource_start(pdev, bar), - (unsigned long long)pci_resource_end(pdev, bar)); + &pdev->resource[bar]); return -EBUSY; } @@ -1943,6 +1998,7 @@ EXPORT_SYMBOL(pci_restore_state); EXPORT_SYMBOL(pci_pme_capable); EXPORT_SYMBOL(pci_pme_active); EXPORT_SYMBOL(pci_enable_wake); +EXPORT_SYMBOL(pci_wake_from_d3); EXPORT_SYMBOL(pci_target_state); EXPORT_SYMBOL(pci_prepare_to_sleep); EXPORT_SYMBOL(pci_back_from_sleep); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index d807cd786f2..b205ab866a1 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -1,3 +1,9 @@ +#ifndef DRIVERS_PCI_H +#define DRIVERS_PCI_H + +#define PCI_CFG_SPACE_SIZE 256 +#define PCI_CFG_SPACE_EXP_SIZE 4096 + /* Functions internal to the PCI core code */ extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env); @@ -76,7 +82,13 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; } /* Functions for PCI Hotplug drivers to use */ extern unsigned int pci_do_scan_bus(struct pci_bus *bus); +#ifdef HAVE_PCI_LEGACY +extern void pci_create_legacy_files(struct pci_bus *bus); extern void pci_remove_legacy_files(struct pci_bus *bus); +#else +static inline void pci_create_legacy_files(struct pci_bus *bus) { return; } +static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; } +#endif /* Lock for read/write access to pci device and bus lists */ extern struct rw_semaphore pci_bus_sem; @@ -109,6 +121,7 @@ static inline int pci_no_d1d2(struct pci_dev *dev) extern int pcie_mch_quirk; extern struct device_attribute pci_dev_attrs[]; extern struct device_attribute dev_attr_cpuaffinity; +extern struct device_attribute dev_attr_cpulistaffinity; /** * pci_match_one_device - Tell if a PCI device structure has a matching @@ -144,3 +157,16 @@ struct pci_slot_attribute { }; #define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr) +extern void pci_enable_ari(struct pci_dev *dev); +/** + * pci_ari_enabled - query ARI forwarding status + * @dev: the PCI device + * + * Returns 1 if ARI forwarding is enabled, or 0 if not enabled; + */ +static inline int pci_ari_enabled(struct pci_dev *dev) +{ + return dev->ari_enabled; +} + +#endif /* DRIVERS_PCI_H */ diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 77036f46acf..e390707661d 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c @@ -105,7 +105,7 @@ static irqreturn_t aer_irq(int irq, void *context) unsigned long flags; int pos; - pos = pci_find_aer_capability(pdev->port); + pos = pci_find_ext_capability(pdev->port, PCI_EXT_CAP_ID_ERR); /* * Must lock access to Root Error Status Reg, Root Error ID Reg, * and Root error producer/consumer index @@ -252,7 +252,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) u32 status; int pos; - pos = pci_find_aer_capability(dev); + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); /* Disable Root's interrupt in response to error messages */ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, 0); @@ -316,7 +316,7 @@ static void aer_error_resume(struct pci_dev *dev) pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16); /* Clean AER Root Error Status */ - pos = pci_find_aer_capability(dev); + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); if (dev->error_state == pci_channel_io_normal) diff --git a/drivers/pci/pcie/aer/aerdrv_core.c 
b/drivers/pci/pcie/aer/aerdrv_core.c index ee5e7b5176d..dfc63d01f20 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -28,41 +28,15 @@ static int forceload; module_param(forceload, bool, 0); -#define PCI_CFG_SPACE_SIZE (0x100) -int pci_find_aer_capability(struct pci_dev *dev) -{ - int pos; - u32 reg32 = 0; - - /* Check if it's a pci-express device */ - pos = pci_find_capability(dev, PCI_CAP_ID_EXP); - if (!pos) - return 0; - - /* Check if it supports pci-express AER */ - pos = PCI_CFG_SPACE_SIZE; - while (pos) { - if (pci_read_config_dword(dev, pos, ®32)) - return 0; - - /* some broken boards return ~0 */ - if (reg32 == 0xffffffff) - return 0; - - if (PCI_EXT_CAP_ID(reg32) == PCI_EXT_CAP_ID_ERR) - break; - - pos = reg32 >> 20; - } - - return pos; -} - int pci_enable_pcie_error_reporting(struct pci_dev *dev) { u16 reg16 = 0; int pos; + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); + if (!pos) + return -EIO; + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); if (!pos) return -EIO; @@ -102,7 +76,7 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) int pos; u32 status, mask; - pos = pci_find_aer_capability(dev); + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (!pos) return -EIO; @@ -123,7 +97,7 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev) int pos; u32 status; - pos = pci_find_aer_capability(dev); + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (!pos) return -EIO; @@ -502,7 +476,7 @@ static void handle_error_source(struct pcie_device * aerdev, * Correctable error does not need software intevention. * No need to go through error recovery process. */ - pos = pci_find_aer_capability(dev); + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (pos) pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, info.status); @@ -542,7 +516,7 @@ void aer_enable_rootport(struct aer_rpc *rpc) reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK); pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16); - aer_pos = pci_find_aer_capability(pdev); + aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); /* Clear error status */ pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, ®32); pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32); @@ -579,7 +553,7 @@ static void disable_root_aer(struct aer_rpc *rpc) u32 reg32; int pos; - pos = pci_find_aer_capability(pdev); + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); /* Disable Root's interrupt in response to error messages */ pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0); @@ -618,7 +592,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) { int pos; - pos = pci_find_aer_capability(dev); + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); /* The device might not support AER */ if (!pos) @@ -755,7 +729,6 @@ int aer_init(struct pcie_device *dev) return AER_SUCCESS; } -EXPORT_SYMBOL_GPL(pci_find_aer_capability); EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 9a7c9e1408a..8f63f4c6b85 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -527,10 +527,10 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) */ pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP, ®32); - if (!(reg32 & PCI_EXP_DEVCAP_RBER && !aspm_force)) { - printk("Pre-1.1 PCIe device detected, " - 
"disable ASPM for %s. It can be enabled forcedly" - " with 'pcie_aspm=force'\n", pci_name(pdev)); + if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) { + dev_printk(KERN_INFO, &child_dev->dev, "disabling ASPM" + " on pre-1.1 PCIe device. You can enable it" + " with 'pcie_aspm=force'\n"); return -EINVAL; } } diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 3656e0349dd..2529f3f2ea5 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -25,7 +25,6 @@ #define PCIE_CAPABILITIES_REG 0x2 #define PCIE_SLOT_CAPABILITIES_REG 0x14 #define PCIE_PORT_DEVICE_MAXSERVICES 4 -#define PCI_CFG_SPACE_SIZE 256 #define get_descriptor_id(type, service) (((type - 4) << 4) | service) diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 890f0d2b370..2e091e01482 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -195,24 +195,11 @@ static int get_port_device_capability(struct pci_dev *dev) /* PME Capable - root port capability */ if (((reg16 >> 4) & PORT_TYPE_MASK) == PCIE_RC_PORT) services |= PCIE_PORT_SERVICE_PME; - - pos = PCI_CFG_SPACE_SIZE; - while (pos) { - pci_read_config_dword(dev, pos, ®32); - switch (reg32 & 0xffff) { - case PCI_EXT_CAP_ID_ERR: - services |= PCIE_PORT_SERVICE_AER; - pos = reg32 >> 20; - break; - case PCI_EXT_CAP_ID_VC: - services |= PCIE_PORT_SERVICE_VC; - pos = reg32 >> 20; - break; - default: - pos = 0; - break; - } - } + + if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) + services |= PCIE_PORT_SERVICE_AER; + if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) + services |= PCIE_PORT_SERVICE_VC; return services; } diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 367c9c20000..584422da8d8 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -91,7 +91,7 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev, pci_set_master(dev); if (!dev->irq && dev->pin) { - dev_warn(&dev->dev, "device [%04x/%04x] has invalid IRQ; " + dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; " "check vendor BIOS\n", dev->vendor, dev->device); } if (pcie_port_device_register(dev)) { diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 36698e57b97..aaaf0a1fed2 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -14,8 +14,6 @@ #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ #define CARDBUS_RESERVE_BUSNR 3 -#define PCI_CFG_SPACE_SIZE 256 -#define PCI_CFG_SPACE_EXP_SIZE 4096 /* Ugh. Need to stop exporting this to modules. */ LIST_HEAD(pci_root_buses); @@ -44,72 +42,6 @@ int no_pci_devices(void) } EXPORT_SYMBOL(no_pci_devices); -#ifdef HAVE_PCI_LEGACY -/** - * pci_create_legacy_files - create legacy I/O port and memory files - * @b: bus to create files under - * - * Some platforms allow access to legacy I/O port and ISA memory space on - * a per-bus basis. This routine creates the files and ties them into - * their associated read, write and mmap files from pci-sysfs.c - * - * On error unwind, but don't propogate the error to the caller - * as it is ok to set up the PCI bus without these files. 
- */ -static void pci_create_legacy_files(struct pci_bus *b) -{ - int error; - - b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2, - GFP_ATOMIC); - if (!b->legacy_io) - goto kzalloc_err; - - b->legacy_io->attr.name = "legacy_io"; - b->legacy_io->size = 0xffff; - b->legacy_io->attr.mode = S_IRUSR | S_IWUSR; - b->legacy_io->read = pci_read_legacy_io; - b->legacy_io->write = pci_write_legacy_io; - error = device_create_bin_file(&b->dev, b->legacy_io); - if (error) - goto legacy_io_err; - - /* Allocated above after the legacy_io struct */ - b->legacy_mem = b->legacy_io + 1; - b->legacy_mem->attr.name = "legacy_mem"; - b->legacy_mem->size = 1024*1024; - b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; - b->legacy_mem->mmap = pci_mmap_legacy_mem; - error = device_create_bin_file(&b->dev, b->legacy_mem); - if (error) - goto legacy_mem_err; - - return; - -legacy_mem_err: - device_remove_bin_file(&b->dev, b->legacy_io); -legacy_io_err: - kfree(b->legacy_io); - b->legacy_io = NULL; -kzalloc_err: - printk(KERN_WARNING "pci: warning: could not create legacy I/O port " - "and ISA memory resources to sysfs\n"); - return; -} - -void pci_remove_legacy_files(struct pci_bus *b) -{ - if (b->legacy_io) { - device_remove_bin_file(&b->dev, b->legacy_io); - device_remove_bin_file(&b->dev, b->legacy_mem); - kfree(b->legacy_io); /* both are allocated here */ - } -} -#else /* !HAVE_PCI_LEGACY */ -static inline void pci_create_legacy_files(struct pci_bus *bus) { return; } -void pci_remove_legacy_files(struct pci_bus *bus) { return; } -#endif /* HAVE_PCI_LEGACY */ - /* * PCI Bus Class Devices */ @@ -219,7 +151,7 @@ static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar) res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK; - if (res->flags == PCI_BASE_ADDRESS_MEM_TYPE_64) + if (res->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) return pci_bar_mem64; return pci_bar_mem32; } @@ -304,9 +236,8 @@ static int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, } else { res->start = l64; res->end = l64 + sz64; - printk(KERN_DEBUG "PCI: %s reg %x 64bit mmio: [%llx, %llx]\n", - pci_name(dev), pos, (unsigned long long)res->start, - (unsigned long long)res->end); + dev_printk(KERN_DEBUG, &dev->dev, + "reg %x 64bit mmio: %pR\n", pos, res); } } else { sz = pci_size(l, sz, mask); @@ -316,9 +247,10 @@ static int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, res->start = l; res->end = l + sz; - printk(KERN_DEBUG "PCI: %s reg %x %s: [%llx, %llx]\n", pci_name(dev), - pos, (res->flags & IORESOURCE_IO) ? "io port":"32bit mmio", - (unsigned long long)res->start, (unsigned long long)res->end); + + dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos, + (res->flags & IORESOURCE_IO) ? 
"io port" : "32bit mmio", + res); } out: @@ -389,9 +321,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) res->start = base; if (!res->end) res->end = limit + 0xfff; - printk(KERN_DEBUG "PCI: bridge %s io port: [%llx, %llx]\n", - pci_name(dev), (unsigned long long) res->start, - (unsigned long long) res->end); + dev_printk(KERN_DEBUG, &dev->dev, "bridge io port: %pR\n", res); } res = child->resource[1]; @@ -403,9 +333,8 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; res->start = base; res->end = limit + 0xfffff; - printk(KERN_DEBUG "PCI: bridge %s 32bit mmio: [%llx, %llx]\n", - pci_name(dev), (unsigned long long) res->start, - (unsigned long long) res->end); + dev_printk(KERN_DEBUG, &dev->dev, "bridge 32bit mmio: %pR\n", + res); } res = child->resource[2]; @@ -441,9 +370,9 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH; res->start = base; res->end = limit + 0xfffff; - printk(KERN_DEBUG "PCI: bridge %s %sbit mmio pref: [%llx, %llx]\n", - pci_name(dev), (res->flags & PCI_PREF_RANGE_TYPE_64) ? "64" : "32", - (unsigned long long) res->start, (unsigned long long) res->end); + dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n", + (res->flags & PCI_PREF_RANGE_TYPE_64) ? "64" : "32", + res); } } @@ -764,7 +693,7 @@ static int pci_setup_device(struct pci_dev * dev) dev->class = class; class >>= 8; - dev_dbg(&dev->dev, "found [%04x/%04x] class %06x header type %02x\n", + dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n", dev->vendor, dev->device, class, dev->hdr_type); /* "Unknown power state" */ @@ -846,6 +775,11 @@ static int pci_setup_device(struct pci_dev * dev) return 0; } +static void pci_release_capabilities(struct pci_dev *dev) +{ + pci_vpd_release(dev); +} + /** * pci_release_dev - free a pci device structure when all users of it are finished. * @dev: device that's been disconnected @@ -858,7 +792,7 @@ static void pci_release_dev(struct device *dev) struct pci_dev *pci_dev; pci_dev = to_pci_dev(dev); - pci_vpd_release(pci_dev); + pci_release_capabilities(pci_dev); kfree(pci_dev); } @@ -889,8 +823,9 @@ static void set_pcie_port_type(struct pci_dev *pdev) int pci_cfg_space_size_ext(struct pci_dev *dev) { u32 status; + int pos = PCI_CFG_SPACE_SIZE; - if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL) + if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL) goto fail; if (status == 0xffffffff) goto fail; @@ -938,8 +873,6 @@ struct pci_dev *alloc_pci_dev(void) INIT_LIST_HEAD(&dev->bus_list); - pci_msi_init_pci_dev(dev); - return dev; } EXPORT_SYMBOL(alloc_pci_dev); @@ -951,6 +884,7 @@ EXPORT_SYMBOL(alloc_pci_dev); static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) { struct pci_dev *dev; + struct pci_slot *slot; u32 l; u8 hdr_type; int delay = 1; @@ -999,6 +933,10 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) dev->error_state = pci_channel_io_normal; set_pcie_port_type(dev); + list_for_each_entry(slot, &bus->slots, list) + if (PCI_SLOT(devfn) == slot->number) + dev->slot = slot; + /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) set this higher, assuming the system even supports it. 
*/ dev->dma_mask = 0xffffffff; @@ -1007,9 +945,22 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) return NULL; } + return dev; +} + +static void pci_init_capabilities(struct pci_dev *dev) +{ + /* MSI/MSI-X list */ + pci_msi_init_pci_dev(dev); + + /* Power Management */ + pci_pm_init(dev); + + /* Vital Product Data */ pci_vpd_pci22_init(dev); - return dev; + /* Alternative Routing-ID Forwarding */ + pci_enable_ari(dev); } void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) @@ -1028,8 +979,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) /* Fix up broken headers */ pci_fixup_device(pci_fixup_header, dev); - /* Initialize power management of the device */ - pci_pm_init(dev); + /* Initialize various capabilities */ + pci_init_capabilities(dev); /* * Add the device to our list of discovered devices @@ -1237,8 +1188,11 @@ EXPORT_SYMBOL(pci_scan_bridge); EXPORT_SYMBOL_GPL(pci_scan_child_bus); #endif -static int __init pci_sort_bf_cmp(const struct pci_dev *a, const struct pci_dev *b) +static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b) { + const struct pci_dev *a = to_pci_dev(d_a); + const struct pci_dev *b = to_pci_dev(d_b); + if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1; else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1; @@ -1251,50 +1205,7 @@ static int __init pci_sort_bf_cmp(const struct pci_dev *a, const struct pci_dev return 0; } -/* - * Yes, this forcably breaks the klist abstraction temporarily. It - * just wants to sort the klist, not change reference counts and - * take/drop locks rapidly in the process. It does all this while - * holding the lock for the list, so objects can't otherwise be - * added/removed while we're swizzling. 
- */ -static void __init pci_insertion_sort_klist(struct pci_dev *a, struct list_head *list) -{ - struct list_head *pos; - struct klist_node *n; - struct device *dev; - struct pci_dev *b; - - list_for_each(pos, list) { - n = container_of(pos, struct klist_node, n_node); - dev = container_of(n, struct device, knode_bus); - b = to_pci_dev(dev); - if (pci_sort_bf_cmp(a, b) <= 0) { - list_move_tail(&a->dev.knode_bus.n_node, &b->dev.knode_bus.n_node); - return; - } - } - list_move_tail(&a->dev.knode_bus.n_node, list); -} - void __init pci_sort_breadthfirst(void) { - LIST_HEAD(sorted_devices); - struct list_head *pos, *tmp; - struct klist_node *n; - struct device *dev; - struct pci_dev *pdev; - struct klist *device_klist; - - device_klist = bus_get_device_klist(&pci_bus_type); - - spin_lock(&device_klist->k_lock); - list_for_each_safe(pos, tmp, &device_klist->k_list) { - n = container_of(pos, struct klist_node, n_node); - dev = container_of(n, struct device, knode_bus); - pdev = to_pci_dev(dev); - pci_insertion_sort_klist(pdev, &sorted_devices); - } - list_splice(&sorted_devices, &device_klist->k_list); - spin_unlock(&device_klist->k_lock); + bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp); } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 9236e7f869c..96cf8ecd04c 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -24,6 +24,14 @@ #include <linux/kallsyms.h> #include "pci.h" +int isa_dma_bridge_buggy; +EXPORT_SYMBOL(isa_dma_bridge_buggy); +int pci_pci_problems; +EXPORT_SYMBOL(pci_pci_problems); +int pcie_mch_quirk; +EXPORT_SYMBOL(pcie_mch_quirk); + +#ifdef CONFIG_PCI_QUIRKS /* The Mellanox Tavor device gives false positive parity errors * Mark this device with a broken_parity_status, to allow * PCI scanning code to "skip" this now blacklisted device. @@ -35,6 +43,20 @@ static void __devinit quirk_mellanox_tavor(struct pci_dev *dev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor); +/* Many VIA bridges seem to corrupt data for DAC. Disable it here */ +int forbid_dac __read_mostly; +EXPORT_SYMBOL(forbid_dac); + +static __devinit void via_no_dac(struct pci_dev *dev) +{ + if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { + dev_info(&dev->dev, + "VIA PCI bridge detected. Disabling DAC.\n"); + forbid_dac = 1; + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); + /* Deal with broken BIOS'es that neglect to enable passive release, which can cause problems in combination with the 82441FX/PPro MTRRs */ static void quirk_passive_release(struct pci_dev *dev) @@ -62,8 +84,6 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_p This appears to be BIOS not version dependent. 
So presumably there is a chipset level fix */ -int isa_dma_bridge_buggy; -EXPORT_SYMBOL(isa_dma_bridge_buggy); static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev) { @@ -84,9 +104,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_d DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); -int pci_pci_problems; -EXPORT_SYMBOL(pci_pci_problems); - /* * Chipsets where PCI->PCI transfers vanish or hang */ @@ -1362,9 +1379,6 @@ static void __init quirk_alder_ioapic(struct pci_dev *pdev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic); #endif -int pcie_mch_quirk; -EXPORT_SYMBOL(pcie_mch_quirk); - static void __devinit quirk_pcie_mch(struct pci_dev *pdev) { pcie_mch_quirk = 1; @@ -1555,85 +1569,6 @@ static void __devinit fixup_rev1_53c810(struct pci_dev* dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810); -static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) -{ - while (f < end) { - if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) && - (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { -#ifdef DEBUG - dev_dbg(&dev->dev, "calling "); - print_fn_descriptor_symbol("%s\n", f->hook); -#endif - f->hook(dev); - } - f++; - } -} - -extern struct pci_fixup __start_pci_fixups_early[]; -extern struct pci_fixup __end_pci_fixups_early[]; -extern struct pci_fixup __start_pci_fixups_header[]; -extern struct pci_fixup __end_pci_fixups_header[]; -extern struct pci_fixup __start_pci_fixups_final[]; -extern struct pci_fixup __end_pci_fixups_final[]; -extern struct pci_fixup __start_pci_fixups_enable[]; -extern struct pci_fixup __end_pci_fixups_enable[]; -extern struct pci_fixup __start_pci_fixups_resume[]; -extern struct pci_fixup __end_pci_fixups_resume[]; -extern struct pci_fixup __start_pci_fixups_resume_early[]; -extern struct pci_fixup __end_pci_fixups_resume_early[]; -extern struct pci_fixup __start_pci_fixups_suspend[]; -extern struct pci_fixup __end_pci_fixups_suspend[]; - - -void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) -{ - struct pci_fixup *start, *end; - - switch(pass) { - case pci_fixup_early: - start = __start_pci_fixups_early; - end = __end_pci_fixups_early; - break; - - case pci_fixup_header: - start = __start_pci_fixups_header; - end = __end_pci_fixups_header; - break; - - case pci_fixup_final: - start = __start_pci_fixups_final; - end = __end_pci_fixups_final; - break; - - case pci_fixup_enable: - start = __start_pci_fixups_enable; - end = __end_pci_fixups_enable; - break; - - case pci_fixup_resume: - start = __start_pci_fixups_resume; - end = __end_pci_fixups_resume; - break; - - case pci_fixup_resume_early: - start = __start_pci_fixups_resume_early; - end = __end_pci_fixups_resume_early; - break; - - case pci_fixup_suspend: - start = __start_pci_fixups_suspend; - end = __end_pci_fixups_suspend; - break; - - default: - /* stupid compiler warning, you would think with an enum... 
*/ - return; - } - pci_do_fixups(dev, start, end); -} -EXPORT_SYMBOL(pci_fixup_device); - /* Enable 1k I/O space granularity on the Intel P64H2 */ static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev) { @@ -2007,3 +1942,82 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, quirk_msi_intx_disable_bug); #endif /* CONFIG_PCI_MSI */ + +static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) +{ + while (f < end) { + if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) && + (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { + dev_dbg(&dev->dev, "calling %pF\n", f->hook); + f->hook(dev); + } + f++; + } +} + +extern struct pci_fixup __start_pci_fixups_early[]; +extern struct pci_fixup __end_pci_fixups_early[]; +extern struct pci_fixup __start_pci_fixups_header[]; +extern struct pci_fixup __end_pci_fixups_header[]; +extern struct pci_fixup __start_pci_fixups_final[]; +extern struct pci_fixup __end_pci_fixups_final[]; +extern struct pci_fixup __start_pci_fixups_enable[]; +extern struct pci_fixup __end_pci_fixups_enable[]; +extern struct pci_fixup __start_pci_fixups_resume[]; +extern struct pci_fixup __end_pci_fixups_resume[]; +extern struct pci_fixup __start_pci_fixups_resume_early[]; +extern struct pci_fixup __end_pci_fixups_resume_early[]; +extern struct pci_fixup __start_pci_fixups_suspend[]; +extern struct pci_fixup __end_pci_fixups_suspend[]; + + +void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) +{ + struct pci_fixup *start, *end; + + switch(pass) { + case pci_fixup_early: + start = __start_pci_fixups_early; + end = __end_pci_fixups_early; + break; + + case pci_fixup_header: + start = __start_pci_fixups_header; + end = __end_pci_fixups_header; + break; + + case pci_fixup_final: + start = __start_pci_fixups_final; + end = __end_pci_fixups_final; + break; + + case pci_fixup_enable: + start = __start_pci_fixups_enable; + end = __end_pci_fixups_enable; + break; + + case pci_fixup_resume: + start = __start_pci_fixups_resume; + end = __end_pci_fixups_resume; + break; + + case pci_fixup_resume_early: + start = __start_pci_fixups_resume_early; + end = __end_pci_fixups_resume_early; + break; + + case pci_fixup_suspend: + start = __start_pci_fixups_suspend; + end = __end_pci_fixups_suspend; + break; + + default: + /* stupid compiler warning, you would think with an enum... 
*/ + return; + } + pci_do_fixups(dev, start, end); +} +#else +void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} +#endif +EXPORT_SYMBOL(pci_fixup_device); diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index bdc2a44d68e..042e0892442 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -73,6 +73,7 @@ void pci_remove_bus(struct pci_bus *pci_bus) up_write(&pci_bus_sem); pci_remove_legacy_files(pci_bus); device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity); + device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity); device_unregister(&pci_bus->dev); } EXPORT_SYMBOL(pci_remove_bus); @@ -114,13 +115,9 @@ void pci_remove_behind_bridge(struct pci_dev *dev) { struct list_head *l, *n; - if (dev->subordinate) { - list_for_each_safe(l, n, &dev->subordinate->devices) { - struct pci_dev *dev = pci_dev_b(l); - - pci_remove_bus_device(dev); - } - } + if (dev->subordinate) + list_for_each_safe(l, n, &dev->subordinate->devices) + pci_remove_bus_device(pci_dev_b(l)); } static void pci_stop_bus_devices(struct pci_bus *bus) diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index bd5c0e03139..1f5f6143f35 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c @@ -21,7 +21,7 @@ * between the ROM and other resources, so enabling it may disable access * to MMIO registers or other card memory. */ -static int pci_enable_rom(struct pci_dev *pdev) +int pci_enable_rom(struct pci_dev *pdev) { struct resource *res = pdev->resource + PCI_ROM_RESOURCE; struct pci_bus_region region; @@ -45,7 +45,7 @@ static int pci_enable_rom(struct pci_dev *pdev) * Disable ROM decoding on a PCI device by turning off the last bit in the * ROM BAR. */ -static void pci_disable_rom(struct pci_dev *pdev) +void pci_disable_rom(struct pci_dev *pdev) { u32 rom_addr; pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); @@ -260,3 +260,5 @@ void pci_cleanup_rom(struct pci_dev *pdev) EXPORT_SYMBOL(pci_map_rom); EXPORT_SYMBOL(pci_unmap_rom); +EXPORT_SYMBOL_GPL(pci_enable_rom); +EXPORT_SYMBOL_GPL(pci_disable_rom); diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 3b3b5f17879..4edfc4731bd 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c @@ -162,7 +162,7 @@ EXPORT_SYMBOL(pci_find_slot); * time. */ struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, - const struct pci_dev *from) + struct pci_dev *from) { struct pci_dev *pdev; @@ -263,7 +263,7 @@ static int match_pci_dev_by_id(struct device *dev, void *data) * this file. 
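The pci_get_*() family these hunks touch returns its result with a reference held; callers iterate by feeding the previous device back in, and the helper drops that reference internally, so the usual loop leaks nothing. A typical (illustrative) walk over all Intel devices:

#include <linux/pci.h>

static void walk_intel_devices(void)
{
	struct pci_dev *pdev = NULL;

	/* pci_get_device() puts the ref on 'from' and holds one on the result */
	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pdev)))
		dev_info(&pdev->dev, "found [%04x:%04x]\n",
			 pdev->vendor, pdev->device);
}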
  */
 static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
-					 const struct pci_dev *from)
+					 struct pci_dev *from)
 {
 	struct device *dev;
 	struct device *dev_start = NULL;
@@ -303,7 +303,7 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
  */
 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
 			       unsigned int ss_vendor, unsigned int ss_device,
-			       const struct pci_dev *from)
+			       struct pci_dev *from)
 {
 	struct pci_dev *pdev;
 	struct pci_device_id *id;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 3abbfad9dda..ea979f2bc6d 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -299,7 +299,7 @@ static void pbus_size_io(struct pci_bus *bus)
 
 		if (r->parent || !(r->flags & IORESOURCE_IO))
 			continue;
-		r_size = r->end - r->start + 1;
+		r_size = resource_size(r);
 
 		if (r_size < 0x400)
 			/* Might be re-aligned for ISA */
@@ -350,16 +350,13 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
 
 			if (r->parent || (r->flags & mask) != type)
 				continue;
-			r_size = r->end - r->start + 1;
+			r_size = resource_size(r);
 			/* For bridges size != alignment */
 			align = resource_alignment(r);
 			order = __ffs(align) - 20;
 			if (order > 11) {
 				dev_warn(&dev->dev, "BAR %d bad alignment %llx: "
-					 "%#016llx-%#016llx\n", i,
-					 (unsigned long long)align,
-					 (unsigned long long)r->start,
-					 (unsigned long long)r->end);
+					 "%pR\n", i, (unsigned long long)align, r);
 				r->flags = 0;
 				continue;
 			}
@@ -378,11 +375,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
 	align = 0;
 	min_align = 0;
 	for (order = 0; order <= max_order; order++) {
-#ifdef CONFIG_RESOURCES_64BIT
-		resource_size_t align1 = 1ULL << (order + 20);
-#else
-		resource_size_t align1 = 1U << (order + 20);
-#endif
+		resource_size_t align1 = 1;
+
+		align1 <<= (order + 20);
+
 		if (!align)
 			min_align = align1;
 		else if (ALIGN(align + min_align, min_align) < align1)
@@ -540,11 +536,9 @@ static void pci_bus_dump_res(struct pci_bus *bus)
 		if (!res)
 			continue;
 
-		printk(KERN_INFO "bus: %02x index %x %s: [%llx, %llx]\n",
-			bus->number, i,
-			(res->flags & IORESOURCE_IO) ? "io port" : "mmio",
-			(unsigned long long) res->start,
-			(unsigned long long) res->end);
+		printk(KERN_INFO "bus: %02x index %x %s: %pR\n",
+			bus->number, i,
+			(res->flags & IORESOURCE_IO) ? "io port" : "mmio", res);
 	}
 }
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 1a5fc83c71b..2dbd96cce2d 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -49,10 +49,8 @@ void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
 
 	pcibios_resource_to_bus(dev, &region, res);
 
-	dev_dbg(&dev->dev, "BAR %d: got res [%#llx-%#llx] bus [%#llx-%#llx] "
-		"flags %#lx\n", resno,
-		 (unsigned long long)res->start,
-		 (unsigned long long)res->end,
+	dev_dbg(&dev->dev, "BAR %d: got res %pR bus [%#llx-%#llx] "
+		"flags %#lx\n", resno, res,
 		 (unsigned long long)region.start,
 		 (unsigned long long)region.end,
 		 (unsigned long)res->flags);
@@ -114,13 +112,11 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
 
 	err = insert_resource(root, res);
 	if (err) {
-		dev_err(&dev->dev, "BAR %d: %s of %s [%#llx-%#llx]\n",
+		dev_err(&dev->dev, "BAR %d: %s of %s %pR\n",
 			resource,
 			root ? "address space collision on" :
 				"no parent found for",
-			dtype,
-			(unsigned long long)res->start,
-			(unsigned long long)res->end);
+			dtype, res);
 	}
 
 	return err;
@@ -133,15 +129,14 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
 	resource_size_t size, min, align;
 	int ret;
 
-	size = res->end - res->start + 1;
+	size = resource_size(res);
 	min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
 
 	align = resource_alignment(res);
 	if (!align) {
 		dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus "
-			"alignment) [%#llx-%#llx] flags %#lx\n",
-			resno, (unsigned long long)res->start,
-			(unsigned long long)res->end, res->flags);
+			"alignment) %pR flags %#lx\n",
+			resno, res, res->flags);
 		return -EINVAL;
 	}
@@ -162,11 +157,8 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
 	}
 
 	if (ret) {
-		dev_err(&dev->dev, "BAR %d: can't allocate %s resource "
-			"[%#llx-%#llx]\n", resno,
-			res->flags & IORESOURCE_IO ? "I/O" : "mem",
-			(unsigned long long)res->start,
-			(unsigned long long)res->end);
+		dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
+			resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
 	} else {
 		res->flags &= ~IORESOURCE_STARTALIGN;
 		if (resno < PCI_BRIDGE_RESOURCES)
@@ -202,11 +194,8 @@ int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
 	}
 
 	if (ret) {
-		dev_err(&dev->dev, "BAR %d: can't allocate %s resource "
-			"[%#llx-%#llx\n]", resno,
-			res->flags & IORESOURCE_IO ? "I/O" : "mem",
-			(unsigned long long)res->start,
-			(unsigned long long)res->end);
+		dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
+			resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
 	} else if (resno < PCI_BRIDGE_RESOURCES) {
 		pci_update_resource(dev, res, resno);
 	}
@@ -237,9 +226,8 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
 		r_align = resource_alignment(r);
 		if (!r_align) {
 			dev_warn(&dev->dev, "BAR %d: bogus alignment "
-				"[%#llx-%#llx] flags %#lx\n",
-				i, (unsigned long long)r->start,
-				(unsigned long long)r->end, r->flags);
+				"%pR flags %#lx\n",
+				i, r, r->flags);
 			continue;
 		}
 		for (list = head; ; list = list->next) {
@@ -287,9 +275,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
 
 		if (!r->parent) {
 			dev_err(&dev->dev, "device not available because of "
-				"BAR %d [%#llx-%#llx] collisions\n", i,
-				(unsigned long long) r->start,
-				(unsigned long long) r->end);
+				"BAR %d %pR collisions\n", i, r);
 			return -EINVAL;
 		}
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 7e5b85cbd94..0c6db03698e 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -49,11 +49,16 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
 
 static void pci_slot_release(struct kobject *kobj)
 {
+	struct pci_dev *dev;
 	struct pci_slot *slot = to_pci_slot(kobj);
 
 	pr_debug("%s: releasing pci_slot on %x:%d\n", __func__,
 		 slot->bus->number, slot->number);
 
+	list_for_each_entry(dev, &slot->bus->devices, bus_list)
+		if (PCI_SLOT(dev->devfn) == slot->number)
+			dev->slot = NULL;
+
 	list_del(&slot->list);
 
 	kfree(slot);
@@ -108,6 +113,7 @@ static struct kobj_type pci_slot_ktype = {
 
 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
 				 const char *name)
 {
+	struct pci_dev *dev;
 	struct pci_slot *slot;
 	int err;
@@ -150,6 +156,10 @@ placeholder:
 	INIT_LIST_HEAD(&slot->list);
 	list_add(&slot->list, &parent->slots);
 
+	list_for_each_entry(dev, &parent->devices, bus_list)
+		if (PCI_SLOT(dev->devfn) == slot_nr)
+			dev->slot = slot;
+
 	/* Don't care if debug printk has a -1 for slot_nr */
 	pr_debug("%s: created pci_slot on %04x:%02x:%02x\n",
 		 __func__, pci_domain_nr(parent), parent->number, slot_nr);
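
Note on the pci_remove_behind_bridge() cleanup above: the loop must stay list_for_each_safe() even in its condensed form, because pci_remove_bus_device() unlinks and frees the node the cursor is standing on. The _safe variant from <linux/list.h> caches the successor before the loop body runs:

	/* from <linux/list.h>: n holds the next node so pos may be freed */
	#define list_for_each_safe(pos, n, head) \
		for (pos = (head)->next, n = pos->next; pos != (head); \
			pos = n, n = pos->next)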
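With pci_enable_rom() and pci_disable_rom() made non-static and exported (GPL-only) above, modules can toggle ROM decoding directly instead of only implicitly through pci_map_rom()/pci_unmap_rom(). A minimal sketch of such a caller; demo_with_rom_enabled() is a hypothetical function, not part of this patch:

	static int demo_with_rom_enabled(struct pci_dev *pdev)
	{
		int rc;

		rc = pci_enable_rom(pdev);	/* set the enable bit in the ROM BAR */
		if (rc)
			return rc;
		/*
		 * ... device-specific accesses that need ROM decoding live.
		 * Per the comment in rom.c, decoding may steal the address
		 * decoder from other BARs, so keep this window short.
		 */
		pci_disable_rom(pdev);		/* clear the enable bit again */
		return 0;
	}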
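The r->end - r->start + 1 conversions in setup-bus.c and setup-res.c use resource_size() from <linux/ioport.h>, which centralizes the inclusive-range arithmetic each call site previously open-coded:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		/* a struct resource spans [start, end] inclusive */
		return res->end - res->start + 1;
	}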
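The pbus_size_mem() hunk that drops the CONFIG_RESOURCES_64BIT #ifdef works because the shift now happens in the destination type: align1 is declared resource_size_t, initialized to 1 and shifted in place, so the computation is carried out at the full width of resource_size_t on every configuration, with no need to pick between the 1ULL and 1U literal forms at build time.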
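All of the printk/dev_dbg/dev_err changes above follow one pattern: the %pR format extension lets the kernel's vsnprintf() print a struct resource * directly, replacing pairs of manually cast start/end arguments. A before/after sketch; demo_report_bar() is hypothetical and only illustrates the calling convention:

	static void demo_report_bar(struct pci_dev *dev, int bar)
	{
		struct resource *res = &dev->resource[bar];

		/* old style: two manually cast arguments per resource */
		dev_info(&dev->dev, "BAR %d: [%#llx-%#llx]\n", bar,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end);

		/* new style: one pointer, decoded by the %pR extension */
		dev_info(&dev->dev, "BAR %d: %pR\n", bar, res);
	}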
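On the slot.c hunks: the new dev->slot back-pointer is wired up purely by slot number, so every function of a multi-function device ends up pointing at the same struct pci_slot. PCI_SLOT() from <linux/pci.h> extracts that number from the encoded devfn:

	#define PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)	/* device (slot) number */
	#define PCI_FUNC(devfn)	((devfn) & 0x07)	/* function number */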