From 23d6f82bd1f07886b3a974c5193baa715475dd37 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 11 Oct 2007 11:16:23 +0200 Subject: i386: move kernel/acpi Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/i386/kernel/Makefile_32 | 2 +- arch/i386/kernel/acpi/Makefile | 5 - arch/i386/kernel/acpi/Makefile_32 | 10 - arch/i386/kernel/acpi/boot.c | 1326 --------------------------------- arch/i386/kernel/acpi/cstate.c | 164 ---- arch/i386/kernel/acpi/earlyquirk_32.c | 84 --- arch/i386/kernel/acpi/processor.c | 75 -- arch/i386/kernel/acpi/sleep_32.c | 110 --- arch/i386/kernel/acpi/wakeup_32.S | 321 -------- arch/x86/kernel/acpi/Makefile | 5 + arch/x86/kernel/acpi/Makefile_32 | 10 + arch/x86/kernel/acpi/boot.c | 1326 +++++++++++++++++++++++++++++++++ arch/x86/kernel/acpi/cstate.c | 164 ++++ arch/x86/kernel/acpi/earlyquirk_32.c | 84 +++ arch/x86/kernel/acpi/processor.c | 75 ++ arch/x86/kernel/acpi/sleep_32.c | 110 +++ arch/x86/kernel/acpi/wakeup_32.S | 321 ++++++++ arch/x86_64/kernel/acpi/Makefile | 2 +- arch/x86_64/kernel/acpi/Makefile_64 | 4 +- 19 files changed, 2099 insertions(+), 2099 deletions(-) delete mode 100644 arch/i386/kernel/acpi/Makefile delete mode 100644 arch/i386/kernel/acpi/Makefile_32 delete mode 100644 arch/i386/kernel/acpi/boot.c delete mode 100644 arch/i386/kernel/acpi/cstate.c delete mode 100644 arch/i386/kernel/acpi/earlyquirk_32.c delete mode 100644 arch/i386/kernel/acpi/processor.c delete mode 100644 arch/i386/kernel/acpi/sleep_32.c delete mode 100644 arch/i386/kernel/acpi/wakeup_32.S create mode 100644 arch/x86/kernel/acpi/Makefile create mode 100644 arch/x86/kernel/acpi/Makefile_32 create mode 100644 arch/x86/kernel/acpi/boot.c create mode 100644 arch/x86/kernel/acpi/cstate.c create mode 100644 arch/x86/kernel/acpi/earlyquirk_32.c create mode 100644 arch/x86/kernel/acpi/processor.c create mode 100644 arch/x86/kernel/acpi/sleep_32.c create mode 100644 arch/x86/kernel/acpi/wakeup_32.S (limited to 'arch') diff --git a/arch/i386/kernel/Makefile_32 b/arch/i386/kernel/Makefile_32 index d9dd3d677b7..af8304b921d 100644 --- a/arch/i386/kernel/Makefile_32 +++ b/arch/i386/kernel/Makefile_32 @@ -11,7 +11,7 @@ obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \ obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += cpu/ -obj-y += acpi/ +obj-y += ../../x86/kernel/acpi/ obj-$(CONFIG_X86_BIOS_REBOOT) += reboot_32.o obj-$(CONFIG_MCA) += mca_32.o obj-$(CONFIG_X86_MSR) += msr.o diff --git a/arch/i386/kernel/acpi/Makefile b/arch/i386/kernel/acpi/Makefile deleted file mode 100644 index 6e00bfeb59a..00000000000 --- a/arch/i386/kernel/acpi/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -ifeq ($(CONFIG_X86_32),y) -include ${srctree}/arch/i386/kernel/acpi/Makefile_32 -else -include ${srctree}/arch/x86_64/kernel/acpi/Makefile_64 -endif diff --git a/arch/i386/kernel/acpi/Makefile_32 b/arch/i386/kernel/acpi/Makefile_32 deleted file mode 100644 index a4852a2e919..00000000000 --- a/arch/i386/kernel/acpi/Makefile_32 +++ /dev/null @@ -1,10 +0,0 @@ -obj-$(CONFIG_ACPI) += boot.o -ifneq ($(CONFIG_PCI),) -obj-$(CONFIG_X86_IO_APIC) += earlyquirk_32.o -endif -obj-$(CONFIG_ACPI_SLEEP) += sleep_32.o wakeup_32.o - -ifneq ($(CONFIG_ACPI_PROCESSOR),) -obj-y += cstate.o processor.o -endif - diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c deleted file mode 100644 index cacdd883bf2..00000000000 --- a/arch/i386/kernel/acpi/boot.c +++ /dev/null @@ -1,1326 +0,0 @@ -/* - * boot.c - Architecture-Specific Low-Level ACPI Boot Support - * - * Copyright (C) 2001, 2002 Paul 
Diefenbaugh - * Copyright (C) 2001 Jun Nakajima - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -static int __initdata acpi_force = 0; - -#ifdef CONFIG_ACPI -int acpi_disabled = 0; -#else -int acpi_disabled = 1; -#endif -EXPORT_SYMBOL(acpi_disabled); - -#ifdef CONFIG_X86_64 - -#include - -static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; } - - -#else /* X86 */ - -#ifdef CONFIG_X86_LOCAL_APIC -#include -#include -#endif /* CONFIG_X86_LOCAL_APIC */ - -#endif /* X86 */ - -#define BAD_MADT_ENTRY(entry, end) ( \ - (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ - ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) - -#define PREFIX "ACPI: " - -int acpi_noirq; /* skip ACPI IRQ initialization */ -int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */ -int acpi_ht __initdata = 1; /* enable HT */ - -int acpi_lapic; -int acpi_ioapic; -int acpi_strict; -EXPORT_SYMBOL(acpi_strict); - -u8 acpi_sci_flags __initdata; -int acpi_sci_override_gsi __initdata; -int acpi_skip_timer_override __initdata; -int acpi_use_timer_override __initdata; - -#ifdef CONFIG_X86_LOCAL_APIC -static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; -#endif - -#ifndef __HAVE_ARCH_CMPXCHG -#warning ACPI uses CMPXCHG, i486 and later hardware -#endif - -/* -------------------------------------------------------------------------- - Boot-time Configuration - -------------------------------------------------------------------------- */ - -/* - * The default interrupt routing model is PIC (8259). This gets - * overriden if IOAPICs are enumerated (below). - */ -enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC; - -#ifdef CONFIG_X86_64 - -/* rely on all ACPI tables being in the direct mapping */ -char *__acpi_map_table(unsigned long phys_addr, unsigned long size) -{ - if (!phys_addr || !size) - return NULL; - - if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE) - return __va(phys_addr); - - return NULL; -} - -#else - -/* - * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END, - * to map the target physical address. The problem is that set_fixmap() - * provides a single page, and it is possible that the page is not - * sufficient. - * By using this area, we can map up to MAX_IO_APICS pages temporarily, - * i.e. until the next __va_range() call. - * - * Important Safety Note: The fixed I/O APIC page numbers are *subtracted* - * from the fixed base. 
That's why we start at FIX_IO_APIC_BASE_END and - * count idx down while incrementing the phys address. - */ -char *__acpi_map_table(unsigned long phys, unsigned long size) -{ - unsigned long base, offset, mapped_size; - int idx; - - if (phys + size < 8 * 1024 * 1024) - return __va(phys); - - offset = phys & (PAGE_SIZE - 1); - mapped_size = PAGE_SIZE - offset; - set_fixmap(FIX_ACPI_END, phys); - base = fix_to_virt(FIX_ACPI_END); - - /* - * Most cases can be covered by the below. - */ - idx = FIX_ACPI_END; - while (mapped_size < size) { - if (--idx < FIX_ACPI_BEGIN) - return NULL; /* cannot handle this */ - phys += PAGE_SIZE; - set_fixmap(idx, phys); - mapped_size += PAGE_SIZE; - } - - return ((unsigned char *)base + offset); -} -#endif - -#ifdef CONFIG_PCI_MMCONFIG -/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */ -struct acpi_mcfg_allocation *pci_mmcfg_config; -int pci_mmcfg_config_num; - -int __init acpi_parse_mcfg(struct acpi_table_header *header) -{ - struct acpi_table_mcfg *mcfg; - unsigned long i; - int config_size; - - if (!header) - return -EINVAL; - - mcfg = (struct acpi_table_mcfg *)header; - - /* how many config structures do we have */ - pci_mmcfg_config_num = 0; - i = header->length - sizeof(struct acpi_table_mcfg); - while (i >= sizeof(struct acpi_mcfg_allocation)) { - ++pci_mmcfg_config_num; - i -= sizeof(struct acpi_mcfg_allocation); - }; - if (pci_mmcfg_config_num == 0) { - printk(KERN_ERR PREFIX "MMCONFIG has no entries\n"); - return -ENODEV; - } - - config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config); - pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL); - if (!pci_mmcfg_config) { - printk(KERN_WARNING PREFIX - "No memory for MCFG config tables\n"); - return -ENOMEM; - } - - memcpy(pci_mmcfg_config, &mcfg[1], config_size); - for (i = 0; i < pci_mmcfg_config_num; ++i) { - if (pci_mmcfg_config[i].address > 0xFFFFFFFF) { - printk(KERN_ERR PREFIX - "MMCONFIG not in low 4GB of memory\n"); - kfree(pci_mmcfg_config); - pci_mmcfg_config_num = 0; - return -ENODEV; - } - } - - return 0; -} -#endif /* CONFIG_PCI_MMCONFIG */ - -#ifdef CONFIG_X86_LOCAL_APIC -static int __init acpi_parse_madt(struct acpi_table_header *table) -{ - struct acpi_table_madt *madt = NULL; - - if (!cpu_has_apic) - return -EINVAL; - - madt = (struct acpi_table_madt *)table; - if (!madt) { - printk(KERN_WARNING PREFIX "Unable to map MADT\n"); - return -ENODEV; - } - - if (madt->address) { - acpi_lapic_addr = (u64) madt->address; - - printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n", - madt->address); - } - - acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); - - return 0; -} - -static int __init -acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end) -{ - struct acpi_madt_local_apic *processor = NULL; - - processor = (struct acpi_madt_local_apic *)header; - - if (BAD_MADT_ENTRY(processor, end)) - return -EINVAL; - - acpi_table_print_madt_entry(header); - - /* - * We need to register disabled CPU as well to permit - * counting disabled CPUs. This allows us to size - * cpus_possible_map more accurately, to permit - * to not preallocating memory for all NR_CPUS - * when we use CPU hotplug. - */ - mp_register_lapic(processor->id, /* APIC ID */ - processor->lapic_flags & ACPI_MADT_ENABLED); /* Enabled? 
*/ - - return 0; -} - -static int __init -acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, - const unsigned long end) -{ - struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL; - - lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header; - - if (BAD_MADT_ENTRY(lapic_addr_ovr, end)) - return -EINVAL; - - acpi_lapic_addr = lapic_addr_ovr->address; - - return 0; -} - -static int __init -acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end) -{ - struct acpi_madt_local_apic_nmi *lapic_nmi = NULL; - - lapic_nmi = (struct acpi_madt_local_apic_nmi *)header; - - if (BAD_MADT_ENTRY(lapic_nmi, end)) - return -EINVAL; - - acpi_table_print_madt_entry(header); - - if (lapic_nmi->lint != 1) - printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); - - return 0; -} - -#endif /*CONFIG_X86_LOCAL_APIC */ - -#ifdef CONFIG_X86_IO_APIC - -static int __init -acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) -{ - struct acpi_madt_io_apic *ioapic = NULL; - - ioapic = (struct acpi_madt_io_apic *)header; - - if (BAD_MADT_ENTRY(ioapic, end)) - return -EINVAL; - - acpi_table_print_madt_entry(header); - - mp_register_ioapic(ioapic->id, - ioapic->address, ioapic->global_irq_base); - - return 0; -} - -/* - * Parse Interrupt Source Override for the ACPI SCI - */ -static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger) -{ - if (trigger == 0) /* compatible SCI trigger is level */ - trigger = 3; - - if (polarity == 0) /* compatible SCI polarity is low */ - polarity = 3; - - /* Command-line over-ride via acpi_sci= */ - if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) - trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2; - - if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK) - polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; - - /* - * mp_config_acpi_legacy_irqs() already setup IRQs < 16 - * If GSI is < 16, this will update its flags, - * else it will create a new mp_irqs[] entry. - */ - mp_override_legacy_irq(gsi, polarity, trigger, gsi); - - /* - * stash over-ride to indicate we've been here - * and for later update of acpi_gbl_FADT - */ - acpi_sci_override_gsi = gsi; - return; -} - -static int __init -acpi_parse_int_src_ovr(struct acpi_subtable_header * header, - const unsigned long end) -{ - struct acpi_madt_interrupt_override *intsrc = NULL; - - intsrc = (struct acpi_madt_interrupt_override *)header; - - if (BAD_MADT_ENTRY(intsrc, end)) - return -EINVAL; - - acpi_table_print_madt_entry(header); - - if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) { - acpi_sci_ioapic_setup(intsrc->global_irq, - intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, - (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2); - return 0; - } - - if (acpi_skip_timer_override && - intsrc->source_irq == 0 && intsrc->global_irq == 2) { - printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); - return 0; - } - - mp_override_legacy_irq(intsrc->source_irq, - intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, - (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2, - intsrc->global_irq); - - return 0; -} - -static int __init -acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end) -{ - struct acpi_madt_nmi_source *nmi_src = NULL; - - nmi_src = (struct acpi_madt_nmi_source *)header; - - if (BAD_MADT_ENTRY(nmi_src, end)) - return -EINVAL; - - acpi_table_print_madt_entry(header); - - /* TBD: Support nimsrc entries? 
*/ - - return 0; -} - -#endif /* CONFIG_X86_IO_APIC */ - -/* - * acpi_pic_sci_set_trigger() - * - * use ELCR to set PIC-mode trigger type for SCI - * - * If a PIC-mode SCI is not recognized or gives spurious IRQ7's - * it may require Edge Trigger -- use "acpi_sci=edge" - * - * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers - * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge. - * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0) - * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0) - */ - -void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger) -{ - unsigned int mask = 1 << irq; - unsigned int old, new; - - /* Real old ELCR mask */ - old = inb(0x4d0) | (inb(0x4d1) << 8); - - /* - * If we use ACPI to set PCI irq's, then we should clear ELCR - * since we will set it correctly as we enable the PCI irq - * routing. - */ - new = acpi_noirq ? old : 0; - - /* - * Update SCI information in the ELCR, it isn't in the PCI - * routing tables.. - */ - switch (trigger) { - case 1: /* Edge - clear */ - new &= ~mask; - break; - case 3: /* Level - set */ - new |= mask; - break; - } - - if (old == new) - return; - - printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old); - outb(new, 0x4d0); - outb(new >> 8, 0x4d1); -} - -int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) -{ - *irq = gsi; - return 0; -} - -/* - * success: return IRQ number (>=0) - * failure: return < 0 - */ -int acpi_register_gsi(u32 gsi, int triggering, int polarity) -{ - unsigned int irq; - unsigned int plat_gsi = gsi; - -#ifdef CONFIG_PCI - /* - * Make sure all (legacy) PCI IRQs are set as level-triggered. - */ - if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { - extern void eisa_set_level_irq(unsigned int irq); - - if (triggering == ACPI_LEVEL_SENSITIVE) - eisa_set_level_irq(gsi); - } -#endif - -#ifdef CONFIG_X86_IO_APIC - if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) { - plat_gsi = mp_register_gsi(gsi, triggering, polarity); - } -#endif - acpi_gsi_to_irq(plat_gsi, &irq); - return irq; -} - -EXPORT_SYMBOL(acpi_register_gsi); - -/* - * ACPI based hotplug support for CPU - */ -#ifdef CONFIG_ACPI_HOTPLUG_CPU -int acpi_map_lsapic(acpi_handle handle, int *pcpu) -{ - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj; - struct acpi_madt_local_apic *lapic; - cpumask_t tmp_map, new_map; - u8 physid; - int cpu; - - if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) - return -EINVAL; - - if (!buffer.length || !buffer.pointer) - return -EINVAL; - - obj = buffer.pointer; - if (obj->type != ACPI_TYPE_BUFFER || - obj->buffer.length < sizeof(*lapic)) { - kfree(buffer.pointer); - return -EINVAL; - } - - lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer; - - if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC || - !(lapic->lapic_flags & ACPI_MADT_ENABLED)) { - kfree(buffer.pointer); - return -EINVAL; - } - - physid = lapic->id; - - kfree(buffer.pointer); - buffer.length = ACPI_ALLOCATE_BUFFER; - buffer.pointer = NULL; - - tmp_map = cpu_present_map; - mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED); - - /* - * If mp_register_lapic successfully generates a new logical cpu - * number, then the following will get us exactly what was mapped - */ - cpus_andnot(new_map, cpu_present_map, tmp_map); - if (cpus_empty(new_map)) { - printk ("Unable to map lapic to logical cpu number\n"); - return -EINVAL; - } - - cpu = first_cpu(new_map); - - *pcpu = cpu; - return 0; -} - -EXPORT_SYMBOL(acpi_map_lsapic); - -int acpi_unmap_lsapic(int cpu) -{ - 
x86_cpu_to_apicid[cpu] = -1; - cpu_clear(cpu, cpu_present_map); - num_processors--; - - return (0); -} - -EXPORT_SYMBOL(acpi_unmap_lsapic); -#endif /* CONFIG_ACPI_HOTPLUG_CPU */ - -int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) -{ - /* TBD */ - return -EINVAL; -} - -EXPORT_SYMBOL(acpi_register_ioapic); - -int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) -{ - /* TBD */ - return -EINVAL; -} - -EXPORT_SYMBOL(acpi_unregister_ioapic); - -static unsigned long __init -acpi_scan_rsdp(unsigned long start, unsigned long length) -{ - unsigned long offset = 0; - unsigned long sig_len = sizeof("RSD PTR ") - 1; - - /* - * Scan all 16-byte boundaries of the physical memory region for the - * RSDP signature. - */ - for (offset = 0; offset < length; offset += 16) { - if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len)) - continue; - return (start + offset); - } - - return 0; -} - -static int __init acpi_parse_sbf(struct acpi_table_header *table) -{ - struct acpi_table_boot *sb; - - sb = (struct acpi_table_boot *)table; - if (!sb) { - printk(KERN_WARNING PREFIX "Unable to map SBF\n"); - return -ENODEV; - } - - sbf_port = sb->cmos_index; /* Save CMOS port */ - - return 0; -} - -#ifdef CONFIG_HPET_TIMER -#include - -static struct __initdata resource *hpet_res; - -static int __init acpi_parse_hpet(struct acpi_table_header *table) -{ - struct acpi_table_hpet *hpet_tbl; - - hpet_tbl = (struct acpi_table_hpet *)table; - if (!hpet_tbl) { - printk(KERN_WARNING PREFIX "Unable to map HPET\n"); - return -ENODEV; - } - - if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) { - printk(KERN_WARNING PREFIX "HPET timers must be located in " - "memory.\n"); - return -1; - } - - hpet_address = hpet_tbl->address.address; - printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", - hpet_tbl->id, hpet_address); - - /* - * Allocate and initialize the HPET firmware resource for adding into - * the resource tree during the lateinit timeframe. - */ -#define HPET_RESOURCE_NAME_SIZE 9 - hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE); - - if (!hpet_res) - return 0; - - memset(hpet_res, 0, sizeof(*hpet_res)); - hpet_res->name = (void *)&hpet_res[1]; - hpet_res->flags = IORESOURCE_MEM; - snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u", - hpet_tbl->sequence); - - hpet_res->start = hpet_address; - hpet_res->end = hpet_address + (1 * 1024) - 1; - - return 0; -} - -/* - * hpet_insert_resource inserts the HPET resources used into the resource - * tree. - */ -static __init int hpet_insert_resource(void) -{ - if (!hpet_res) - return 1; - - return insert_resource(&iomem_resource, hpet_res); -} - -late_initcall(hpet_insert_resource); - -#else -#define acpi_parse_hpet NULL -#endif - -static int __init acpi_parse_fadt(struct acpi_table_header *table) -{ - -#ifdef CONFIG_X86_PM_TIMER - /* detect the location of the ACPI PM Timer */ - if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) { - /* FADT rev. 2 */ - if (acpi_gbl_FADT.xpm_timer_block.space_id != - ACPI_ADR_SPACE_SYSTEM_IO) - return 0; - - pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address; - /* - * "X" fields are optional extensions to the original V1.0 - * fields, so we must selectively expand V1.0 fields if the - * corresponding X field is zero. - */ - if (!pmtmr_ioport) - pmtmr_ioport = acpi_gbl_FADT.pm_timer_block; - } else { - /* FADT rev. 
1 */ - pmtmr_ioport = acpi_gbl_FADT.pm_timer_block; - } - if (pmtmr_ioport) - printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n", - pmtmr_ioport); -#endif - return 0; -} - -unsigned long __init acpi_find_rsdp(void) -{ - unsigned long rsdp_phys = 0; - - if (efi_enabled) { - if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) - return efi.acpi20; - else if (efi.acpi != EFI_INVALID_TABLE_ADDR) - return efi.acpi; - } - /* - * Scan memory looking for the RSDP signature. First search EBDA (low - * memory) paragraphs and then search upper memory (E0000-FFFFF). - */ - rsdp_phys = acpi_scan_rsdp(0, 0x400); - if (!rsdp_phys) - rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000); - - return rsdp_phys; -} - -#ifdef CONFIG_X86_LOCAL_APIC -/* - * Parse LAPIC entries in MADT - * returns 0 on success, < 0 on error - */ -static int __init acpi_parse_madt_lapic_entries(void) -{ - int count; - - if (!cpu_has_apic) - return -ENODEV; - - /* - * Note that the LAPIC address is obtained from the MADT (32-bit value) - * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). - */ - - count = - acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, - acpi_parse_lapic_addr_ovr, 0); - if (count < 0) { - printk(KERN_ERR PREFIX - "Error parsing LAPIC address override entry\n"); - return count; - } - - mp_register_lapic_address(acpi_lapic_addr); - - count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic, - MAX_APICS); - if (!count) { - printk(KERN_ERR PREFIX "No LAPIC entries present\n"); - /* TBD: Cleanup to allow fallback to MPS */ - return -ENODEV; - } else if (count < 0) { - printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n"); - /* TBD: Cleanup to allow fallback to MPS */ - return count; - } - - count = - acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0); - if (count < 0) { - printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); - /* TBD: Cleanup to allow fallback to MPS */ - return count; - } - return 0; -} -#endif /* CONFIG_X86_LOCAL_APIC */ - -#ifdef CONFIG_X86_IO_APIC -/* - * Parse IOAPIC related entries in MADT - * returns 0 on success, < 0 on error - */ -static int __init acpi_parse_madt_ioapic_entries(void) -{ - int count; - - /* - * ACPI interpreter is required to complete interrupt setup, - * so if it is off, don't enumerate the io-apics with ACPI. - * If MPS is present, it will handle them, - * otherwise the system will stay in PIC mode - */ - if (acpi_disabled || acpi_noirq) { - return -ENODEV; - } - - if (!cpu_has_apic) - return -ENODEV; - - /* - * if "noapic" boot option, don't look for IO-APICs - */ - if (skip_ioapic_setup) { - printk(KERN_INFO PREFIX "Skipping IOAPIC probe " - "due to 'noapic' option.\n"); - return -ENODEV; - } - - count = - acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic, - MAX_IO_APICS); - if (!count) { - printk(KERN_ERR PREFIX "No IOAPIC entries present\n"); - return -ENODEV; - } else if (count < 0) { - printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n"); - return count; - } - - count = - acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, - NR_IRQ_VECTORS); - if (count < 0) { - printk(KERN_ERR PREFIX - "Error parsing interrupt source overrides entry\n"); - /* TBD: Cleanup to allow fallback to MPS */ - return count; - } - - /* - * If BIOS did not supply an INT_SRC_OVR for the SCI - * pretend we got one so we can set the SCI flags. 
- */ - if (!acpi_sci_override_gsi) - acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0); - - /* Fill in identity legacy mapings where no override */ - mp_config_acpi_legacy_irqs(); - - count = - acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, - NR_IRQ_VECTORS); - if (count < 0) { - printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); - /* TBD: Cleanup to allow fallback to MPS */ - return count; - } - - return 0; -} -#else -static inline int acpi_parse_madt_ioapic_entries(void) -{ - return -1; -} -#endif /* !CONFIG_X86_IO_APIC */ - -static void __init acpi_process_madt(void) -{ -#ifdef CONFIG_X86_LOCAL_APIC - int error; - - if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { - - /* - * Parse MADT LAPIC entries - */ - error = acpi_parse_madt_lapic_entries(); - if (!error) { - acpi_lapic = 1; - -#ifdef CONFIG_X86_GENERICARCH - generic_bigsmp_probe(); -#endif - /* - * Parse MADT IO-APIC entries - */ - error = acpi_parse_madt_ioapic_entries(); - if (!error) { - acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; - acpi_irq_balance_set(NULL); - acpi_ioapic = 1; - - smp_found_config = 1; - setup_apic_routing(); - } - } - if (error == -EINVAL) { - /* - * Dell Precision Workstation 410, 610 come here. - */ - printk(KERN_ERR PREFIX - "Invalid BIOS MADT, disabling ACPI\n"); - disable_acpi(); - } - } -#endif - return; -} - -#ifdef __i386__ - -static int __init disable_acpi_irq(struct dmi_system_id *d) -{ - if (!acpi_force) { - printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n", - d->ident); - acpi_noirq_set(); - } - return 0; -} - -static int __init disable_acpi_pci(struct dmi_system_id *d) -{ - if (!acpi_force) { - printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n", - d->ident); - acpi_disable_pci(); - } - return 0; -} - -static int __init dmi_disable_acpi(struct dmi_system_id *d) -{ - if (!acpi_force) { - printk(KERN_NOTICE "%s detected: acpi off\n", d->ident); - disable_acpi(); - } else { - printk(KERN_NOTICE - "Warning: DMI blacklist says broken, but acpi forced\n"); - } - return 0; -} - -/* - * Limit ACPI to CPU enumeration for HT - */ -static int __init force_acpi_ht(struct dmi_system_id *d) -{ - if (!acpi_force) { - printk(KERN_NOTICE "%s detected: force use of acpi=ht\n", - d->ident); - disable_acpi(); - acpi_ht = 1; - } else { - printk(KERN_NOTICE - "Warning: acpi=force overrules DMI blacklist: acpi=ht\n"); - } - return 0; -} - -/* - * If your system is blacklisted here, but you find that acpi=force - * works for you, please contact acpi-devel@sourceforge.net - */ -static struct dmi_system_id __initdata acpi_dmi_table[] = { - /* - * Boxes that need ACPI disabled - */ - { - .callback = dmi_disable_acpi, - .ident = "IBM Thinkpad", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), - DMI_MATCH(DMI_BOARD_NAME, "2629H1G"), - }, - }, - - /* - * Boxes that need acpi=ht - */ - { - .callback = force_acpi_ht, - .ident = "FSC Primergy T850", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), - DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"), - }, - }, - { - .callback = force_acpi_ht, - .ident = "HP VISUALIZE NT Workstation", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"), - }, - }, - { - .callback = force_acpi_ht, - .ident = "Compaq Workstation W8000", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), - DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"), - }, - }, - { - .callback = force_acpi_ht, - .ident = "ASUS P4B266", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, 
"ASUSTeK Computer INC."), - DMI_MATCH(DMI_BOARD_NAME, "P4B266"), - }, - }, - { - .callback = force_acpi_ht, - .ident = "ASUS P2B-DS", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), - DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"), - }, - }, - { - .callback = force_acpi_ht, - .ident = "ASUS CUR-DLS", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), - DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"), - }, - }, - { - .callback = force_acpi_ht, - .ident = "ABIT i440BX-W83977", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ABIT "), - DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"), - }, - }, - { - .callback = force_acpi_ht, - .ident = "IBM Bladecenter", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), - DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"), - }, - }, - { - .callback = force_acpi_ht, - .ident = "IBM eServer xSeries 360", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), - DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"), - }, - }, - { - .callback = force_acpi_ht, - .ident = "IBM eserver xSeries 330", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), - DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"), - }, - }, - { - .callback = force_acpi_ht, - .ident = "IBM eserver xSeries 440", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), - DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"), - }, - }, - - /* - * Boxes that need ACPI PCI IRQ routing disabled - */ - { - .callback = disable_acpi_irq, - .ident = "ASUS A7V", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"), - DMI_MATCH(DMI_BOARD_NAME, ""), - /* newer BIOS, Revision 1011, does work */ - DMI_MATCH(DMI_BIOS_VERSION, - "ASUS A7V ACPI BIOS Revision 1007"), - }, - }, - { - /* - * Latest BIOS for IBM 600E (1.16) has bad pcinum - * for LPC bridge, which is needed for the PCI - * interrupt links to work. DSDT fix is in bug 5966. - * 2645, 2646 model numbers are shared with 600/600E/600X - */ - .callback = disable_acpi_irq, - .ident = "IBM Thinkpad 600 Series 2645", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), - DMI_MATCH(DMI_BOARD_NAME, "2645"), - }, - }, - { - .callback = disable_acpi_irq, - .ident = "IBM Thinkpad 600 Series 2646", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), - DMI_MATCH(DMI_BOARD_NAME, "2646"), - }, - }, - /* - * Boxes that need ACPI PCI IRQ routing and PCI scan disabled - */ - { /* _BBN 0 bug */ - .callback = disable_acpi_pci, - .ident = "ASUS PR-DLS", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), - DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"), - DMI_MATCH(DMI_BIOS_VERSION, - "ASUS PR-DLS ACPI BIOS Revision 1010"), - DMI_MATCH(DMI_BIOS_DATE, "03/21/2003") - }, - }, - { - .callback = disable_acpi_pci, - .ident = "Acer TravelMate 36x Laptop", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), - }, - }, - {} -}; - -#endif /* __i386__ */ - -/* - * acpi_boot_table_init() and acpi_boot_init() - * called from setup_arch(), always. - * 1. checksums all tables - * 2. enumerates lapics - * 3. enumerates io-apics - * - * acpi_table_init() is separate to allow reading SRAT without - * other side effects. - * - * side effects of acpi_boot_init: - * acpi_lapic = 1 if LAPIC found - * acpi_ioapic = 1 if IOAPIC found - * if (acpi_lapic && acpi_ioapic) smp_found_config = 1; - * if acpi_blacklisted() acpi_disabled = 1; - * acpi_irq_model=... - * ... 
- * - * return value: (currently ignored) - * 0: success - * !0: failure - */ - -int __init acpi_boot_table_init(void) -{ - int error; - -#ifdef __i386__ - dmi_check_system(acpi_dmi_table); -#endif - - /* - * If acpi_disabled, bail out - * One exception: acpi=ht continues far enough to enumerate LAPICs - */ - if (acpi_disabled && !acpi_ht) - return 1; - - /* - * Initialize the ACPI boot-time table parser. - */ - error = acpi_table_init(); - if (error) { - disable_acpi(); - return error; - } - - acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); - - /* - * blacklist may disable ACPI entirely - */ - error = acpi_blacklisted(); - if (error) { - if (acpi_force) { - printk(KERN_WARNING PREFIX "acpi=force override\n"); - } else { - printk(KERN_WARNING PREFIX "Disabling ACPI support\n"); - disable_acpi(); - return error; - } - } - - return 0; -} - -int __init acpi_boot_init(void) -{ - /* - * If acpi_disabled, bail out - * One exception: acpi=ht continues far enough to enumerate LAPICs - */ - if (acpi_disabled && !acpi_ht) - return 1; - - acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); - - /* - * set sci_int and PM timer address - */ - acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt); - - /* - * Process the Multiple APIC Description Table (MADT), if present - */ - acpi_process_madt(); - - acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet); - - return 0; -} - -static int __init parse_acpi(char *arg) -{ - if (!arg) - return -EINVAL; - - /* "acpi=off" disables both ACPI table parsing and interpreter */ - if (strcmp(arg, "off") == 0) { - disable_acpi(); - } - /* acpi=force to over-ride black-list */ - else if (strcmp(arg, "force") == 0) { - acpi_force = 1; - acpi_ht = 1; - acpi_disabled = 0; - } - /* acpi=strict disables out-of-spec workarounds */ - else if (strcmp(arg, "strict") == 0) { - acpi_strict = 1; - } - /* Limit ACPI just to boot-time to enable HT */ - else if (strcmp(arg, "ht") == 0) { - if (!acpi_force) - disable_acpi(); - acpi_ht = 1; - } - /* "acpi=noirq" disables ACPI interrupt routing */ - else if (strcmp(arg, "noirq") == 0) { - acpi_noirq_set(); - } else { - /* Core will printk when we return error. */ - return -EINVAL; - } - return 0; -} -early_param("acpi", parse_acpi); - -/* FIXME: Using pci= for an ACPI parameter is a travesty. 
*/ -static int __init parse_pci(char *arg) -{ - if (arg && strcmp(arg, "noacpi") == 0) - acpi_disable_pci(); - return 0; -} -early_param("pci", parse_pci); - -#ifdef CONFIG_X86_IO_APIC -static int __init parse_acpi_skip_timer_override(char *arg) -{ - acpi_skip_timer_override = 1; - return 0; -} -early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override); - -static int __init parse_acpi_use_timer_override(char *arg) -{ - acpi_use_timer_override = 1; - return 0; -} -early_param("acpi_use_timer_override", parse_acpi_use_timer_override); -#endif /* CONFIG_X86_IO_APIC */ - -static int __init setup_acpi_sci(char *s) -{ - if (!s) - return -EINVAL; - if (!strcmp(s, "edge")) - acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE | - (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK); - else if (!strcmp(s, "level")) - acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL | - (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK); - else if (!strcmp(s, "high")) - acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH | - (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK); - else if (!strcmp(s, "low")) - acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW | - (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK); - else - return -EINVAL; - return 0; -} -early_param("acpi_sci", setup_acpi_sci); - -int __acpi_acquire_global_lock(unsigned int *lock) -{ - unsigned int old, new, val; - do { - old = *lock; - new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1)); - val = cmpxchg(lock, old, new); - } while (unlikely (val != old)); - return (new < 3) ? -1 : 0; -} - -int __acpi_release_global_lock(unsigned int *lock) -{ - unsigned int old, new, val; - do { - old = *lock; - new = old & ~0x3; - val = cmpxchg(lock, old, new); - } while (unlikely (val != old)); - return old & 0x1; -} diff --git a/arch/i386/kernel/acpi/cstate.c b/arch/i386/kernel/acpi/cstate.c deleted file mode 100644 index 2d39f55d29a..00000000000 --- a/arch/i386/kernel/acpi/cstate.c +++ /dev/null @@ -1,164 +0,0 @@ -/* - * arch/i386/kernel/acpi/cstate.c - * - * Copyright (C) 2005 Intel Corporation - * Venkatesh Pallipadi - * - Added _PDC for SMP C-states on Intel CPUs - */ - -#include -#include -#include -#include -#include -#include - -#include -#include - -/* - * Initialize bm_flags based on the CPU cache properties - * On SMP it depends on cache configuration - * - When cache is not shared among all CPUs, we flush cache - * before entering C3. - * - When cache is shared among all CPUs, we use bm_check - * mechanism as in UP case - * - * This routine is called only after all the CPUs are online - */ -void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, - unsigned int cpu) -{ - struct cpuinfo_x86 *c = cpu_data + cpu; - - flags->bm_check = 0; - if (num_online_cpus() == 1) - flags->bm_check = 1; - else if (c->x86_vendor == X86_VENDOR_INTEL) { - /* - * Today all CPUs that support C3 share cache. - * TBD: This needs to look at cache shared map, once - * multi-core detection patch makes to the base. 
- */ - flags->bm_check = 1; - } -} -EXPORT_SYMBOL(acpi_processor_power_init_bm_check); - -/* The code below handles cstate entry with monitor-mwait pair on Intel*/ - -struct cstate_entry { - struct { - unsigned int eax; - unsigned int ecx; - } states[ACPI_PROCESSOR_MAX_POWER]; -}; -static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */ - -static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; - -#define MWAIT_SUBSTATE_MASK (0xf) -#define MWAIT_SUBSTATE_SIZE (4) - -#define CPUID_MWAIT_LEAF (5) -#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) -#define CPUID5_ECX_INTERRUPT_BREAK (0x2) - -#define MWAIT_ECX_INTERRUPT_BREAK (0x1) - -#define NATIVE_CSTATE_BEYOND_HALT (2) - -int acpi_processor_ffh_cstate_probe(unsigned int cpu, - struct acpi_processor_cx *cx, struct acpi_power_register *reg) -{ - struct cstate_entry *percpu_entry; - struct cpuinfo_x86 *c = cpu_data + cpu; - - cpumask_t saved_mask; - int retval; - unsigned int eax, ebx, ecx, edx; - unsigned int edx_part; - unsigned int cstate_type; /* C-state type and not ACPI C-state type */ - unsigned int num_cstate_subtype; - - if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF ) - return -1; - - if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT) - return -1; - - percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); - percpu_entry->states[cx->index].eax = 0; - percpu_entry->states[cx->index].ecx = 0; - - /* Make sure we are running on right CPU */ - saved_mask = current->cpus_allowed; - retval = set_cpus_allowed(current, cpumask_of_cpu(cpu)); - if (retval) - return -1; - - cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); - - /* Check whether this particular cx_type (in CST) is supported or not */ - cstate_type = (cx->address >> MWAIT_SUBSTATE_SIZE) + 1; - edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE); - num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK; - - retval = 0; - if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) { - retval = -1; - goto out; - } - - /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */ - if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || - !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) { - retval = -1; - goto out; - } - percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK; - - /* Use the hint in CST */ - percpu_entry->states[cx->index].eax = cx->address; - - if (!mwait_supported[cstate_type]) { - mwait_supported[cstate_type] = 1; - printk(KERN_DEBUG "Monitor-Mwait will be used to enter C-%d " - "state\n", cx->type); - } - -out: - set_cpus_allowed(current, saved_mask); - return retval; -} -EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); - -void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) -{ - unsigned int cpu = smp_processor_id(); - struct cstate_entry *percpu_entry; - - percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); - mwait_idle_with_hints(percpu_entry->states[cx->index].eax, - percpu_entry->states[cx->index].ecx); -} -EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter); - -static int __init ffh_cstate_init(void) -{ - struct cpuinfo_x86 *c = &boot_cpu_data; - if (c->x86_vendor != X86_VENDOR_INTEL) - return -1; - - cpu_cstate_entry = alloc_percpu(struct cstate_entry); - return 0; -} - -static void __exit ffh_cstate_exit(void) -{ - free_percpu(cpu_cstate_entry); - cpu_cstate_entry = NULL; -} - -arch_initcall(ffh_cstate_init); -__exitcall(ffh_cstate_exit); diff --git a/arch/i386/kernel/acpi/earlyquirk_32.c b/arch/i386/kernel/acpi/earlyquirk_32.c deleted file mode 100644 index 23f78efc577..00000000000 --- a/arch/i386/kernel/acpi/earlyquirk_32.c +++ 
/dev/null @@ -1,84 +0,0 @@ -/* - * Do early PCI probing for bug detection when the main PCI subsystem is - * not up yet. - */ -#include -#include -#include -#include - -#include -#include -#include - -#ifdef CONFIG_ACPI - -static int __init nvidia_hpet_check(struct acpi_table_header *header) -{ - return 0; -} -#endif - -static int __init check_bridge(int vendor, int device) -{ -#ifdef CONFIG_ACPI - static int warned; - /* According to Nvidia all timer overrides are bogus unless HPET - is enabled. */ - if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) { - if (!warned && acpi_table_parse(ACPI_SIG_HPET, - nvidia_hpet_check)) { - warned = 1; - acpi_skip_timer_override = 1; - printk(KERN_INFO "Nvidia board " - "detected. Ignoring ACPI " - "timer override.\n"); - printk(KERN_INFO "If you got timer trouble " - "try acpi_use_timer_override\n"); - - } - } -#endif - if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) { - timer_over_8254 = 0; - printk(KERN_INFO "ATI board detected. Disabling timer routing " - "over 8254.\n"); - } - return 0; -} - -void __init check_acpi_pci(void) -{ - int num, slot, func; - - /* Assume the machine supports type 1. If not it will - always read ffffffff and should not have any side effect. - Actually a few buggy systems can machine check. Allow the user - to disable it by command line option at least -AK */ - if (!early_pci_allowed()) - return; - - /* Poor man's PCI discovery */ - for (num = 0; num < 32; num++) { - for (slot = 0; slot < 32; slot++) { - for (func = 0; func < 8; func++) { - u32 class; - u32 vendor; - class = read_pci_config(num, slot, func, - PCI_CLASS_REVISION); - if (class == 0xffffffff) - break; - - if ((class >> 16) != PCI_CLASS_BRIDGE_PCI) - continue; - - vendor = read_pci_config(num, slot, func, - PCI_VENDOR_ID); - - if (check_bridge(vendor & 0xffff, vendor >> 16)) - return; - } - - } - } -} diff --git a/arch/i386/kernel/acpi/processor.c b/arch/i386/kernel/acpi/processor.c deleted file mode 100644 index b54fded4983..00000000000 --- a/arch/i386/kernel/acpi/processor.c +++ /dev/null @@ -1,75 +0,0 @@ -/* - * arch/i386/kernel/acpi/processor.c - * - * Copyright (C) 2005 Intel Corporation - * Venkatesh Pallipadi - * - Added _PDC for platforms with Intel CPUs - */ - -#include -#include -#include -#include - -#include -#include - -static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c) -{ - struct acpi_object_list *obj_list; - union acpi_object *obj; - u32 *buf; - - /* allocate and initialize pdc. It will be used later. 
*/ - obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL); - if (!obj_list) { - printk(KERN_ERR "Memory allocation error\n"); - return; - } - - obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL); - if (!obj) { - printk(KERN_ERR "Memory allocation error\n"); - kfree(obj_list); - return; - } - - buf = kmalloc(12, GFP_KERNEL); - if (!buf) { - printk(KERN_ERR "Memory allocation error\n"); - kfree(obj); - kfree(obj_list); - return; - } - - buf[0] = ACPI_PDC_REVISION_ID; - buf[1] = 1; - buf[2] = ACPI_PDC_C_CAPABILITY_SMP; - - if (cpu_has(c, X86_FEATURE_EST)) - buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP; - - obj->type = ACPI_TYPE_BUFFER; - obj->buffer.length = 12; - obj->buffer.pointer = (u8 *) buf; - obj_list->count = 1; - obj_list->pointer = obj; - pr->pdc = obj_list; - - return; -} - -/* Initialize _PDC data based on the CPU vendor */ -void arch_acpi_processor_init_pdc(struct acpi_processor *pr) -{ - unsigned int cpu = pr->id; - struct cpuinfo_x86 *c = cpu_data + cpu; - - pr->pdc = NULL; - if (c->x86_vendor == X86_VENDOR_INTEL) - init_intel_pdc(pr, c); - - return; -} - -EXPORT_SYMBOL(arch_acpi_processor_init_pdc); diff --git a/arch/i386/kernel/acpi/sleep_32.c b/arch/i386/kernel/acpi/sleep_32.c deleted file mode 100644 index c42b5ab49de..00000000000 --- a/arch/i386/kernel/acpi/sleep_32.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * sleep.c - x86-specific ACPI sleep support. - * - * Copyright (C) 2001-2003 Patrick Mochel - * Copyright (C) 2001-2003 Pavel Machek - */ - -#include -#include -#include -#include - -#include - -/* address in low memory of the wakeup routine. */ -unsigned long acpi_wakeup_address = 0; -unsigned long acpi_realmode_flags; -extern char wakeup_start, wakeup_end; - -extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long)); - -/** - * acpi_save_state_mem - save kernel state - * - * Create an identity mapped page table and copy the wakeup routine to - * low memory. - */ -int acpi_save_state_mem(void) -{ - if (!acpi_wakeup_address) - return 1; - memcpy((void *)acpi_wakeup_address, &wakeup_start, - &wakeup_end - &wakeup_start); - acpi_copy_wakeup_routine(acpi_wakeup_address); - - return 0; -} - -/* - * acpi_restore_state - undo effects of acpi_save_state_mem - */ -void acpi_restore_state_mem(void) -{ -} - -/** - * acpi_reserve_bootmem - do _very_ early ACPI initialisation - * - * We allocate a page from the first 1MB of memory for the wakeup - * routine for when we come back from a sleep state. The - * runtime allocator allows specification of <16MB pages, but not - * <1MB pages. - */ -void __init acpi_reserve_bootmem(void) -{ - if ((&wakeup_end - &wakeup_start) > PAGE_SIZE) { - printk(KERN_ERR - "ACPI: Wakeup code way too big, S3 disabled.\n"); - return; - } - - acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE); - if (!acpi_wakeup_address) - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n"); -} - -static int __init acpi_sleep_setup(char *str) -{ - while ((str != NULL) && (*str != '\0')) { - if (strncmp(str, "s3_bios", 7) == 0) - acpi_realmode_flags |= 1; - if (strncmp(str, "s3_mode", 7) == 0) - acpi_realmode_flags |= 2; - if (strncmp(str, "s3_beep", 7) == 0) - acpi_realmode_flags |= 4; - str = strchr(str, ','); - if (str != NULL) - str += strspn(str, ", \t"); - } - return 1; -} - -__setup("acpi_sleep=", acpi_sleep_setup); - -/* Ouch, we want to delete this. 
We already have better version in userspace, in - s2ram from suspend.sf.net project */ -static __init int reset_videomode_after_s3(struct dmi_system_id *d) -{ - acpi_realmode_flags |= 2; - return 0; -} - -static __initdata struct dmi_system_id acpisleep_dmi_table[] = { - { /* Reset video mode after returning from ACPI S3 sleep */ - .callback = reset_videomode_after_s3, - .ident = "Toshiba Satellite 4030cdt", - .matches = { - DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"), - }, - }, - {} -}; - -static int __init acpisleep_dmi_init(void) -{ - dmi_check_system(acpisleep_dmi_table); - return 0; -} - -core_initcall(acpisleep_dmi_init); diff --git a/arch/i386/kernel/acpi/wakeup_32.S b/arch/i386/kernel/acpi/wakeup_32.S deleted file mode 100644 index f22ba8534d2..00000000000 --- a/arch/i386/kernel/acpi/wakeup_32.S +++ /dev/null @@ -1,321 +0,0 @@ -.text -#include -#include -#include - -# -# wakeup_code runs in real mode, and at unknown address (determined at run-time). -# Therefore it must only use relative jumps/calls. -# -# Do we need to deal with A20? It is okay: ACPI specs says A20 must be enabled -# -# If physical address of wakeup_code is 0x12345, BIOS should call us with -# cs = 0x1234, eip = 0x05 -# - -#define BEEP \ - inb $97, %al; \ - outb %al, $0x80; \ - movb $3, %al; \ - outb %al, $97; \ - outb %al, $0x80; \ - movb $-74, %al; \ - outb %al, $67; \ - outb %al, $0x80; \ - movb $-119, %al; \ - outb %al, $66; \ - outb %al, $0x80; \ - movb $15, %al; \ - outb %al, $66; - -ALIGN - .align 4096 -ENTRY(wakeup_start) -wakeup_code: - wakeup_code_start = . - .code16 - - movw $0xb800, %ax - movw %ax,%fs - movw $0x0e00 + 'L', %fs:(0x10) - - cli - cld - - # setup data segment - movw %cs, %ax - movw %ax, %ds # Make ds:0 point to wakeup_start - movw %ax, %ss - - testl $4, realmode_flags - wakeup_code - jz 1f - BEEP -1: - mov $(wakeup_stack - wakeup_code), %sp # Private stack is needed for ASUS board - movw $0x0e00 + 'S', %fs:(0x12) - - pushl $0 # Kill any dangerous flags - popfl - - movl real_magic - wakeup_code, %eax - cmpl $0x12345678, %eax - jne bogus_real_magic - - testl $1, realmode_flags - wakeup_code - jz 1f - lcall $0xc000,$3 - movw %cs, %ax - movw %ax, %ds # Bios might have played with that - movw %ax, %ss -1: - - testl $2, realmode_flags - wakeup_code - jz 1f - mov video_mode - wakeup_code, %ax - call mode_set -1: - - # set up page table - movl $swsusp_pg_dir-__PAGE_OFFSET, %eax - movl %eax, %cr3 - - testl $1, real_efer_save_restore - wakeup_code - jz 4f - # restore efer setting - movl real_save_efer_edx - wakeup_code, %edx - movl real_save_efer_eax - wakeup_code, %eax - mov $0xc0000080, %ecx - wrmsr -4: - # make sure %cr4 is set correctly (features, etc) - movl real_save_cr4 - wakeup_code, %eax - movl %eax, %cr4 - movw $0xb800, %ax - movw %ax,%fs - movw $0x0e00 + 'i', %fs:(0x12) - - # need a gdt -- use lgdtl to force 32-bit operands, in case - # the GDT is located past 16 megabytes. 
- lgdtl real_save_gdt - wakeup_code - - movl real_save_cr0 - wakeup_code, %eax - movl %eax, %cr0 - jmp 1f -1: - movw $0x0e00 + 'n', %fs:(0x14) - - movl real_magic - wakeup_code, %eax - cmpl $0x12345678, %eax - jne bogus_real_magic - - testl $8, realmode_flags - wakeup_code - jz 1f - BEEP -1: - ljmpl $__KERNEL_CS, $wakeup_pmode_return - -real_save_gdt: .word 0 - .long 0 -real_save_cr0: .long 0 -real_save_cr3: .long 0 -real_save_cr4: .long 0 -real_magic: .long 0 -video_mode: .long 0 -realmode_flags: .long 0 -beep_flags: .long 0 -real_efer_save_restore: .long 0 -real_save_efer_edx: .long 0 -real_save_efer_eax: .long 0 - -bogus_real_magic: - movw $0x0e00 + 'B', %fs:(0x12) - jmp bogus_real_magic - -/* This code uses an extended set of video mode numbers. These include: - * Aliases for standard modes - * NORMAL_VGA (-1) - * EXTENDED_VGA (-2) - * ASK_VGA (-3) - * Video modes numbered by menu position -- NOT RECOMMENDED because of lack - * of compatibility when extending the table. These are between 0x00 and 0xff. - */ -#define VIDEO_FIRST_MENU 0x0000 - -/* Standard BIOS video modes (BIOS number + 0x0100) */ -#define VIDEO_FIRST_BIOS 0x0100 - -/* VESA BIOS video modes (VESA number + 0x0200) */ -#define VIDEO_FIRST_VESA 0x0200 - -/* Video7 special modes (BIOS number + 0x0900) */ -#define VIDEO_FIRST_V7 0x0900 - -# Setting of user mode (AX=mode ID) => CF=success - -# For now, we only handle VESA modes (0x0200..0x03ff). To handle other -# modes, we should probably compile in the video code from the boot -# directory. -mode_set: - movw %ax, %bx - subb $VIDEO_FIRST_VESA>>8, %bh - cmpb $2, %bh - jb check_vesa - -setbad: - clc - ret - -check_vesa: - orw $0x4000, %bx # Use linear frame buffer - movw $0x4f02, %ax # VESA BIOS mode set call - int $0x10 - cmpw $0x004f, %ax # AL=4f if implemented - jnz setbad # AH=0 if OK - - stc - ret - - .code32 - ALIGN - -.org 0x800 -wakeup_stack_begin: # Stack grows down - -.org 0xff0 # Just below end of page -wakeup_stack: -ENTRY(wakeup_end) - -.org 0x1000 - -wakeup_pmode_return: - movw $__KERNEL_DS, %ax - movw %ax, %ss - movw %ax, %ds - movw %ax, %es - movw %ax, %fs - movw %ax, %gs - movw $0x0e00 + 'u', 0xb8016 - - # reload the gdt, as we need the full 32 bit address - lgdt saved_gdt - lidt saved_idt - lldt saved_ldt - ljmp $(__KERNEL_CS),$1f -1: - movl %cr3, %eax - movl %eax, %cr3 - wbinvd - - # and restore the stack ... but you need gdt for this to work - movl saved_context_esp, %esp - - movl %cs:saved_magic, %eax - cmpl $0x12345678, %eax - jne bogus_magic - - # jump to place where we left off - movl saved_eip,%eax - jmp *%eax - -bogus_magic: - movw $0x0e00 + 'B', 0xb8018 - jmp bogus_magic - - -## -# acpi_copy_wakeup_routine -# -# Copy the above routine to low memory. 
-# -# Parameters: -# %eax: place to copy wakeup routine to -# -# Returned address is location of code in low memory (past data and stack) -# -ENTRY(acpi_copy_wakeup_routine) - - pushl %ebx - sgdt saved_gdt - sidt saved_idt - sldt saved_ldt - str saved_tss - - movl nx_enabled, %edx - movl %edx, real_efer_save_restore - wakeup_start (%eax) - testl $1, real_efer_save_restore - wakeup_start (%eax) - jz 2f - # save efer setting - pushl %eax - movl %eax, %ebx - mov $0xc0000080, %ecx - rdmsr - movl %edx, real_save_efer_edx - wakeup_start (%ebx) - movl %eax, real_save_efer_eax - wakeup_start (%ebx) - popl %eax -2: - - movl %cr3, %edx - movl %edx, real_save_cr3 - wakeup_start (%eax) - movl %cr4, %edx - movl %edx, real_save_cr4 - wakeup_start (%eax) - movl %cr0, %edx - movl %edx, real_save_cr0 - wakeup_start (%eax) - sgdt real_save_gdt - wakeup_start (%eax) - - movl saved_videomode, %edx - movl %edx, video_mode - wakeup_start (%eax) - movl acpi_realmode_flags, %edx - movl %edx, realmode_flags - wakeup_start (%eax) - movl $0x12345678, real_magic - wakeup_start (%eax) - movl $0x12345678, saved_magic - popl %ebx - ret - -save_registers: - leal 4(%esp), %eax - movl %eax, saved_context_esp - movl %ebx, saved_context_ebx - movl %ebp, saved_context_ebp - movl %esi, saved_context_esi - movl %edi, saved_context_edi - pushfl ; popl saved_context_eflags - - movl $ret_point, saved_eip - ret - - -restore_registers: - movl saved_context_ebp, %ebp - movl saved_context_ebx, %ebx - movl saved_context_esi, %esi - movl saved_context_edi, %edi - pushl saved_context_eflags ; popfl - ret - -ENTRY(do_suspend_lowlevel) - call save_processor_state - call save_registers - pushl $3 - call acpi_enter_sleep_state - addl $4, %esp - -# In case of S3 failure, we'll emerge here. Jump -# to ret_point to recover - jmp ret_point - .p2align 4,,7 -ret_point: - call restore_registers - call restore_processor_state - ret - -.data -ALIGN -ENTRY(saved_magic) .long 0 -ENTRY(saved_eip) .long 0 - -# saved registers -saved_gdt: .long 0,0 -saved_idt: .long 0,0 -saved_ldt: .long 0 -saved_tss: .long 0 - diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile new file mode 100644 index 00000000000..dd4eb7cef2b --- /dev/null +++ b/arch/x86/kernel/acpi/Makefile @@ -0,0 +1,5 @@ +ifeq ($(CONFIG_X86_32),y) +include ${srctree}/arch/x86/kernel/acpi/Makefile_32 +else +include ${srctree}/arch/x86_64/kernel/acpi/Makefile_64 +endif diff --git a/arch/x86/kernel/acpi/Makefile_32 b/arch/x86/kernel/acpi/Makefile_32 new file mode 100644 index 00000000000..a4852a2e919 --- /dev/null +++ b/arch/x86/kernel/acpi/Makefile_32 @@ -0,0 +1,10 @@ +obj-$(CONFIG_ACPI) += boot.o +ifneq ($(CONFIG_PCI),) +obj-$(CONFIG_X86_IO_APIC) += earlyquirk_32.o +endif +obj-$(CONFIG_ACPI_SLEEP) += sleep_32.o wakeup_32.o + +ifneq ($(CONFIG_ACPI_PROCESSOR),) +obj-y += cstate.o processor.o +endif + diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c new file mode 100644 index 00000000000..cacdd883bf2 --- /dev/null +++ b/arch/x86/kernel/acpi/boot.c @@ -0,0 +1,1326 @@ +/* + * boot.c - Architecture-Specific Low-Level ACPI Boot Support + * + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * Copyright (C) 2001 Jun Nakajima + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +static int __initdata acpi_force = 0; + +#ifdef CONFIG_ACPI +int acpi_disabled = 0; +#else +int acpi_disabled = 1; +#endif +EXPORT_SYMBOL(acpi_disabled); + +#ifdef CONFIG_X86_64 + +#include + +static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; } + + +#else /* X86 */ + +#ifdef CONFIG_X86_LOCAL_APIC +#include +#include +#endif /* CONFIG_X86_LOCAL_APIC */ + +#endif /* X86 */ + +#define BAD_MADT_ENTRY(entry, end) ( \ + (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ + ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) + +#define PREFIX "ACPI: " + +int acpi_noirq; /* skip ACPI IRQ initialization */ +int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */ +int acpi_ht __initdata = 1; /* enable HT */ + +int acpi_lapic; +int acpi_ioapic; +int acpi_strict; +EXPORT_SYMBOL(acpi_strict); + +u8 acpi_sci_flags __initdata; +int acpi_sci_override_gsi __initdata; +int acpi_skip_timer_override __initdata; +int acpi_use_timer_override __initdata; + +#ifdef CONFIG_X86_LOCAL_APIC +static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; +#endif + +#ifndef __HAVE_ARCH_CMPXCHG +#warning ACPI uses CMPXCHG, i486 and later hardware +#endif + +/* -------------------------------------------------------------------------- + Boot-time Configuration + -------------------------------------------------------------------------- */ + +/* + * The default interrupt routing model is PIC (8259). This gets + * overriden if IOAPICs are enumerated (below). + */ +enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC; + +#ifdef CONFIG_X86_64 + +/* rely on all ACPI tables being in the direct mapping */ +char *__acpi_map_table(unsigned long phys_addr, unsigned long size) +{ + if (!phys_addr || !size) + return NULL; + + if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE) + return __va(phys_addr); + + return NULL; +} + +#else + +/* + * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END, + * to map the target physical address. The problem is that set_fixmap() + * provides a single page, and it is possible that the page is not + * sufficient. + * By using this area, we can map up to MAX_IO_APICS pages temporarily, + * i.e. until the next __va_range() call. + * + * Important Safety Note: The fixed I/O APIC page numbers are *subtracted* + * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and + * count idx down while incrementing the phys address. 
+ */ +char *__acpi_map_table(unsigned long phys, unsigned long size) +{ + unsigned long base, offset, mapped_size; + int idx; + + if (phys + size < 8 * 1024 * 1024) + return __va(phys); + + offset = phys & (PAGE_SIZE - 1); + mapped_size = PAGE_SIZE - offset; + set_fixmap(FIX_ACPI_END, phys); + base = fix_to_virt(FIX_ACPI_END); + + /* + * Most cases can be covered by the below. + */ + idx = FIX_ACPI_END; + while (mapped_size < size) { + if (--idx < FIX_ACPI_BEGIN) + return NULL; /* cannot handle this */ + phys += PAGE_SIZE; + set_fixmap(idx, phys); + mapped_size += PAGE_SIZE; + } + + return ((unsigned char *)base + offset); +} +#endif + +#ifdef CONFIG_PCI_MMCONFIG +/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */ +struct acpi_mcfg_allocation *pci_mmcfg_config; +int pci_mmcfg_config_num; + +int __init acpi_parse_mcfg(struct acpi_table_header *header) +{ + struct acpi_table_mcfg *mcfg; + unsigned long i; + int config_size; + + if (!header) + return -EINVAL; + + mcfg = (struct acpi_table_mcfg *)header; + + /* how many config structures do we have */ + pci_mmcfg_config_num = 0; + i = header->length - sizeof(struct acpi_table_mcfg); + while (i >= sizeof(struct acpi_mcfg_allocation)) { + ++pci_mmcfg_config_num; + i -= sizeof(struct acpi_mcfg_allocation); + }; + if (pci_mmcfg_config_num == 0) { + printk(KERN_ERR PREFIX "MMCONFIG has no entries\n"); + return -ENODEV; + } + + config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config); + pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL); + if (!pci_mmcfg_config) { + printk(KERN_WARNING PREFIX + "No memory for MCFG config tables\n"); + return -ENOMEM; + } + + memcpy(pci_mmcfg_config, &mcfg[1], config_size); + for (i = 0; i < pci_mmcfg_config_num; ++i) { + if (pci_mmcfg_config[i].address > 0xFFFFFFFF) { + printk(KERN_ERR PREFIX + "MMCONFIG not in low 4GB of memory\n"); + kfree(pci_mmcfg_config); + pci_mmcfg_config_num = 0; + return -ENODEV; + } + } + + return 0; +} +#endif /* CONFIG_PCI_MMCONFIG */ + +#ifdef CONFIG_X86_LOCAL_APIC +static int __init acpi_parse_madt(struct acpi_table_header *table) +{ + struct acpi_table_madt *madt = NULL; + + if (!cpu_has_apic) + return -EINVAL; + + madt = (struct acpi_table_madt *)table; + if (!madt) { + printk(KERN_WARNING PREFIX "Unable to map MADT\n"); + return -ENODEV; + } + + if (madt->address) { + acpi_lapic_addr = (u64) madt->address; + + printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n", + madt->address); + } + + acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); + + return 0; +} + +static int __init +acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end) +{ + struct acpi_madt_local_apic *processor = NULL; + + processor = (struct acpi_madt_local_apic *)header; + + if (BAD_MADT_ENTRY(processor, end)) + return -EINVAL; + + acpi_table_print_madt_entry(header); + + /* + * We need to register disabled CPU as well to permit + * counting disabled CPUs. This allows us to size + * cpus_possible_map more accurately, to permit + * to not preallocating memory for all NR_CPUS + * when we use CPU hotplug. + */ + mp_register_lapic(processor->id, /* APIC ID */ + processor->lapic_flags & ACPI_MADT_ENABLED); /* Enabled? 
*/ + + return 0; +} + +static int __init +acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, + const unsigned long end) +{ + struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL; + + lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header; + + if (BAD_MADT_ENTRY(lapic_addr_ovr, end)) + return -EINVAL; + + acpi_lapic_addr = lapic_addr_ovr->address; + + return 0; +} + +static int __init +acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end) +{ + struct acpi_madt_local_apic_nmi *lapic_nmi = NULL; + + lapic_nmi = (struct acpi_madt_local_apic_nmi *)header; + + if (BAD_MADT_ENTRY(lapic_nmi, end)) + return -EINVAL; + + acpi_table_print_madt_entry(header); + + if (lapic_nmi->lint != 1) + printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); + + return 0; +} + +#endif /*CONFIG_X86_LOCAL_APIC */ + +#ifdef CONFIG_X86_IO_APIC + +static int __init +acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) +{ + struct acpi_madt_io_apic *ioapic = NULL; + + ioapic = (struct acpi_madt_io_apic *)header; + + if (BAD_MADT_ENTRY(ioapic, end)) + return -EINVAL; + + acpi_table_print_madt_entry(header); + + mp_register_ioapic(ioapic->id, + ioapic->address, ioapic->global_irq_base); + + return 0; +} + +/* + * Parse Interrupt Source Override for the ACPI SCI + */ +static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger) +{ + if (trigger == 0) /* compatible SCI trigger is level */ + trigger = 3; + + if (polarity == 0) /* compatible SCI polarity is low */ + polarity = 3; + + /* Command-line over-ride via acpi_sci= */ + if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) + trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2; + + if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK) + polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; + + /* + * mp_config_acpi_legacy_irqs() already setup IRQs < 16 + * If GSI is < 16, this will update its flags, + * else it will create a new mp_irqs[] entry. + */ + mp_override_legacy_irq(gsi, polarity, trigger, gsi); + + /* + * stash over-ride to indicate we've been here + * and for later update of acpi_gbl_FADT + */ + acpi_sci_override_gsi = gsi; + return; +} + +static int __init +acpi_parse_int_src_ovr(struct acpi_subtable_header * header, + const unsigned long end) +{ + struct acpi_madt_interrupt_override *intsrc = NULL; + + intsrc = (struct acpi_madt_interrupt_override *)header; + + if (BAD_MADT_ENTRY(intsrc, end)) + return -EINVAL; + + acpi_table_print_madt_entry(header); + + if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) { + acpi_sci_ioapic_setup(intsrc->global_irq, + intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, + (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2); + return 0; + } + + if (acpi_skip_timer_override && + intsrc->source_irq == 0 && intsrc->global_irq == 2) { + printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); + return 0; + } + + mp_override_legacy_irq(intsrc->source_irq, + intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, + (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2, + intsrc->global_irq); + + return 0; +} + +static int __init +acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end) +{ + struct acpi_madt_nmi_source *nmi_src = NULL; + + nmi_src = (struct acpi_madt_nmi_source *)header; + + if (BAD_MADT_ENTRY(nmi_src, end)) + return -EINVAL; + + acpi_table_print_madt_entry(header); + + /* TBD: Support nimsrc entries? 
*/ + + return 0; +} + +#endif /* CONFIG_X86_IO_APIC */ + +/* + * acpi_pic_sci_set_trigger() + * + * use ELCR to set PIC-mode trigger type for SCI + * + * If a PIC-mode SCI is not recognized or gives spurious IRQ7's + * it may require Edge Trigger -- use "acpi_sci=edge" + * + * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers + * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge. + * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0) + * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0) + */ + +void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger) +{ + unsigned int mask = 1 << irq; + unsigned int old, new; + + /* Real old ELCR mask */ + old = inb(0x4d0) | (inb(0x4d1) << 8); + + /* + * If we use ACPI to set PCI irq's, then we should clear ELCR + * since we will set it correctly as we enable the PCI irq + * routing. + */ + new = acpi_noirq ? old : 0; + + /* + * Update SCI information in the ELCR, it isn't in the PCI + * routing tables.. + */ + switch (trigger) { + case 1: /* Edge - clear */ + new &= ~mask; + break; + case 3: /* Level - set */ + new |= mask; + break; + } + + if (old == new) + return; + + printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old); + outb(new, 0x4d0); + outb(new >> 8, 0x4d1); +} + +int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) +{ + *irq = gsi; + return 0; +} + +/* + * success: return IRQ number (>=0) + * failure: return < 0 + */ +int acpi_register_gsi(u32 gsi, int triggering, int polarity) +{ + unsigned int irq; + unsigned int plat_gsi = gsi; + +#ifdef CONFIG_PCI + /* + * Make sure all (legacy) PCI IRQs are set as level-triggered. + */ + if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { + extern void eisa_set_level_irq(unsigned int irq); + + if (triggering == ACPI_LEVEL_SENSITIVE) + eisa_set_level_irq(gsi); + } +#endif + +#ifdef CONFIG_X86_IO_APIC + if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) { + plat_gsi = mp_register_gsi(gsi, triggering, polarity); + } +#endif + acpi_gsi_to_irq(plat_gsi, &irq); + return irq; +} + +EXPORT_SYMBOL(acpi_register_gsi); + +/* + * ACPI based hotplug support for CPU + */ +#ifdef CONFIG_ACPI_HOTPLUG_CPU +int acpi_map_lsapic(acpi_handle handle, int *pcpu) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + struct acpi_madt_local_apic *lapic; + cpumask_t tmp_map, new_map; + u8 physid; + int cpu; + + if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) + return -EINVAL; + + if (!buffer.length || !buffer.pointer) + return -EINVAL; + + obj = buffer.pointer; + if (obj->type != ACPI_TYPE_BUFFER || + obj->buffer.length < sizeof(*lapic)) { + kfree(buffer.pointer); + return -EINVAL; + } + + lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer; + + if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC || + !(lapic->lapic_flags & ACPI_MADT_ENABLED)) { + kfree(buffer.pointer); + return -EINVAL; + } + + physid = lapic->id; + + kfree(buffer.pointer); + buffer.length = ACPI_ALLOCATE_BUFFER; + buffer.pointer = NULL; + + tmp_map = cpu_present_map; + mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED); + + /* + * If mp_register_lapic successfully generates a new logical cpu + * number, then the following will get us exactly what was mapped + */ + cpus_andnot(new_map, cpu_present_map, tmp_map); + if (cpus_empty(new_map)) { + printk ("Unable to map lapic to logical cpu number\n"); + return -EINVAL; + } + + cpu = first_cpu(new_map); + + *pcpu = cpu; + return 0; +} + +EXPORT_SYMBOL(acpi_map_lsapic); + +int acpi_unmap_lsapic(int cpu) +{ + 
x86_cpu_to_apicid[cpu] = -1; + cpu_clear(cpu, cpu_present_map); + num_processors--; + + return (0); +} + +EXPORT_SYMBOL(acpi_unmap_lsapic); +#endif /* CONFIG_ACPI_HOTPLUG_CPU */ + +int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) +{ + /* TBD */ + return -EINVAL; +} + +EXPORT_SYMBOL(acpi_register_ioapic); + +int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) +{ + /* TBD */ + return -EINVAL; +} + +EXPORT_SYMBOL(acpi_unregister_ioapic); + +static unsigned long __init +acpi_scan_rsdp(unsigned long start, unsigned long length) +{ + unsigned long offset = 0; + unsigned long sig_len = sizeof("RSD PTR ") - 1; + + /* + * Scan all 16-byte boundaries of the physical memory region for the + * RSDP signature. + */ + for (offset = 0; offset < length; offset += 16) { + if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len)) + continue; + return (start + offset); + } + + return 0; +} + +static int __init acpi_parse_sbf(struct acpi_table_header *table) +{ + struct acpi_table_boot *sb; + + sb = (struct acpi_table_boot *)table; + if (!sb) { + printk(KERN_WARNING PREFIX "Unable to map SBF\n"); + return -ENODEV; + } + + sbf_port = sb->cmos_index; /* Save CMOS port */ + + return 0; +} + +#ifdef CONFIG_HPET_TIMER +#include + +static struct __initdata resource *hpet_res; + +static int __init acpi_parse_hpet(struct acpi_table_header *table) +{ + struct acpi_table_hpet *hpet_tbl; + + hpet_tbl = (struct acpi_table_hpet *)table; + if (!hpet_tbl) { + printk(KERN_WARNING PREFIX "Unable to map HPET\n"); + return -ENODEV; + } + + if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) { + printk(KERN_WARNING PREFIX "HPET timers must be located in " + "memory.\n"); + return -1; + } + + hpet_address = hpet_tbl->address.address; + printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", + hpet_tbl->id, hpet_address); + + /* + * Allocate and initialize the HPET firmware resource for adding into + * the resource tree during the lateinit timeframe. + */ +#define HPET_RESOURCE_NAME_SIZE 9 + hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE); + + if (!hpet_res) + return 0; + + memset(hpet_res, 0, sizeof(*hpet_res)); + hpet_res->name = (void *)&hpet_res[1]; + hpet_res->flags = IORESOURCE_MEM; + snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u", + hpet_tbl->sequence); + + hpet_res->start = hpet_address; + hpet_res->end = hpet_address + (1 * 1024) - 1; + + return 0; +} + +/* + * hpet_insert_resource inserts the HPET resources used into the resource + * tree. + */ +static __init int hpet_insert_resource(void) +{ + if (!hpet_res) + return 1; + + return insert_resource(&iomem_resource, hpet_res); +} + +late_initcall(hpet_insert_resource); + +#else +#define acpi_parse_hpet NULL +#endif + +static int __init acpi_parse_fadt(struct acpi_table_header *table) +{ + +#ifdef CONFIG_X86_PM_TIMER + /* detect the location of the ACPI PM Timer */ + if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) { + /* FADT rev. 2 */ + if (acpi_gbl_FADT.xpm_timer_block.space_id != + ACPI_ADR_SPACE_SYSTEM_IO) + return 0; + + pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address; + /* + * "X" fields are optional extensions to the original V1.0 + * fields, so we must selectively expand V1.0 fields if the + * corresponding X field is zero. + */ + if (!pmtmr_ioport) + pmtmr_ioport = acpi_gbl_FADT.pm_timer_block; + } else { + /* FADT rev. 
1 */ + pmtmr_ioport = acpi_gbl_FADT.pm_timer_block; + } + if (pmtmr_ioport) + printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n", + pmtmr_ioport); +#endif + return 0; +} + +unsigned long __init acpi_find_rsdp(void) +{ + unsigned long rsdp_phys = 0; + + if (efi_enabled) { + if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) + return efi.acpi20; + else if (efi.acpi != EFI_INVALID_TABLE_ADDR) + return efi.acpi; + } + /* + * Scan memory looking for the RSDP signature. First search EBDA (low + * memory) paragraphs and then search upper memory (E0000-FFFFF). + */ + rsdp_phys = acpi_scan_rsdp(0, 0x400); + if (!rsdp_phys) + rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000); + + return rsdp_phys; +} + +#ifdef CONFIG_X86_LOCAL_APIC +/* + * Parse LAPIC entries in MADT + * returns 0 on success, < 0 on error + */ +static int __init acpi_parse_madt_lapic_entries(void) +{ + int count; + + if (!cpu_has_apic) + return -ENODEV; + + /* + * Note that the LAPIC address is obtained from the MADT (32-bit value) + * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). + */ + + count = + acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, + acpi_parse_lapic_addr_ovr, 0); + if (count < 0) { + printk(KERN_ERR PREFIX + "Error parsing LAPIC address override entry\n"); + return count; + } + + mp_register_lapic_address(acpi_lapic_addr); + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic, + MAX_APICS); + if (!count) { + printk(KERN_ERR PREFIX "No LAPIC entries present\n"); + /* TBD: Cleanup to allow fallback to MPS */ + return -ENODEV; + } else if (count < 0) { + printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n"); + /* TBD: Cleanup to allow fallback to MPS */ + return count; + } + + count = + acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0); + if (count < 0) { + printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); + /* TBD: Cleanup to allow fallback to MPS */ + return count; + } + return 0; +} +#endif /* CONFIG_X86_LOCAL_APIC */ + +#ifdef CONFIG_X86_IO_APIC +/* + * Parse IOAPIC related entries in MADT + * returns 0 on success, < 0 on error + */ +static int __init acpi_parse_madt_ioapic_entries(void) +{ + int count; + + /* + * ACPI interpreter is required to complete interrupt setup, + * so if it is off, don't enumerate the io-apics with ACPI. + * If MPS is present, it will handle them, + * otherwise the system will stay in PIC mode + */ + if (acpi_disabled || acpi_noirq) { + return -ENODEV; + } + + if (!cpu_has_apic) + return -ENODEV; + + /* + * if "noapic" boot option, don't look for IO-APICs + */ + if (skip_ioapic_setup) { + printk(KERN_INFO PREFIX "Skipping IOAPIC probe " + "due to 'noapic' option.\n"); + return -ENODEV; + } + + count = + acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic, + MAX_IO_APICS); + if (!count) { + printk(KERN_ERR PREFIX "No IOAPIC entries present\n"); + return -ENODEV; + } else if (count < 0) { + printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n"); + return count; + } + + count = + acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, + NR_IRQ_VECTORS); + if (count < 0) { + printk(KERN_ERR PREFIX + "Error parsing interrupt source overrides entry\n"); + /* TBD: Cleanup to allow fallback to MPS */ + return count; + } + + /* + * If BIOS did not supply an INT_SRC_OVR for the SCI + * pretend we got one so we can set the SCI flags. 
+ */ + if (!acpi_sci_override_gsi) + acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0); + + /* Fill in identity legacy mapings where no override */ + mp_config_acpi_legacy_irqs(); + + count = + acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, + NR_IRQ_VECTORS); + if (count < 0) { + printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); + /* TBD: Cleanup to allow fallback to MPS */ + return count; + } + + return 0; +} +#else +static inline int acpi_parse_madt_ioapic_entries(void) +{ + return -1; +} +#endif /* !CONFIG_X86_IO_APIC */ + +static void __init acpi_process_madt(void) +{ +#ifdef CONFIG_X86_LOCAL_APIC + int error; + + if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { + + /* + * Parse MADT LAPIC entries + */ + error = acpi_parse_madt_lapic_entries(); + if (!error) { + acpi_lapic = 1; + +#ifdef CONFIG_X86_GENERICARCH + generic_bigsmp_probe(); +#endif + /* + * Parse MADT IO-APIC entries + */ + error = acpi_parse_madt_ioapic_entries(); + if (!error) { + acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; + acpi_irq_balance_set(NULL); + acpi_ioapic = 1; + + smp_found_config = 1; + setup_apic_routing(); + } + } + if (error == -EINVAL) { + /* + * Dell Precision Workstation 410, 610 come here. + */ + printk(KERN_ERR PREFIX + "Invalid BIOS MADT, disabling ACPI\n"); + disable_acpi(); + } + } +#endif + return; +} + +#ifdef __i386__ + +static int __init disable_acpi_irq(struct dmi_system_id *d) +{ + if (!acpi_force) { + printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n", + d->ident); + acpi_noirq_set(); + } + return 0; +} + +static int __init disable_acpi_pci(struct dmi_system_id *d) +{ + if (!acpi_force) { + printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n", + d->ident); + acpi_disable_pci(); + } + return 0; +} + +static int __init dmi_disable_acpi(struct dmi_system_id *d) +{ + if (!acpi_force) { + printk(KERN_NOTICE "%s detected: acpi off\n", d->ident); + disable_acpi(); + } else { + printk(KERN_NOTICE + "Warning: DMI blacklist says broken, but acpi forced\n"); + } + return 0; +} + +/* + * Limit ACPI to CPU enumeration for HT + */ +static int __init force_acpi_ht(struct dmi_system_id *d) +{ + if (!acpi_force) { + printk(KERN_NOTICE "%s detected: force use of acpi=ht\n", + d->ident); + disable_acpi(); + acpi_ht = 1; + } else { + printk(KERN_NOTICE + "Warning: acpi=force overrules DMI blacklist: acpi=ht\n"); + } + return 0; +} + +/* + * If your system is blacklisted here, but you find that acpi=force + * works for you, please contact acpi-devel@sourceforge.net + */ +static struct dmi_system_id __initdata acpi_dmi_table[] = { + /* + * Boxes that need ACPI disabled + */ + { + .callback = dmi_disable_acpi, + .ident = "IBM Thinkpad", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), + DMI_MATCH(DMI_BOARD_NAME, "2629H1G"), + }, + }, + + /* + * Boxes that need acpi=ht + */ + { + .callback = force_acpi_ht, + .ident = "FSC Primergy T850", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "HP VISUALIZE NT Workstation", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "Compaq Workstation W8000", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), + DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "ASUS P4B266", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, 
"ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P4B266"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "ASUS P2B-DS", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "ASUS CUR-DLS", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "ABIT i440BX-W83977", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ABIT "), + DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "IBM Bladecenter", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), + DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "IBM eServer xSeries 360", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), + DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "IBM eserver xSeries 330", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), + DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"), + }, + }, + { + .callback = force_acpi_ht, + .ident = "IBM eserver xSeries 440", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), + DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"), + }, + }, + + /* + * Boxes that need ACPI PCI IRQ routing disabled + */ + { + .callback = disable_acpi_irq, + .ident = "ASUS A7V", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"), + DMI_MATCH(DMI_BOARD_NAME, ""), + /* newer BIOS, Revision 1011, does work */ + DMI_MATCH(DMI_BIOS_VERSION, + "ASUS A7V ACPI BIOS Revision 1007"), + }, + }, + { + /* + * Latest BIOS for IBM 600E (1.16) has bad pcinum + * for LPC bridge, which is needed for the PCI + * interrupt links to work. DSDT fix is in bug 5966. + * 2645, 2646 model numbers are shared with 600/600E/600X + */ + .callback = disable_acpi_irq, + .ident = "IBM Thinkpad 600 Series 2645", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), + DMI_MATCH(DMI_BOARD_NAME, "2645"), + }, + }, + { + .callback = disable_acpi_irq, + .ident = "IBM Thinkpad 600 Series 2646", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), + DMI_MATCH(DMI_BOARD_NAME, "2646"), + }, + }, + /* + * Boxes that need ACPI PCI IRQ routing and PCI scan disabled + */ + { /* _BBN 0 bug */ + .callback = disable_acpi_pci, + .ident = "ASUS PR-DLS", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"), + DMI_MATCH(DMI_BIOS_VERSION, + "ASUS PR-DLS ACPI BIOS Revision 1010"), + DMI_MATCH(DMI_BIOS_DATE, "03/21/2003") + }, + }, + { + .callback = disable_acpi_pci, + .ident = "Acer TravelMate 36x Laptop", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), + }, + }, + {} +}; + +#endif /* __i386__ */ + +/* + * acpi_boot_table_init() and acpi_boot_init() + * called from setup_arch(), always. + * 1. checksums all tables + * 2. enumerates lapics + * 3. enumerates io-apics + * + * acpi_table_init() is separate to allow reading SRAT without + * other side effects. + * + * side effects of acpi_boot_init: + * acpi_lapic = 1 if LAPIC found + * acpi_ioapic = 1 if IOAPIC found + * if (acpi_lapic && acpi_ioapic) smp_found_config = 1; + * if acpi_blacklisted() acpi_disabled = 1; + * acpi_irq_model=... + * ... 
+ * + * return value: (currently ignored) + * 0: success + * !0: failure + */ + +int __init acpi_boot_table_init(void) +{ + int error; + +#ifdef __i386__ + dmi_check_system(acpi_dmi_table); +#endif + + /* + * If acpi_disabled, bail out + * One exception: acpi=ht continues far enough to enumerate LAPICs + */ + if (acpi_disabled && !acpi_ht) + return 1; + + /* + * Initialize the ACPI boot-time table parser. + */ + error = acpi_table_init(); + if (error) { + disable_acpi(); + return error; + } + + acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); + + /* + * blacklist may disable ACPI entirely + */ + error = acpi_blacklisted(); + if (error) { + if (acpi_force) { + printk(KERN_WARNING PREFIX "acpi=force override\n"); + } else { + printk(KERN_WARNING PREFIX "Disabling ACPI support\n"); + disable_acpi(); + return error; + } + } + + return 0; +} + +int __init acpi_boot_init(void) +{ + /* + * If acpi_disabled, bail out + * One exception: acpi=ht continues far enough to enumerate LAPICs + */ + if (acpi_disabled && !acpi_ht) + return 1; + + acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); + + /* + * set sci_int and PM timer address + */ + acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt); + + /* + * Process the Multiple APIC Description Table (MADT), if present + */ + acpi_process_madt(); + + acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet); + + return 0; +} + +static int __init parse_acpi(char *arg) +{ + if (!arg) + return -EINVAL; + + /* "acpi=off" disables both ACPI table parsing and interpreter */ + if (strcmp(arg, "off") == 0) { + disable_acpi(); + } + /* acpi=force to over-ride black-list */ + else if (strcmp(arg, "force") == 0) { + acpi_force = 1; + acpi_ht = 1; + acpi_disabled = 0; + } + /* acpi=strict disables out-of-spec workarounds */ + else if (strcmp(arg, "strict") == 0) { + acpi_strict = 1; + } + /* Limit ACPI just to boot-time to enable HT */ + else if (strcmp(arg, "ht") == 0) { + if (!acpi_force) + disable_acpi(); + acpi_ht = 1; + } + /* "acpi=noirq" disables ACPI interrupt routing */ + else if (strcmp(arg, "noirq") == 0) { + acpi_noirq_set(); + } else { + /* Core will printk when we return error. */ + return -EINVAL; + } + return 0; +} +early_param("acpi", parse_acpi); + +/* FIXME: Using pci= for an ACPI parameter is a travesty. 
*/ +static int __init parse_pci(char *arg) +{ + if (arg && strcmp(arg, "noacpi") == 0) + acpi_disable_pci(); + return 0; +} +early_param("pci", parse_pci); + +#ifdef CONFIG_X86_IO_APIC +static int __init parse_acpi_skip_timer_override(char *arg) +{ + acpi_skip_timer_override = 1; + return 0; +} +early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override); + +static int __init parse_acpi_use_timer_override(char *arg) +{ + acpi_use_timer_override = 1; + return 0; +} +early_param("acpi_use_timer_override", parse_acpi_use_timer_override); +#endif /* CONFIG_X86_IO_APIC */ + +static int __init setup_acpi_sci(char *s) +{ + if (!s) + return -EINVAL; + if (!strcmp(s, "edge")) + acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE | + (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK); + else if (!strcmp(s, "level")) + acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL | + (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK); + else if (!strcmp(s, "high")) + acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH | + (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK); + else if (!strcmp(s, "low")) + acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW | + (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK); + else + return -EINVAL; + return 0; +} +early_param("acpi_sci", setup_acpi_sci); + +int __acpi_acquire_global_lock(unsigned int *lock) +{ + unsigned int old, new, val; + do { + old = *lock; + new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1)); + val = cmpxchg(lock, old, new); + } while (unlikely (val != old)); + return (new < 3) ? -1 : 0; +} + +int __acpi_release_global_lock(unsigned int *lock) +{ + unsigned int old, new, val; + do { + old = *lock; + new = old & ~0x3; + val = cmpxchg(lock, old, new); + } while (unlikely (val != old)); + return old & 0x1; +} diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c new file mode 100644 index 00000000000..2d39f55d29a --- /dev/null +++ b/arch/x86/kernel/acpi/cstate.c @@ -0,0 +1,164 @@ +/* + * arch/i386/kernel/acpi/cstate.c + * + * Copyright (C) 2005 Intel Corporation + * Venkatesh Pallipadi + * - Added _PDC for SMP C-states on Intel CPUs + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +/* + * Initialize bm_flags based on the CPU cache properties + * On SMP it depends on cache configuration + * - When cache is not shared among all CPUs, we flush cache + * before entering C3. + * - When cache is shared among all CPUs, we use bm_check + * mechanism as in UP case + * + * This routine is called only after all the CPUs are online + */ +void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, + unsigned int cpu) +{ + struct cpuinfo_x86 *c = cpu_data + cpu; + + flags->bm_check = 0; + if (num_online_cpus() == 1) + flags->bm_check = 1; + else if (c->x86_vendor == X86_VENDOR_INTEL) { + /* + * Today all CPUs that support C3 share cache. + * TBD: This needs to look at cache shared map, once + * multi-core detection patch makes to the base. 
+ */ + flags->bm_check = 1; + } +} +EXPORT_SYMBOL(acpi_processor_power_init_bm_check); + +/* The code below handles cstate entry with monitor-mwait pair on Intel*/ + +struct cstate_entry { + struct { + unsigned int eax; + unsigned int ecx; + } states[ACPI_PROCESSOR_MAX_POWER]; +}; +static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */ + +static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; + +#define MWAIT_SUBSTATE_MASK (0xf) +#define MWAIT_SUBSTATE_SIZE (4) + +#define CPUID_MWAIT_LEAF (5) +#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) +#define CPUID5_ECX_INTERRUPT_BREAK (0x2) + +#define MWAIT_ECX_INTERRUPT_BREAK (0x1) + +#define NATIVE_CSTATE_BEYOND_HALT (2) + +int acpi_processor_ffh_cstate_probe(unsigned int cpu, + struct acpi_processor_cx *cx, struct acpi_power_register *reg) +{ + struct cstate_entry *percpu_entry; + struct cpuinfo_x86 *c = cpu_data + cpu; + + cpumask_t saved_mask; + int retval; + unsigned int eax, ebx, ecx, edx; + unsigned int edx_part; + unsigned int cstate_type; /* C-state type and not ACPI C-state type */ + unsigned int num_cstate_subtype; + + if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF ) + return -1; + + if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT) + return -1; + + percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); + percpu_entry->states[cx->index].eax = 0; + percpu_entry->states[cx->index].ecx = 0; + + /* Make sure we are running on right CPU */ + saved_mask = current->cpus_allowed; + retval = set_cpus_allowed(current, cpumask_of_cpu(cpu)); + if (retval) + return -1; + + cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); + + /* Check whether this particular cx_type (in CST) is supported or not */ + cstate_type = (cx->address >> MWAIT_SUBSTATE_SIZE) + 1; + edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE); + num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK; + + retval = 0; + if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) { + retval = -1; + goto out; + } + + /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */ + if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || + !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) { + retval = -1; + goto out; + } + percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK; + + /* Use the hint in CST */ + percpu_entry->states[cx->index].eax = cx->address; + + if (!mwait_supported[cstate_type]) { + mwait_supported[cstate_type] = 1; + printk(KERN_DEBUG "Monitor-Mwait will be used to enter C-%d " + "state\n", cx->type); + } + +out: + set_cpus_allowed(current, saved_mask); + return retval; +} +EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); + +void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) +{ + unsigned int cpu = smp_processor_id(); + struct cstate_entry *percpu_entry; + + percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); + mwait_idle_with_hints(percpu_entry->states[cx->index].eax, + percpu_entry->states[cx->index].ecx); +} +EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter); + +static int __init ffh_cstate_init(void) +{ + struct cpuinfo_x86 *c = &boot_cpu_data; + if (c->x86_vendor != X86_VENDOR_INTEL) + return -1; + + cpu_cstate_entry = alloc_percpu(struct cstate_entry); + return 0; +} + +static void __exit ffh_cstate_exit(void) +{ + free_percpu(cpu_cstate_entry); + cpu_cstate_entry = NULL; +} + +arch_initcall(ffh_cstate_init); +__exitcall(ffh_cstate_exit); diff --git a/arch/x86/kernel/acpi/earlyquirk_32.c b/arch/x86/kernel/acpi/earlyquirk_32.c new file mode 100644 index 00000000000..23f78efc577 --- /dev/null +++ b/arch/x86/kernel/acpi/earlyquirk_32.c 
@@ -0,0 +1,84 @@ +/* + * Do early PCI probing for bug detection when the main PCI subsystem is + * not up yet. + */ +#include +#include +#include +#include + +#include +#include +#include + +#ifdef CONFIG_ACPI + +static int __init nvidia_hpet_check(struct acpi_table_header *header) +{ + return 0; +} +#endif + +static int __init check_bridge(int vendor, int device) +{ +#ifdef CONFIG_ACPI + static int warned; + /* According to Nvidia all timer overrides are bogus unless HPET + is enabled. */ + if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) { + if (!warned && acpi_table_parse(ACPI_SIG_HPET, + nvidia_hpet_check)) { + warned = 1; + acpi_skip_timer_override = 1; + printk(KERN_INFO "Nvidia board " + "detected. Ignoring ACPI " + "timer override.\n"); + printk(KERN_INFO "If you got timer trouble " + "try acpi_use_timer_override\n"); + + } + } +#endif + if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) { + timer_over_8254 = 0; + printk(KERN_INFO "ATI board detected. Disabling timer routing " + "over 8254.\n"); + } + return 0; +} + +void __init check_acpi_pci(void) +{ + int num, slot, func; + + /* Assume the machine supports type 1. If not it will + always read ffffffff and should not have any side effect. + Actually a few buggy systems can machine check. Allow the user + to disable it by command line option at least -AK */ + if (!early_pci_allowed()) + return; + + /* Poor man's PCI discovery */ + for (num = 0; num < 32; num++) { + for (slot = 0; slot < 32; slot++) { + for (func = 0; func < 8; func++) { + u32 class; + u32 vendor; + class = read_pci_config(num, slot, func, + PCI_CLASS_REVISION); + if (class == 0xffffffff) + break; + + if ((class >> 16) != PCI_CLASS_BRIDGE_PCI) + continue; + + vendor = read_pci_config(num, slot, func, + PCI_VENDOR_ID); + + if (check_bridge(vendor & 0xffff, vendor >> 16)) + return; + } + + } + } +} diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c new file mode 100644 index 00000000000..b54fded4983 --- /dev/null +++ b/arch/x86/kernel/acpi/processor.c @@ -0,0 +1,75 @@ +/* + * arch/i386/kernel/acpi/processor.c + * + * Copyright (C) 2005 Intel Corporation + * Venkatesh Pallipadi + * - Added _PDC for platforms with Intel CPUs + */ + +#include +#include +#include +#include + +#include +#include + +static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c) +{ + struct acpi_object_list *obj_list; + union acpi_object *obj; + u32 *buf; + + /* allocate and initialize pdc. It will be used later. 
*/ + obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL); + if (!obj_list) { + printk(KERN_ERR "Memory allocation error\n"); + return; + } + + obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL); + if (!obj) { + printk(KERN_ERR "Memory allocation error\n"); + kfree(obj_list); + return; + } + + buf = kmalloc(12, GFP_KERNEL); + if (!buf) { + printk(KERN_ERR "Memory allocation error\n"); + kfree(obj); + kfree(obj_list); + return; + } + + buf[0] = ACPI_PDC_REVISION_ID; + buf[1] = 1; + buf[2] = ACPI_PDC_C_CAPABILITY_SMP; + + if (cpu_has(c, X86_FEATURE_EST)) + buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP; + + obj->type = ACPI_TYPE_BUFFER; + obj->buffer.length = 12; + obj->buffer.pointer = (u8 *) buf; + obj_list->count = 1; + obj_list->pointer = obj; + pr->pdc = obj_list; + + return; +} + +/* Initialize _PDC data based on the CPU vendor */ +void arch_acpi_processor_init_pdc(struct acpi_processor *pr) +{ + unsigned int cpu = pr->id; + struct cpuinfo_x86 *c = cpu_data + cpu; + + pr->pdc = NULL; + if (c->x86_vendor == X86_VENDOR_INTEL) + init_intel_pdc(pr, c); + + return; +} + +EXPORT_SYMBOL(arch_acpi_processor_init_pdc); diff --git a/arch/x86/kernel/acpi/sleep_32.c b/arch/x86/kernel/acpi/sleep_32.c new file mode 100644 index 00000000000..c42b5ab49de --- /dev/null +++ b/arch/x86/kernel/acpi/sleep_32.c @@ -0,0 +1,110 @@ +/* + * sleep.c - x86-specific ACPI sleep support. + * + * Copyright (C) 2001-2003 Patrick Mochel + * Copyright (C) 2001-2003 Pavel Machek + */ + +#include +#include +#include +#include + +#include + +/* address in low memory of the wakeup routine. */ +unsigned long acpi_wakeup_address = 0; +unsigned long acpi_realmode_flags; +extern char wakeup_start, wakeup_end; + +extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long)); + +/** + * acpi_save_state_mem - save kernel state + * + * Create an identity mapped page table and copy the wakeup routine to + * low memory. + */ +int acpi_save_state_mem(void) +{ + if (!acpi_wakeup_address) + return 1; + memcpy((void *)acpi_wakeup_address, &wakeup_start, + &wakeup_end - &wakeup_start); + acpi_copy_wakeup_routine(acpi_wakeup_address); + + return 0; +} + +/* + * acpi_restore_state - undo effects of acpi_save_state_mem + */ +void acpi_restore_state_mem(void) +{ +} + +/** + * acpi_reserve_bootmem - do _very_ early ACPI initialisation + * + * We allocate a page from the first 1MB of memory for the wakeup + * routine for when we come back from a sleep state. The + * runtime allocator allows specification of <16MB pages, but not + * <1MB pages. + */ +void __init acpi_reserve_bootmem(void) +{ + if ((&wakeup_end - &wakeup_start) > PAGE_SIZE) { + printk(KERN_ERR + "ACPI: Wakeup code way too big, S3 disabled.\n"); + return; + } + + acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE); + if (!acpi_wakeup_address) + printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n"); +} + +static int __init acpi_sleep_setup(char *str) +{ + while ((str != NULL) && (*str != '\0')) { + if (strncmp(str, "s3_bios", 7) == 0) + acpi_realmode_flags |= 1; + if (strncmp(str, "s3_mode", 7) == 0) + acpi_realmode_flags |= 2; + if (strncmp(str, "s3_beep", 7) == 0) + acpi_realmode_flags |= 4; + str = strchr(str, ','); + if (str != NULL) + str += strspn(str, ", \t"); + } + return 1; +} + +__setup("acpi_sleep=", acpi_sleep_setup); + +/* Ouch, we want to delete this. 
We already have better version in userspace, in + s2ram from suspend.sf.net project */ +static __init int reset_videomode_after_s3(struct dmi_system_id *d) +{ + acpi_realmode_flags |= 2; + return 0; +} + +static __initdata struct dmi_system_id acpisleep_dmi_table[] = { + { /* Reset video mode after returning from ACPI S3 sleep */ + .callback = reset_videomode_after_s3, + .ident = "Toshiba Satellite 4030cdt", + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"), + }, + }, + {} +}; + +static int __init acpisleep_dmi_init(void) +{ + dmi_check_system(acpisleep_dmi_table); + return 0; +} + +core_initcall(acpisleep_dmi_init); diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S new file mode 100644 index 00000000000..f22ba8534d2 --- /dev/null +++ b/arch/x86/kernel/acpi/wakeup_32.S @@ -0,0 +1,321 @@ +.text +#include +#include +#include + +# +# wakeup_code runs in real mode, and at unknown address (determined at run-time). +# Therefore it must only use relative jumps/calls. +# +# Do we need to deal with A20? It is okay: ACPI specs says A20 must be enabled +# +# If physical address of wakeup_code is 0x12345, BIOS should call us with +# cs = 0x1234, eip = 0x05 +# + +#define BEEP \ + inb $97, %al; \ + outb %al, $0x80; \ + movb $3, %al; \ + outb %al, $97; \ + outb %al, $0x80; \ + movb $-74, %al; \ + outb %al, $67; \ + outb %al, $0x80; \ + movb $-119, %al; \ + outb %al, $66; \ + outb %al, $0x80; \ + movb $15, %al; \ + outb %al, $66; + +ALIGN + .align 4096 +ENTRY(wakeup_start) +wakeup_code: + wakeup_code_start = . + .code16 + + movw $0xb800, %ax + movw %ax,%fs + movw $0x0e00 + 'L', %fs:(0x10) + + cli + cld + + # setup data segment + movw %cs, %ax + movw %ax, %ds # Make ds:0 point to wakeup_start + movw %ax, %ss + + testl $4, realmode_flags - wakeup_code + jz 1f + BEEP +1: + mov $(wakeup_stack - wakeup_code), %sp # Private stack is needed for ASUS board + movw $0x0e00 + 'S', %fs:(0x12) + + pushl $0 # Kill any dangerous flags + popfl + + movl real_magic - wakeup_code, %eax + cmpl $0x12345678, %eax + jne bogus_real_magic + + testl $1, realmode_flags - wakeup_code + jz 1f + lcall $0xc000,$3 + movw %cs, %ax + movw %ax, %ds # Bios might have played with that + movw %ax, %ss +1: + + testl $2, realmode_flags - wakeup_code + jz 1f + mov video_mode - wakeup_code, %ax + call mode_set +1: + + # set up page table + movl $swsusp_pg_dir-__PAGE_OFFSET, %eax + movl %eax, %cr3 + + testl $1, real_efer_save_restore - wakeup_code + jz 4f + # restore efer setting + movl real_save_efer_edx - wakeup_code, %edx + movl real_save_efer_eax - wakeup_code, %eax + mov $0xc0000080, %ecx + wrmsr +4: + # make sure %cr4 is set correctly (features, etc) + movl real_save_cr4 - wakeup_code, %eax + movl %eax, %cr4 + movw $0xb800, %ax + movw %ax,%fs + movw $0x0e00 + 'i', %fs:(0x12) + + # need a gdt -- use lgdtl to force 32-bit operands, in case + # the GDT is located past 16 megabytes. 
+ lgdtl real_save_gdt - wakeup_code + + movl real_save_cr0 - wakeup_code, %eax + movl %eax, %cr0 + jmp 1f +1: + movw $0x0e00 + 'n', %fs:(0x14) + + movl real_magic - wakeup_code, %eax + cmpl $0x12345678, %eax + jne bogus_real_magic + + testl $8, realmode_flags - wakeup_code + jz 1f + BEEP +1: + ljmpl $__KERNEL_CS, $wakeup_pmode_return + +real_save_gdt: .word 0 + .long 0 +real_save_cr0: .long 0 +real_save_cr3: .long 0 +real_save_cr4: .long 0 +real_magic: .long 0 +video_mode: .long 0 +realmode_flags: .long 0 +beep_flags: .long 0 +real_efer_save_restore: .long 0 +real_save_efer_edx: .long 0 +real_save_efer_eax: .long 0 + +bogus_real_magic: + movw $0x0e00 + 'B', %fs:(0x12) + jmp bogus_real_magic + +/* This code uses an extended set of video mode numbers. These include: + * Aliases for standard modes + * NORMAL_VGA (-1) + * EXTENDED_VGA (-2) + * ASK_VGA (-3) + * Video modes numbered by menu position -- NOT RECOMMENDED because of lack + * of compatibility when extending the table. These are between 0x00 and 0xff. + */ +#define VIDEO_FIRST_MENU 0x0000 + +/* Standard BIOS video modes (BIOS number + 0x0100) */ +#define VIDEO_FIRST_BIOS 0x0100 + +/* VESA BIOS video modes (VESA number + 0x0200) */ +#define VIDEO_FIRST_VESA 0x0200 + +/* Video7 special modes (BIOS number + 0x0900) */ +#define VIDEO_FIRST_V7 0x0900 + +# Setting of user mode (AX=mode ID) => CF=success + +# For now, we only handle VESA modes (0x0200..0x03ff). To handle other +# modes, we should probably compile in the video code from the boot +# directory. +mode_set: + movw %ax, %bx + subb $VIDEO_FIRST_VESA>>8, %bh + cmpb $2, %bh + jb check_vesa + +setbad: + clc + ret + +check_vesa: + orw $0x4000, %bx # Use linear frame buffer + movw $0x4f02, %ax # VESA BIOS mode set call + int $0x10 + cmpw $0x004f, %ax # AL=4f if implemented + jnz setbad # AH=0 if OK + + stc + ret + + .code32 + ALIGN + +.org 0x800 +wakeup_stack_begin: # Stack grows down + +.org 0xff0 # Just below end of page +wakeup_stack: +ENTRY(wakeup_end) + +.org 0x1000 + +wakeup_pmode_return: + movw $__KERNEL_DS, %ax + movw %ax, %ss + movw %ax, %ds + movw %ax, %es + movw %ax, %fs + movw %ax, %gs + movw $0x0e00 + 'u', 0xb8016 + + # reload the gdt, as we need the full 32 bit address + lgdt saved_gdt + lidt saved_idt + lldt saved_ldt + ljmp $(__KERNEL_CS),$1f +1: + movl %cr3, %eax + movl %eax, %cr3 + wbinvd + + # and restore the stack ... but you need gdt for this to work + movl saved_context_esp, %esp + + movl %cs:saved_magic, %eax + cmpl $0x12345678, %eax + jne bogus_magic + + # jump to place where we left off + movl saved_eip,%eax + jmp *%eax + +bogus_magic: + movw $0x0e00 + 'B', 0xb8018 + jmp bogus_magic + + +## +# acpi_copy_wakeup_routine +# +# Copy the above routine to low memory. 
+# +# Parameters: +# %eax: place to copy wakeup routine to +# +# Returned address is location of code in low memory (past data and stack) +# +ENTRY(acpi_copy_wakeup_routine) + + pushl %ebx + sgdt saved_gdt + sidt saved_idt + sldt saved_ldt + str saved_tss + + movl nx_enabled, %edx + movl %edx, real_efer_save_restore - wakeup_start (%eax) + testl $1, real_efer_save_restore - wakeup_start (%eax) + jz 2f + # save efer setting + pushl %eax + movl %eax, %ebx + mov $0xc0000080, %ecx + rdmsr + movl %edx, real_save_efer_edx - wakeup_start (%ebx) + movl %eax, real_save_efer_eax - wakeup_start (%ebx) + popl %eax +2: + + movl %cr3, %edx + movl %edx, real_save_cr3 - wakeup_start (%eax) + movl %cr4, %edx + movl %edx, real_save_cr4 - wakeup_start (%eax) + movl %cr0, %edx + movl %edx, real_save_cr0 - wakeup_start (%eax) + sgdt real_save_gdt - wakeup_start (%eax) + + movl saved_videomode, %edx + movl %edx, video_mode - wakeup_start (%eax) + movl acpi_realmode_flags, %edx + movl %edx, realmode_flags - wakeup_start (%eax) + movl $0x12345678, real_magic - wakeup_start (%eax) + movl $0x12345678, saved_magic + popl %ebx + ret + +save_registers: + leal 4(%esp), %eax + movl %eax, saved_context_esp + movl %ebx, saved_context_ebx + movl %ebp, saved_context_ebp + movl %esi, saved_context_esi + movl %edi, saved_context_edi + pushfl ; popl saved_context_eflags + + movl $ret_point, saved_eip + ret + + +restore_registers: + movl saved_context_ebp, %ebp + movl saved_context_ebx, %ebx + movl saved_context_esi, %esi + movl saved_context_edi, %edi + pushl saved_context_eflags ; popfl + ret + +ENTRY(do_suspend_lowlevel) + call save_processor_state + call save_registers + pushl $3 + call acpi_enter_sleep_state + addl $4, %esp + +# In case of S3 failure, we'll emerge here. Jump +# to ret_point to recover + jmp ret_point + .p2align 4,,7 +ret_point: + call restore_registers + call restore_processor_state + ret + +.data +ALIGN +ENTRY(saved_magic) .long 0 +ENTRY(saved_eip) .long 0 + +# saved registers +saved_gdt: .long 0,0 +saved_idt: .long 0,0 +saved_ldt: .long 0 +saved_tss: .long 0 + diff --git a/arch/x86_64/kernel/acpi/Makefile b/arch/x86_64/kernel/acpi/Makefile index 6e00bfeb59a..dd4eb7cef2b 100644 --- a/arch/x86_64/kernel/acpi/Makefile +++ b/arch/x86_64/kernel/acpi/Makefile @@ -1,5 +1,5 @@ ifeq ($(CONFIG_X86_32),y) -include ${srctree}/arch/i386/kernel/acpi/Makefile_32 +include ${srctree}/arch/x86/kernel/acpi/Makefile_32 else include ${srctree}/arch/x86_64/kernel/acpi/Makefile_64 endif diff --git a/arch/x86_64/kernel/acpi/Makefile_64 b/arch/x86_64/kernel/acpi/Makefile_64 index c24fe95b0d9..fc377c87e0b 100644 --- a/arch/x86_64/kernel/acpi/Makefile_64 +++ b/arch/x86_64/kernel/acpi/Makefile_64 @@ -1,9 +1,9 @@ obj-y := boot.o -boot-y := ../../../i386/kernel/acpi/boot.o +boot-y := ../../../x86/kernel/acpi/boot.o obj-$(CONFIG_ACPI_SLEEP) += sleep_64.o wakeup_64.o ifneq ($(CONFIG_ACPI_PROCESSOR),) obj-y += processor.o -processor-y := ../../../i386/kernel/acpi/processor.o ../../../i386/kernel/acpi/cstate.o +processor-y := ../../../x86/kernel/acpi/processor.o ../../../x86/kernel/acpi/cstate.o endif -- cgit v1.2.3
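
Supplementary note (not part of the patch): the boot.c moved above ends with __acpi_acquire_global_lock() and __acpi_release_global_lock(), which drive the FACS global-lock handshake through a two-bit word (bit 0 = pending waiter, bit 1 = owned) updated with cmpxchg(). The user-space sketch below replays the same arithmetic with C11 atomics so the bit protocol can be exercised outside the kernel; the demo_* names, <stdatomic.h> and the main() driver are illustrative assumptions, not anything this patch introduces.

/*
 * gl_demo.c - user-space sketch of the two-bit global-lock protocol used by
 * __acpi_acquire_global_lock()/__acpi_release_global_lock() in boot.c above.
 * C11 atomics stand in for the kernel's cmpxchg(); demo_* names are mine.
 *
 * Lock word layout: bit 0 = "pending" (a waiter exists), bit 1 = "owned".
 */
#include <stdatomic.h>
#include <stdio.h>

static int demo_acquire(atomic_uint *lock)
{
	unsigned int old, new;

	do {
		old = atomic_load(lock);
		/* claim ownership; if already owned, set the pending bit instead */
		new = ((old & ~0x3u) + 2) + ((old >> 1) & 0x1);
	} while (!atomic_compare_exchange_weak(lock, &old, new));

	/* new == 2: we now own the lock; new == 3: an owner exists, we only queued */
	return (new < 3) ? -1 : 0;
}

static int demo_release(atomic_uint *lock)
{
	unsigned int old, new;

	do {
		old = atomic_load(lock);
		new = old & ~0x3u;		/* clear owned and pending bits */
	} while (!atomic_compare_exchange_weak(lock, &old, new));

	return old & 0x1;			/* non-zero: a waiter was pending */
}

int main(void)
{
	atomic_uint lock = 0;

	printf("acquire free lock : %d (word=%u)\n", demo_acquire(&lock), atomic_load(&lock));
	printf("acquire held lock : %d (word=%u)\n", demo_acquire(&lock), atomic_load(&lock));
	printf("release           : pending=%d (word=%u)\n", demo_release(&lock), atomic_load(&lock));
	return 0;
}

Run on a free lock the word becomes 2 (owned); a second acquirer only sets the pending bit (word 3) and gets 0 back, and the release reports that pending bit so the caller knows a waiter must be signalled.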
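A second helper from the same file that may be worth unpacking is acpi_scan_rsdp(), which locates the RSDP by testing every 16-byte boundary of a low-memory window for the literal signature "RSD PTR ". The snippet below performs the equivalent scan over an ordinary buffer; the planted signature, memcmp() in place of the kernel's strncmp(), and the scan_rsdp()/main() names are assumptions made only so the example is self-contained and runnable.

/*
 * rsdp_scan_demo.c - illustrates the 16-byte-aligned "RSD PTR " signature
 * scan done by acpi_scan_rsdp() in boot.c above, run over an ordinary
 * buffer instead of phys_to_virt()-mapped low memory.
 */
#include <stdio.h>
#include <string.h>

static long scan_rsdp(const char *buf, unsigned long length)
{
	const unsigned long sig_len = sizeof("RSD PTR ") - 1;
	unsigned long offset;

	/* the RSDP is required to start on a 16-byte boundary */
	for (offset = 0; offset + sig_len <= length; offset += 16)
		if (memcmp(buf + offset, "RSD PTR ", sig_len) == 0)
			return (long)offset;

	return -1;				/* not found */
}

int main(void)
{
	char area[256] = { 0 };

	memcpy(area + 64, "RSD PTR ", sizeof("RSD PTR ") - 1);
	printf("signature found at offset %ld\n", scan_rsdp(area, sizeof(area)));
	return 0;
}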