Diffstat (limited to 'arch/i386/kernel/acpi')
-rw-r--r--  arch/i386/kernel/acpi/boot.c        32
-rw-r--r--  arch/i386/kernel/acpi/cstate.c      17
-rw-r--r--  arch/i386/kernel/acpi/earlyquirk.c  21
3 files changed, 56 insertions, 14 deletions
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index d12fb97a533..cbcb2c27f48 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -333,7 +333,7 @@ acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
/*
* Parse Interrupt Source Override for the ACPI SCI
*/
-static void acpi_sci_ioapic_setup(u32 bus_irq, u32 gsi, u16 polarity, u16 trigger)
+static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
{
if (trigger == 0) /* compatible SCI trigger is level */
trigger = 3;
@@ -353,13 +353,13 @@ static void acpi_sci_ioapic_setup(u32 bus_irq, u32 gsi, u16 polarity, u16 trigge
* If GSI is < 16, this will update its flags,
* else it will create a new mp_irqs[] entry.
*/
- mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+ mp_override_legacy_irq(gsi, polarity, trigger, gsi);
/*
* stash over-ride to indicate we've been here
* and for later update of acpi_fadt
*/
- acpi_sci_override_gsi = bus_irq;
+ acpi_sci_override_gsi = gsi;
return;
}
@@ -377,7 +377,7 @@ acpi_parse_int_src_ovr(acpi_table_entry_header * header,
acpi_table_print_madt_entry(header);
if (intsrc->bus_irq == acpi_fadt.sci_int) {
- acpi_sci_ioapic_setup(intsrc->bus_irq, intsrc->global_irq,
+ acpi_sci_ioapic_setup(intsrc->global_irq,
intsrc->flags.polarity,
intsrc->flags.trigger);
return 0;
@@ -880,7 +880,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
* pretend we got one so we can set the SCI flags.
*/
if (!acpi_sci_override_gsi)
- acpi_sci_ioapic_setup(acpi_fadt.sci_int, acpi_fadt.sci_int, 0, 0);
+ acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
/* Fill in identity legacy mapings where no override */
mp_config_acpi_legacy_irqs();
@@ -1327,3 +1327,25 @@ static int __init setup_acpi_sci(char *s)
return 0;
}
early_param("acpi_sci", setup_acpi_sci);
+
+int __acpi_acquire_global_lock(unsigned int *lock)
+{
+ unsigned int old, new, val;
+ do {
+ old = *lock;
+ new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
+ val = cmpxchg(lock, old, new);
+ } while (unlikely (val != old));
+ return (new < 3) ? -1 : 0;
+}
+
+int __acpi_release_global_lock(unsigned int *lock)
+{
+ unsigned int old, new, val;
+ do {
+ old = *lock;
+ new = old & ~0x3;
+ val = cmpxchg(lock, old, new);
+ } while (unlikely (val != old));
+ return old & 0x1;
+}
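
The two functions added at the end of boot.c implement the ACPI global-lock handshake on the lock word kept in the FACS, where bit 0 is the "pending" flag and bit 1 the "owned" flag. As a rough illustration of that bit protocol, the userspace sketch below replays the same compare-and-swap loop with C11 atomics standing in for the kernel's cmpxchg(); the demo_* names and the harness are assumptions made for this example, not part of the patch.

/* Illustrative userspace model of the ACPI global-lock bit protocol
 * (bit 0 = pending, bit 1 = owned). C11 atomics stand in for the
 * kernel's cmpxchg(); demo_* names are invented for the sketch. */
#include <stdatomic.h>
#include <stdio.h>

static int demo_acquire(atomic_uint *lock)
{
	unsigned int old, new;

	do {
		old = atomic_load(lock);
		/* Set "owned"; if it was already owned, set "pending" instead. */
		new = ((old & ~0x3u) + 2) + ((old >> 1) & 0x1);
	} while (!atomic_compare_exchange_weak(lock, &old, new));

	/* Low bits 10b: we own the lock now; 11b: we only queued as pending. */
	return (new < 3) ? -1 : 0;
}

static int demo_release(atomic_uint *lock)
{
	unsigned int old, new;

	do {
		old = atomic_load(lock);
		new = old & ~0x3u;	/* clear owned and pending */
	} while (!atomic_compare_exchange_weak(lock, &old, new));

	return old & 0x1;	/* nonzero if a waiter was pending */
}

int main(void)
{
	atomic_uint lock = 0;

	printf("acquire -> %d (-1 means owned)\n", demo_acquire(&lock));
	printf("release -> %d (1 means a waiter must be signalled)\n",
	       demo_release(&lock));
	return 0;
}

A return of -1 from the acquire path means the lock was free and is now owned; 0 means only the pending bit was set and the caller has to wait for the firmware's release notification. The release path reports, via old & 0x1, whether such a waiter needs to be signalled.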
diff --git a/arch/i386/kernel/acpi/cstate.c b/arch/i386/kernel/acpi/cstate.c
index 20563e52c62..2d39f55d29a 100644
--- a/arch/i386/kernel/acpi/cstate.c
+++ b/arch/i386/kernel/acpi/cstate.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
+#include <linux/sched.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
@@ -46,13 +47,13 @@ EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
/* The code below handles cstate entry with monitor-mwait pair on Intel*/
-struct cstate_entry_s {
+struct cstate_entry {
struct {
unsigned int eax;
unsigned int ecx;
} states[ACPI_PROCESSOR_MAX_POWER];
};
-static struct cstate_entry_s *cpu_cstate_entry; /* per CPU ptr */
+static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */
static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
@@ -70,7 +71,7 @@ static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
int acpi_processor_ffh_cstate_probe(unsigned int cpu,
struct acpi_processor_cx *cx, struct acpi_power_register *reg)
{
- struct cstate_entry_s *percpu_entry;
+ struct cstate_entry *percpu_entry;
struct cpuinfo_x86 *c = cpu_data + cpu;
cpumask_t saved_mask;
@@ -135,7 +136,7 @@ EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
unsigned int cpu = smp_processor_id();
- struct cstate_entry_s *percpu_entry;
+ struct cstate_entry *percpu_entry;
percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
@@ -149,16 +150,14 @@ static int __init ffh_cstate_init(void)
if (c->x86_vendor != X86_VENDOR_INTEL)
return -1;
- cpu_cstate_entry = alloc_percpu(struct cstate_entry_s);
+ cpu_cstate_entry = alloc_percpu(struct cstate_entry);
return 0;
}
static void __exit ffh_cstate_exit(void)
{
- if (cpu_cstate_entry) {
- free_percpu(cpu_cstate_entry);
- cpu_cstate_entry = NULL;
- }
+ free_percpu(cpu_cstate_entry);
+ cpu_cstate_entry = NULL;
}
arch_initcall(ffh_cstate_init);
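
The cstate.c hunks only rename struct cstate_entry_s to cstate_entry and trim ffh_cstate_exit(), but they sit on top of the per-CPU allocation pattern the file already uses: allocate one entry per possible CPU with alloc_percpu(), have each CPU reach its own slot through per_cpu_ptr(), and release everything with a single free_percpu(). A minimal, self-contained sketch of that pattern follows; all demo_* names are invented for illustration and are not part of the patch.

/* Minimal sketch of the alloc_percpu()/per_cpu_ptr() pattern that
 * cstate.c uses for its per-CPU MWAIT hints. Illustrative only. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/smp.h>

struct demo_entry {
	unsigned int eax;
	unsigned int ecx;
};

static struct demo_entry *demo_entries;		/* per-CPU pointer */

static void demo_touch_this_cpu(void)
{
	int cpu = get_cpu();	/* stay on this CPU while using our slot */
	struct demo_entry *e = per_cpu_ptr(demo_entries, cpu);

	e->eax = 0;
	e->ecx = 1;
	put_cpu();
}

static int __init demo_init(void)
{
	demo_entries = alloc_percpu(struct demo_entry);
	if (!demo_entries)
		return -ENOMEM;
	demo_touch_this_cpu();
	return 0;
}

static void __exit demo_exit(void)
{
	free_percpu(demo_entries);	/* one call frees every CPU's copy */
	demo_entries = NULL;
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");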
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index c9841692bb7..4b60af7f91d 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -10,6 +10,7 @@
#include <asm/pci-direct.h>
#include <asm/acpi.h>
#include <asm/apic.h>
+#include <asm/irq.h>
#ifdef CONFIG_ACPI
@@ -49,6 +50,24 @@ static int __init check_bridge(int vendor, int device)
return 0;
}
+static void check_intel(void)
+{
+ u16 vendor, device;
+
+ vendor = read_pci_config_16(0, 0, 0, PCI_VENDOR_ID);
+
+ if (vendor != PCI_VENDOR_ID_INTEL)
+ return;
+
+ device = read_pci_config_16(0, 0, 0, PCI_DEVICE_ID);
+#ifdef CONFIG_SMP
+ if (device == PCI_DEVICE_ID_INTEL_E7320_MCH ||
+ device == PCI_DEVICE_ID_INTEL_E7520_MCH ||
+ device == PCI_DEVICE_ID_INTEL_E7525_MCH)
+ quirk_intel_irqbalance();
+#endif
+}
+
void __init check_acpi_pci(void)
{
int num, slot, func;
@@ -60,6 +79,8 @@ void __init check_acpi_pci(void)
if (!early_pci_allowed())
return;
+ check_intel();
+
/* Poor man's PCI discovery */
for (num = 0; num < 32; num++) {
for (slot = 0; slot < 32; slot++) {
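
The new check_intel() helper leans on the early config-space accessors from <asm/pci-direct.h>, which talk to the hardware through the classic type-1 mechanism: write an address word to port 0xCF8, then read the data back from 0xCFC. The freestanding sketch below shows that access method; the demo_* helpers and inline port-I/O wrappers are assumptions made for the illustration, not the kernel's implementation.

/* Sketch of a type-1 PCI configuration read, the mechanism behind the
 * read_pci_config_16() calls in check_intel(). Freestanding x86
 * illustration only; demo_* names are invented for the example. */
#include <stdint.h>

#define PCI_CONF1_ADDRESS	0xcf8	/* config address port */
#define PCI_CONF1_DATA		0xcfc	/* config data port */

static inline void demo_outl(uint32_t val, uint16_t port)
{
	__asm__ volatile("outl %0, %1" : : "a"(val), "Nd"(port));
}

static inline uint16_t demo_inw(uint16_t port)
{
	uint16_t val;

	__asm__ volatile("inw %1, %0" : "=a"(val) : "Nd"(port));
	return val;
}

static uint16_t demo_read_pci_config_16(uint8_t bus, uint8_t slot,
					 uint8_t func, uint8_t offset)
{
	/* Bit 31 enables the cycle; bus, device, function and the
	 * dword-aligned register number select the target. */
	uint32_t addr = 0x80000000u | ((uint32_t)bus << 16) |
			((uint32_t)slot << 11) | ((uint32_t)func << 8) |
			(offset & 0xfc);

	demo_outl(addr, PCI_CONF1_ADDRESS);
	/* A 16-bit value sits in one half of the 32-bit data port. */
	return demo_inw(PCI_CONF1_DATA + (offset & 2));
}

With accessors of this shape, check_intel() reads the vendor ID (offset 0x00) of bus 0, device 0, function 0 and, only when it sees Intel's 0x8086, goes on to compare the device ID against the E7320/E7520/E7525 MCH IDs that need quirk_intel_irqbalance().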