Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/Makefile     1
-rw-r--r--  arch/ia64/kernel/acpi.c      58
-rw-r--r--  arch/ia64/kernel/entry.S      2
-rw-r--r--  arch/ia64/kernel/iosapic.c   13
-rw-r--r--  arch/ia64/kernel/irq_ia64.c  15
-rw-r--r--  arch/ia64/kernel/kprobes.c    2
-rw-r--r--  arch/ia64/kernel/mca.c        2
-rw-r--r--  arch/ia64/kernel/numa.c      57
-rw-r--r--  arch/ia64/kernel/perfmon.c    1
-rw-r--r--  arch/ia64/kernel/process.c    1
-rw-r--r--  arch/ia64/kernel/setup.c      3
-rw-r--r--  arch/ia64/kernel/signal.c     2
-rw-r--r--  arch/ia64/kernel/smpboot.c   41
-rw-r--r--  arch/ia64/kernel/topology.c   7
-rw-r--r--  arch/ia64/kernel/traps.c      6
15 files changed, 146 insertions, 65 deletions
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index b2e2f6509eb..e1fb68ddec2 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_IA64_PALINFO) += palinfo.o
 obj-$(CONFIG_IOSAPIC) += iosapic.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o
+obj-$(CONFIG_NUMA) += numa.o
 obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
 obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
 obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index cda06f88c66..9609f243e5d 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -11,6 +11,7 @@
  * Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
  * Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
  * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
+ * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -67,6 +68,11 @@ EXPORT_SYMBOL(pm_power_off);
 unsigned char acpi_kbd_controller_present = 1;
 unsigned char acpi_legacy_devices;
 
+static unsigned int __initdata acpi_madt_rev;
+
+unsigned int acpi_cpei_override;
+unsigned int acpi_cpei_phys_cpuid;
+
 #define MAX_SAPICS 256
 u16 ia64_acpiid_to_sapicid[MAX_SAPICS] = { [0 ... MAX_SAPICS - 1] = -1 };
@@ -265,10 +271,56 @@ acpi_parse_plat_int_src (
 		 (plintsrc->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
 
 	platform_intr_list[plintsrc->type] = vector;
+	if (acpi_madt_rev > 1) {
+		acpi_cpei_override = plintsrc->plint_flags.cpei_override_flag;
+	}
+
+	/*
+	 * Save the physical id, so we can check when its being removed
+	 */
+	acpi_cpei_phys_cpuid = ((plintsrc->id << 8) | (plintsrc->eid)) & 0xffff;
+
 	return 0;
 }
 
+unsigned int can_cpei_retarget(void)
+{
+	extern int cpe_vector;
+
+	/*
+	 * Only if CPEI is supported and the override flag
+	 * is present, otherwise return that its re-targettable
+	 * if we are in polling mode.
+	 */
+	if (cpe_vector > 0 && !acpi_cpei_override)
+		return 0;
+	else
+		return 1;
+}
+
+unsigned int is_cpu_cpei_target(unsigned int cpu)
+{
+	unsigned int logical_id;
+
+	logical_id = cpu_logical_id(acpi_cpei_phys_cpuid);
+
+	if (logical_id == cpu)
+		return 1;
+	else
+		return 0;
+}
+
+void set_cpei_target_cpu(unsigned int cpu)
+{
+	acpi_cpei_phys_cpuid = cpu_physical_id(cpu);
+}
+
+unsigned int get_cpei_target_cpu(void)
+{
+	return acpi_cpei_phys_cpuid;
+}
+
 static int __init
 acpi_parse_int_src_ovr (
 	acpi_table_entry_header *header, const unsigned long end)
@@ -326,6 +378,8 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
 
 	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
 
+	acpi_madt_rev = acpi_madt->header.revision;
+
 	/* remember the value for reference after free_initmem() */
 #ifdef CONFIG_ITANIUM
 	has_8259 = 1; /* Firmware on old Itanium systems is broken */
@@ -640,9 +694,11 @@ acpi_boot_init (void)
 		if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id())
 			node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu];
 	}
-	build_cpu_to_node_map();
 # endif
 #endif
+#ifdef CONFIG_ACPI_NUMA
+	build_cpu_to_node_map();
+#endif
 	/* Make boot-up look pretty */
 	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
 	return 0;
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 69f88d561d6..bb9a506deb7 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1249,7 +1249,7 @@ ENTRY(sys_rt_sigreturn)
 	stf.spill [r17]=f11
 	adds out0=16,sp		// out0 = &sigscratch
 	br.call.sptk.many rp=ia64_rt_sigreturn
-.ret19:	.restore sp 0
+.ret19:	.restore sp,0
 	adds sp=16,sp
 	;;
 	ld8 r9=[sp]		// load new ar.unat
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index c170be095cc..7936b62f7a2 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -489,8 +489,6 @@ static int iosapic_find_sharable_vector (unsigned long trigger, unsigned long po
 			}
 		}
 	}
-	if (vector < 0)
-		panic("%s: out of interrupt vectors!\n", __FUNCTION__);
 
 	return vector;
 }
@@ -506,6 +504,8 @@ iosapic_reassign_vector (int vector)
 
 	if (!list_empty(&iosapic_intr_info[vector].rtes)) {
 		new_vector = assign_irq_vector(AUTO_ASSIGN);
+		if (new_vector < 0)
+			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
 		printk(KERN_INFO "Reassigning vector %d to %d\n", vector, new_vector);
 		memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector],
 		       sizeof(struct iosapic_intr_info));
@@ -734,9 +734,12 @@ again:
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 
 	/* If vector is running out, we try to find a sharable vector */
-	vector = assign_irq_vector_nopanic(AUTO_ASSIGN);
-	if (vector < 0)
+	vector = assign_irq_vector(AUTO_ASSIGN);
+	if (vector < 0) {
 		vector = iosapic_find_sharable_vector(trigger, polarity);
+		if (vector < 0)
+			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+	}
 
 	spin_lock_irqsave(&irq_descp(vector)->lock, flags);
 	spin_lock(&iosapic_lock);
@@ -884,6 +887,8 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 		break;
 	      case ACPI_INTERRUPT_INIT:
 		vector = assign_irq_vector(AUTO_ASSIGN);
+		if (vector < 0)
+			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
 		delivery = IOSAPIC_INIT;
 		break;
 	      case ACPI_INTERRUPT_CPEI:
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 4fe60c7a2e9..6c4d59fd036 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -63,30 +63,19 @@ EXPORT_SYMBOL(isa_irq_to_vector_map);
 static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
 
 int
-assign_irq_vector_nopanic (int irq)
+assign_irq_vector (int irq)
 {
 	int pos, vector;
 again:
 	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
 	vector = IA64_FIRST_DEVICE_VECTOR + pos;
 	if (vector > IA64_LAST_DEVICE_VECTOR)
-		return -1;
+		return -ENOSPC;
 	if (test_and_set_bit(pos, ia64_vector_mask))
 		goto again;
 	return vector;
 }
 
-int
-assign_irq_vector (int irq)
-{
-	int vector = assign_irq_vector_nopanic(irq);
-
-	if (vector < 0)
-		panic("assign_irq_vector: out of interrupt vectors!");
-
-	return vector;
-}
-
 void
 free_irq_vector (int vector)
 {
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 3aa3167edbe..884f5cd27d8 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -713,7 +713,7 @@ static struct kprobe trampoline_p = {
 	.pre_handler = trampoline_probe_handler
 };
 
-int __init arch_init(void)
+int __init arch_init_kprobes(void)
 {
 	trampoline_p.addr =
 		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip;
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 736e328b5e6..4ebbf397438 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -271,7 +271,7 @@ ia64_mca_log_sal_error_record(int sal_info_type)
 
 #ifdef CONFIG_ACPI
 
-static int cpe_vector = -1;
+int cpe_vector = -1;
 
 static irqreturn_t
 ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
new file mode 100644
index 00000000000..a68ce667809
--- /dev/null
+++ b/arch/ia64/kernel/numa.c
@@ -0,0 +1,57 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ia64 kernel NUMA specific stuff
+ *
+ * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ *	Jesse Barnes <jbarnes@sgi.com>
+ */
+#include <linux/config.h>
+#include <linux/topology.h>
+#include <linux/module.h>
+#include <asm/processor.h>
+#include <asm/smp.h>
+
+u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_to_node_map);
+
+cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
+
+/**
+ * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
+ *
+ * Build cpu to node mapping and initialize the per node cpu masks using
+ * info from the node_cpuid array handed to us by ACPI.
+ */
+void __init build_cpu_to_node_map(void)
+{
+	int cpu, i, node;
+
+	for(node=0; node < MAX_NUMNODES; node++)
+		cpus_clear(node_to_cpu_mask[node]);
+
+	for(cpu = 0; cpu < NR_CPUS; ++cpu) {
+		node = -1;
+		for (i = 0; i < NR_CPUS; ++i)
+			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
+				node = node_cpuid[i].nid;
+				break;
+			}
+		cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
+		if (node >= 0)
+			cpu_set(cpu, node_to_cpu_mask[node]);
+	}
+}
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 6407bff6bfd..b8ebb8e427e 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -37,7 +37,6 @@
 #include <linux/vfs.h>
 #include <linux/pagemap.h>
 #include <linux/mount.h>
-#include <linux/version.h>
 #include <linux/bitops.h>
 
 #include <asm/errno.h>
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 6e35bff05d5..e484910246a 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -196,6 +196,7 @@ update_pal_halt_status(int status)
 void
 default_idle (void)
 {
+	local_irq_enable();
 	while (!need_resched())
 		if (can_do_pal_halt)
 			safe_halt();
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 2693e1522d7..5c7c95737bb 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -40,6 +40,8 @@
 #include <linux/serial_core.h>
 #include <linux/efi.h>
 #include <linux/initrd.h>
+#include <linux/platform.h>
+#include <linux/pm.h>
 
 #include <asm/ia32.h>
 #include <asm/machvec.h>
@@ -783,6 +785,7 @@ cpu_init (void)
 	/* size of physical stacked register partition plus 8 bytes: */
 	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
 	platform_cpu_init();
+	pm_idle = default_idle;
 }
 
 void
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index edd9f07860b..b8a0a7d257a 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -143,6 +143,7 @@ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
 		__copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16);
 		psr->mfh = 0;	/* drop signal handler's fph contents... */
 
+		preempt_disable();
 		if (psr->dfh)
 			ia64_drop_fpu(current);
 		else {
@@ -150,6 +151,7 @@ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
 			__ia64_load_fpu(current->thread.fph);
 			ia64_set_local_fpu_owner(current);
 		}
+		preempt_enable();
 	}
 	return err;
 }
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 623b0a54670..7d72c0d872b 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -525,47 +525,6 @@ smp_build_cpu_map (void)
 	}
 }
 
-#ifdef CONFIG_NUMA
-
-/* on which node is each logical CPU (one cacheline even for 64 CPUs) */
-u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_to_node_map);
-/* which logical CPUs are on which nodes */
-cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
-
-/*
- * Build cpu to node mapping and initialize the per node cpu masks.
- */
-void __init
-build_cpu_to_node_map (void)
-{
-	int cpu, i, node;
-
-	for(node=0; node<MAX_NUMNODES; node++)
-		cpus_clear(node_to_cpu_mask[node]);
-	for(cpu = 0; cpu < NR_CPUS; ++cpu) {
-		/*
-		 * All Itanium NUMA platforms I know use ACPI, so maybe we
-		 * can drop this ifdef completely. [EF]
-		 */
-#ifdef CONFIG_ACPI_NUMA
-		node = -1;
-		for (i = 0; i < NR_CPUS; ++i)
-			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
-				node = node_cpuid[i].nid;
-				break;
-			}
-#else
-# error Fixme: Dunno how to build CPU-to-node map.
-#endif
-		cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
-		if (node >= 0)
-			cpu_set(cpu, node_to_cpu_mask[node]);
-	}
-}
-
-#endif /* CONFIG_NUMA */
-
 /*
  * Cycle through the APs sending Wakeup IPIs to boot each.
  */
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index f1aafd4c05f..d8030f3bd86 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -36,6 +36,13 @@ int arch_register_cpu(int num)
 	parent = &sysfs_nodes[cpu_to_node(num)];
 #endif /* CONFIG_NUMA */
 
+	/*
+	 * If CPEI cannot be re-targetted, and this is
+	 * CPEI target, then dont create the control file
+	 */
+	if (!can_cpei_retarget() && is_cpu_cpei_target(num))
+		sysfs_cpus[num].cpu.no_control = 1;
+
 	return register_cpu(&sysfs_cpus[num].cpu, num, parent);
 }
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index e7e520d90f0..4440c8343fa 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -90,14 +90,16 @@ die (const char *str, struct pt_regs *regs, long err)
 		.lock_owner_depth = 0
 	};
 	static int die_counter;
+	int cpu = get_cpu();
 
-	if (die.lock_owner != smp_processor_id()) {
+	if (die.lock_owner != cpu) {
 		console_verbose();
 		spin_lock_irq(&die.lock);
-		die.lock_owner = smp_processor_id();
+		die.lock_owner = cpu;
 		die.lock_owner_depth = 0;
 		bust_spinlocks(1);
 	}
+	put_cpu();
 
 	if (++die.lock_owner_depth < 3) {
 		printk("%s[%d]: %s %ld [%d]\n",