Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/genapic_flat.c  42
-rw-r--r--  arch/x86_64/kernel/smp.c           10
-rw-r--r--  arch/x86_64/kernel/smpboot.c       19
3 files changed, 54 insertions(+), 17 deletions(-)
diff --git a/arch/x86_64/kernel/genapic_flat.c b/arch/x86_64/kernel/genapic_flat.c
index b4cbbad0422..00f3fa6df71 100644
--- a/arch/x86_64/kernel/genapic_flat.c
+++ b/arch/x86_64/kernel/genapic_flat.c
@@ -7,6 +7,8 @@
* Hacked for x86-64 by James Cleverdon from i386 architecture code by
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
+ * Ashok Raj <ashok.raj@intel.com>
+ * Removed IPI broadcast shortcut to support CPU hotplug
*/
#include <linux/config.h>
#include <linux/threads.h>
@@ -45,22 +47,6 @@ static void flat_init_apic_ldr(void)
apic_write_around(APIC_LDR, val);
}
-static void flat_send_IPI_allbutself(int vector)
-{
- /*
- * if there are no other CPUs in the system then
- * we get an APIC send error if we try to broadcast.
- * thus we have to avoid sending IPIs in this case.
- */
- if (num_online_cpus() > 1)
- __send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
-}
-
-static void flat_send_IPI_all(int vector)
-{
- __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
-}
-
static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
{
unsigned long mask = cpus_addr(cpumask)[0];
@@ -93,6 +79,30 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
local_irq_restore(flags);
}
+static void flat_send_IPI_allbutself(int vector)
+{
+ cpumask_t mask;
+ /*
+ * if there are no other CPUs in the system then
+ * we get an APIC send error if we try to broadcast.
+ * thus we have to avoid sending IPIs in this case.
+ */
+ int this_cpu = get_cpu();
+
+ mask = cpu_online_map;
+ cpu_clear(this_cpu, mask);
+
+ if (cpus_weight(mask) >= 1)
+ flat_send_IPI_mask(mask, vector);
+
+ put_cpu();
+}
+
+static void flat_send_IPI_all(int vector)
+{
+ flat_send_IPI_mask(cpu_online_map, vector);
+}
+
static int flat_apic_id_registered(void)
{
return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
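
The replacement functions above drive everything through flat_send_IPI_mask() instead of the hardware broadcast shortcut: the destination set is computed from cpu_online_map with the sending CPU cleared, so a hot-removed CPU (no longer in the map) can never be hit by a stray broadcast. Below is a minimal userspace sketch of just the mask arithmetic; all *_sim names are invented and the real APIC programming is elided.

/* Sketch only: models the cpumask logic of flat_send_IPI_allbutself(),
 * not the APIC ICR writes. All *_sim names are hypothetical. */
#include <stdio.h>

static void send_ipi_mask_sim(unsigned long mask, int vector)
{
	for (int cpu = 0; cpu < 8; cpu++)	/* flat mode covers <= 8 CPUs */
		if (mask & (1UL << cpu))
			printf("IPI 0x%x -> CPU %d\n", vector, cpu);
}

int main(void)
{
	unsigned long online_map_sim = 0x0b;	/* CPUs 0, 1, 3; CPU 2 hot-removed */
	int this_cpu = 0;			/* stands in for get_cpu() */

	/* mask = cpu_online_map minus ourselves, as in the patch above */
	unsigned long mask = online_map_sim & ~(1UL << this_cpu);
	if (mask)		/* nothing to send on a single-CPU system */
		send_ipi_mask_sim(mask, 0xfb);
	return 0;
}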
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 1e379ed17b1..ccae392886a 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -283,6 +283,16 @@ struct call_data_struct {
static struct call_data_struct * call_data;
+void lock_ipi_call_lock(void)
+{
+ spin_lock_irq(&call_lock);
+}
+
+void unlock_ipi_call_lock(void)
+{
+ spin_unlock_irq(&call_lock);
+}
+
/*
* this function sends a 'generic call function' IPI to all other CPUs
* in the system.
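
The two wrappers above exist only to export call_lock, which is static to smp.c, to the CPU bring-up path in smpboot.c. smp_call_function() derives its recipient count from the online map while holding call_lock, so any CPU that also takes the lock before changing that map cannot race the count. A hedged sketch of the counting side, with invented *_sim names and a pthread mutex standing in for spin_lock_irq(&call_lock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t call_lock_sim = PTHREAD_MUTEX_INITIALIZER;
static unsigned long online_map_sim = 0x3;	/* CPUs 0 and 1 online */

static int popcount_sim(unsigned long m)
{
	int n = 0;
	for (; m; m &= m - 1)	/* clear lowest set bit per iteration */
		n++;
	return n;
}

/* Analogue of smp_call_function(): the recipient count and the IPI
 * send both happen under the lock, so the online set cannot change
 * in between. */
void smp_call_function_sim(void)
{
	pthread_mutex_lock(&call_lock_sim);
	int waiting_for = popcount_sim(online_map_sim) - 1;	/* all but self */
	printf("waiting for %d acks\n", waiting_for);
	pthread_mutex_unlock(&call_lock_sim);
}

int main(void)
{
	smp_call_function_sim();
	return 0;
}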
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 571a55462fa..b969ee12872 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -508,9 +508,22 @@ void __cpuinit start_secondary(void)
set_cpu_sibling_map(smp_processor_id());
/*
+ * We need to hold call_lock, so there is no inconsistency
+ * between the time smp_call_function() determines the number of
+ * IPI recipients and the time when it is determined, in
+ * genapic_flat.c, which CPUs receive the IPI. Holding this lock
+ * keeps this CPU from being included in an smp_call_function()
+ * that is currently in progress.
+ */
+ lock_ipi_call_lock();
+
+ /*
* Allow the master to continue.
*/
cpu_set(smp_processor_id(), cpu_online_map);
+ per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+ unlock_ipi_call_lock();
+
mb();
/* Wait for TSC sync to not schedule things before.
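
This is the other half of the race fix: the secondary CPU takes the IPI call lock before it becomes visible in cpu_online_map and records CPU_ONLINE, so it either misses an in-flight call entirely or sees it complete, never half of one. A self-contained sketch of that ordering (invented *_sim names, pthreads in place of kernel spinlocks):

#include <pthread.h>
#include <stdio.h>

enum cpu_state_sim { CPU_UP_PREPARE_SIM, CPU_ONLINE_SIM, CPU_DEAD_SIM };

static pthread_mutex_t call_lock_sim = PTHREAD_MUTEX_INITIALIZER;
static unsigned long online_map_sim = 0x1;	/* boot CPU only */
static enum cpu_state_sim cpu_states_sim[8];

/* Analogue of the tail of start_secondary(): becoming visible in the
 * online map and flipping to CPU_ONLINE happen under the same lock
 * that smp_call_function() holds while counting recipients. */
void start_secondary_sim(int cpu)
{
	pthread_mutex_lock(&call_lock_sim);	/* lock_ipi_call_lock() */
	online_map_sim |= 1UL << cpu;		/* cpu_set(cpu, cpu_online_map) */
	cpu_states_sim[cpu] = CPU_ONLINE_SIM;	/* per_cpu(cpu_state, cpu) */
	pthread_mutex_unlock(&call_lock_sim);	/* unlock_ipi_call_lock() */
}

int main(void)
{
	start_secondary_sim(1);
	printf("online map now 0x%lx\n", online_map_sim);
	return 0;
}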
@@ -1038,6 +1051,7 @@ void __init smp_prepare_boot_cpu(void)
cpu_set(me, cpu_callout_map);
cpu_set(0, cpu_sibling_map[0]);
cpu_set(0, cpu_core_map[0]);
+ per_cpu(cpu_state, me) = CPU_ONLINE;
}
/*
@@ -1066,6 +1080,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
return -ENOSYS;
}
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
/* Boot it! */
err = do_boot_cpu(cpu, apicid);
if (err < 0) {
@@ -1170,8 +1185,10 @@ void __cpu_die(unsigned int cpu)
for (i = 0; i < 10; i++) {
/* They ack this in play_dead by setting CPU_DEAD */
- if (per_cpu(cpu_state, cpu) == CPU_DEAD)
+ if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+ printk("CPU %d is now offline\n", cpu);
return;
+ }
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ/10);
}
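
The hunk above only adds a confirmation printk, but it sits in the loop that makes CPU removal synchronous: the dying CPU advertises CPU_DEAD from play_dead(), and __cpu_die() polls for that state ten times with schedule_timeout(HZ/10), i.e. for roughly one second in total. A userspace sketch of the poll, with invented *_sim names and usleep() standing in for schedule_timeout():

#include <stdio.h>
#include <unistd.h>

enum { CPU_ONLINE_SIM, CPU_DEAD_SIM };
static volatile int cpu_state_sim = CPU_DEAD_SIM;	/* set by "play_dead" */

/* Ten polls, 100 ms apart: about one second, mirroring the
 * 10 x schedule_timeout(HZ/10) loop in __cpu_die() above. */
int wait_for_cpu_dead_sim(int cpu)
{
	for (int i = 0; i < 10; i++) {
		if (cpu_state_sim == CPU_DEAD_SIM) {
			printf("CPU %d is now offline\n", cpu);
			return 0;
		}
		usleep(100 * 1000);
	}
	printf("CPU %d didn't die...\n", cpu);
	return -1;
}

int main(void)
{
	return wait_for_cpu_dead_sim(1);
}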