-rw-r--r--arch/x86/kernel/Makefile4
-rw-r--r--arch/x86/kernel/cpu/common.c11
-rw-r--r--arch/x86/kernel/ldt.c9
-rw-r--r--arch/x86/kernel/paravirt-spinlocks.c37
-rw-r--r--arch/x86/kernel/paravirt.c27
-rw-r--r--arch/x86/kernel/process_32.c39
-rw-r--r--arch/x86/kernel/process_64.c22
-rw-r--r--arch/x86/kernel/smp.c6
-rw-r--r--arch/x86/kernel/smpboot.c77
-rw-r--r--arch/x86/kernel/tlb_32.c8
-rw-r--r--arch/x86/mm/fault.c14
-rw-r--r--arch/x86/xen/Kconfig10
-rw-r--r--arch/x86/xen/Makefile12
-rw-r--r--arch/x86/xen/debugfs.c123
-rw-r--r--arch/x86/xen/debugfs.h10
-rw-r--r--arch/x86/xen/enlighten.c189
-rw-r--r--arch/x86/xen/irq.c143
-rw-r--r--arch/x86/xen/mmu.c270
-rw-r--r--arch/x86/xen/multicalls.c115
-rw-r--r--arch/x86/xen/smp.c245
-rw-r--r--arch/x86/xen/spinlock.c428
-rw-r--r--arch/x86/xen/time.c12
-rw-r--r--arch/x86/xen/xen-asm_32.S2
-rw-r--r--arch/x86/xen/xen-asm_64.S2
-rw-r--r--arch/x86/xen/xen-ops.h8
-rw-r--r--drivers/block/xen-blkfront.c2
-rw-r--r--drivers/char/hvc_xen.c6
-rw-r--r--drivers/input/xen-kbdfront.c4
-rw-r--r--drivers/net/xen-netfront.c6
-rw-r--r--drivers/video/xen-fbfront.c4
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/balloon.c174
-rw-r--r--drivers/xen/cpu_hotplug.c90
-rw-r--r--drivers/xen/events.c40
-rw-r--r--drivers/xen/grant-table.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c8
-rw-r--r--include/asm-x86/desc.h15
-rw-r--r--include/asm-x86/paravirt.h20
-rw-r--r--include/asm-x86/smp.h34
-rw-r--r--include/asm-x86/spinlock.h9
-rw-r--r--include/asm-x86/tlbflush.h10
-rw-r--r--include/asm-x86/xen/hypervisor.h14
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/mm.h6
-rw-r--r--include/linux/mm_types.h10
-rw-r--r--include/linux/sched.h6
-rw-r--r--include/xen/events.h2
-rw-r--r--lib/cmdline.c2
48 files changed, 1610 insertions, 680 deletions
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 3db651fc8ec..d679cb2c79b 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -10,7 +10,7 @@ ifdef CONFIG_FTRACE
# Do not profile debug and lowlevel utilities
CFLAGS_REMOVE_tsc.o = -pg
CFLAGS_REMOVE_rtc.o = -pg
-CFLAGS_REMOVE_paravirt.o = -pg
+CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
endif
#
@@ -89,7 +89,7 @@ obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o
obj-$(CONFIG_KVM_GUEST) += kvm.o
obj-$(CONFIG_KVM_CLOCK) += kvmclock.o
-obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o
+obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o paravirt-spinlocks.o
obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4e456bd955b..9983bc3f5d1 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -728,14 +728,3 @@ void __cpuinit cpu_init(void)
mxcsr_feature_mask_init();
}
-#ifdef CONFIG_HOTPLUG_CPU
-void __cpuinit cpu_uninit(void)
-{
- int cpu = raw_smp_processor_id();
- cpu_clear(cpu, cpu_initialized);
-
- /* lazy TLB state */
- per_cpu(cpu_tlbstate, cpu).state = 0;
- per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
-}
-#endif
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index b68e21f06f4..6e388412a85 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -51,6 +51,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
(mincount - oldsize) * LDT_ENTRY_SIZE);
+ paravirt_alloc_ldt(newldt, mincount);
+
#ifdef CONFIG_X86_64
/* CHECKME: Do we really need this ? */
wmb();
@@ -73,6 +75,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
#endif
}
if (oldsize) {
+ paravirt_free_ldt(oldldt, oldsize);
if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(oldldt);
else
@@ -84,10 +87,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
{
int err = alloc_ldt(new, old->size, 0);
+ int i;
if (err < 0)
return err;
- memcpy(new->ldt, old->ldt, old->size * LDT_ENTRY_SIZE);
+
+ for(i = 0; i < old->size; i++)
+ write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
return 0;
}
@@ -124,6 +130,7 @@ void destroy_context(struct mm_struct *mm)
if (mm == current->active_mm)
clear_LDT();
#endif
+ paravirt_free_ldt(mm->context.ldt, mm->context.size);
if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(mm->context.ldt);
else
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
new file mode 100644
index 00000000000..0e9f1982b1d
--- /dev/null
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -0,0 +1,37 @@
+/*
+ * Split spinlock implementation out into its own file, so it can be
+ * compiled in a FTRACE-compatible way.
+ */
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+#include <asm/paravirt.h>
+
+static void default_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+{
+ __raw_spin_lock(lock);
+}
+
+struct pv_lock_ops pv_lock_ops = {
+#ifdef CONFIG_SMP
+ .spin_is_locked = __ticket_spin_is_locked,
+ .spin_is_contended = __ticket_spin_is_contended,
+
+ .spin_lock = __ticket_spin_lock,
+ .spin_lock_flags = default_spin_lock_flags,
+ .spin_trylock = __ticket_spin_trylock,
+ .spin_unlock = __ticket_spin_unlock,
+#endif
+};
+EXPORT_SYMBOL(pv_lock_ops);
+
+void __init paravirt_use_bytelocks(void)
+{
+#ifdef CONFIG_SMP
+ pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
+ pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
+ pv_lock_ops.spin_lock = __byte_spin_lock;
+ pv_lock_ops.spin_trylock = __byte_spin_trylock;
+ pv_lock_ops.spin_unlock = __byte_spin_unlock;
+#endif
+}
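/*
 * Illustrative sketch, not part of the patch: with pv_lock_ops split
 * out, the generic spinlock entry points reduce to indirect calls
 * through the ops table, which paravirt_use_bytelocks() (or a
 * hypervisor at boot) can repoint. The wrapper shape below is an
 * assumption for illustration; the real wrappers live in
 * include/asm-x86/paravirt.h, which this series also touches.
 */
static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
	return pv_lock_ops.spin_is_locked(lock);
}

static inline void __raw_spin_lock(struct raw_spinlock *lock)
{
	pv_lock_ops.spin_lock(lock);
}

static inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
	return pv_lock_ops.spin_trylock(lock);
}

static inline void __raw_spin_unlock(struct raw_spinlock *lock)
{
	pv_lock_ops.spin_unlock(lock);
}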
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 300da17e61c..7faea1817d0 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -268,17 +268,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return __get_cpu_var(paravirt_lazy_mode);
}
-void __init paravirt_use_bytelocks(void)
-{
-#ifdef CONFIG_SMP
- pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
- pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
- pv_lock_ops.spin_lock = __byte_spin_lock;
- pv_lock_ops.spin_trylock = __byte_spin_trylock;
- pv_lock_ops.spin_unlock = __byte_spin_unlock;
-#endif
-}
-
struct pv_info pv_info = {
.name = "bare hardware",
.paravirt_enabled = 0,
@@ -348,6 +337,10 @@ struct pv_cpu_ops pv_cpu_ops = {
.write_ldt_entry = native_write_ldt_entry,
.write_gdt_entry = native_write_gdt_entry,
.write_idt_entry = native_write_idt_entry,
+
+ .alloc_ldt = paravirt_nop,
+ .free_ldt = paravirt_nop,
+
.load_sp0 = native_load_sp0,
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
@@ -461,18 +454,6 @@ struct pv_mmu_ops pv_mmu_ops = {
.set_fixmap = native_set_fixmap,
};
-struct pv_lock_ops pv_lock_ops = {
-#ifdef CONFIG_SMP
- .spin_is_locked = __ticket_spin_is_locked,
- .spin_is_contended = __ticket_spin_is_contended,
-
- .spin_lock = __ticket_spin_lock,
- .spin_trylock = __ticket_spin_trylock,
- .spin_unlock = __ticket_spin_unlock,
-#endif
-};
-EXPORT_SYMBOL(pv_lock_ops);
-
EXPORT_SYMBOL_GPL(pv_time_ops);
EXPORT_SYMBOL (pv_cpu_ops);
EXPORT_SYMBOL (pv_mmu_ops);
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 4b3cfdf5421..b76b38ff962 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -72,47 +72,12 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return ((unsigned long *)tsk->thread.sp)[3];
}
-#ifdef CONFIG_HOTPLUG_CPU
-#include <asm/nmi.h>
-
-static void cpu_exit_clear(void)
-{
- int cpu = raw_smp_processor_id();
-
- idle_task_exit();
-
- cpu_uninit();
- irq_ctx_exit(cpu);
-
- cpu_clear(cpu, cpu_callout_map);
- cpu_clear(cpu, cpu_callin_map);
-
- numa_remove_cpu(cpu);
- c1e_remove_cpu(cpu);
-}
-
-/* We don't actually take CPU down, just spin without interrupts. */
-static inline void play_dead(void)
-{
- /* This must be done before dead CPU ack */
- cpu_exit_clear();
- mb();
- /* Ack it */
- __get_cpu_var(cpu_state) = CPU_DEAD;
-
- /*
- * With physical CPU hotplug, we should halt the cpu
- */
- local_irq_disable();
- /* mask all interrupts, flush any and all caches, and halt */
- wbinvd_halt();
-}
-#else
+#ifndef CONFIG_SMP
static inline void play_dead(void)
{
BUG();
}
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif
/*
* The idle thread. There's no useful work to be
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index e12e0e4dd25..ec27afa43d7 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -85,30 +85,12 @@ void exit_idle(void)
__exit_idle();
}
-#ifdef CONFIG_HOTPLUG_CPU
-DECLARE_PER_CPU(int, cpu_state);
-
-#include <asm/nmi.h>
-/* We halt the CPU with physical CPU hotplug */
-static inline void play_dead(void)
-{
- idle_task_exit();
- c1e_remove_cpu(raw_smp_processor_id());
-
- mb();
- /* Ack it */
- __get_cpu_var(cpu_state) = CPU_DEAD;
-
- local_irq_disable();
- /* mask all interrupts, flush any and all caches, and halt */
- wbinvd_halt();
-}
-#else
+#ifndef CONFIG_SMP
static inline void play_dead(void)
{
BUG();
}
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif
/*
* The idle thread. There's no useful work to be
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 361b7a4c640..18f9b19f5f8 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -214,12 +214,16 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
struct smp_ops smp_ops = {
.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
.smp_prepare_cpus = native_smp_prepare_cpus,
- .cpu_up = native_cpu_up,
.smp_cpus_done = native_smp_cpus_done,
.smp_send_stop = native_smp_send_stop,
.smp_send_reschedule = native_smp_send_reschedule,
+ .cpu_up = native_cpu_up,
+ .cpu_die = native_cpu_die,
+ .cpu_disable = native_cpu_disable,
+ .play_dead = native_play_dead,
+
.send_call_func_ipi = native_send_call_func_ipi,
.send_call_func_single_ipi = native_send_call_func_single_ipi,
};
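/*
 * Illustrative sketch, not part of the patch: with cpu_disable,
 * cpu_die and play_dead routed through smp_ops, the generic hotplug
 * entry points presumably become thin dispatchers (the real ones are
 * in include/asm-x86/smp.h, which the diffstat shows changing):
 */
static inline int __cpu_disable(void)
{
	return smp_ops.cpu_disable();
}

static inline void __cpu_die(unsigned int cpu)
{
	smp_ops.cpu_die(cpu);
}

static inline void play_dead(void)
{
	smp_ops.play_dead();
}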
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7985c5b3f91..06f1407d554 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -52,6 +52,7 @@
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
+#include <asm/idle.h>
#include <asm/smp.h>
#include <asm/trampoline.h>
#include <asm/cpu.h>
@@ -1346,25 +1347,9 @@ static void __ref remove_cpu_from_maps(int cpu)
numa_remove_cpu(cpu);
}
-int __cpu_disable(void)
+void cpu_disable_common(void)
{
int cpu = smp_processor_id();
-
- /*
- * Perhaps use cpufreq to drop frequency, but that could go
- * into generic code.
- *
- * We won't take down the boot processor on i386 due to some
- * interrupts only being able to be serviced by the BSP.
- * Especially so if we're not using an IOAPIC -zwane
- */
- if (cpu == 0)
- return -EBUSY;
-
- if (nmi_watchdog == NMI_LOCAL_APIC)
- stop_apic_nmi_watchdog(NULL);
- clear_local_APIC();
-
/*
* HACK:
* Allow any queued timer interrupts to get serviced
@@ -1382,10 +1367,32 @@ int __cpu_disable(void)
remove_cpu_from_maps(cpu);
unlock_vector_lock();
fixup_irqs(cpu_online_map);
+}
+
+int native_cpu_disable(void)
+{
+ int cpu = smp_processor_id();
+
+ /*
+ * Perhaps use cpufreq to drop frequency, but that could go
+ * into generic code.
+ *
+ * We won't take down the boot processor on i386 due to some
+ * interrupts only being able to be serviced by the BSP.
+ * Especially so if we're not using an IOAPIC -zwane
+ */
+ if (cpu == 0)
+ return -EBUSY;
+
+ if (nmi_watchdog == NMI_LOCAL_APIC)
+ stop_apic_nmi_watchdog(NULL);
+ clear_local_APIC();
+
+ cpu_disable_common();
return 0;
}
-void __cpu_die(unsigned int cpu)
+void native_cpu_die(unsigned int cpu)
{
/* We don't do anything here: idle task is faking death itself. */
unsigned int i;
@@ -1402,15 +1409,45 @@ void __cpu_die(unsigned int cpu)
}
printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
+
+void play_dead_common(void)
+{
+ idle_task_exit();
+ reset_lazy_tlbstate();
+ irq_ctx_exit(raw_smp_processor_id());
+ c1e_remove_cpu(raw_smp_processor_id());
+
+ mb();
+ /* Ack it */
+ __get_cpu_var(cpu_state) = CPU_DEAD;
+
+ /*
+ * With physical CPU hotplug, we should halt the cpu
+ */
+ local_irq_disable();
+}
+
+void native_play_dead(void)
+{
+ play_dead_common();
+ wbinvd_halt();
+}
+
#else /* ... !CONFIG_HOTPLUG_CPU */
-int __cpu_disable(void)
+int native_cpu_disable(void)
{
return -ENOSYS;
}
-void __cpu_die(unsigned int cpu)
+void native_cpu_die(unsigned int cpu)
{
/* We said "no" in __cpu_disable */
BUG();
}
+
+void native_play_dead(void)
+{
+ BUG();
+}
+
#endif
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index fec1ecedc9b..e00534b3353 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -241,3 +241,11 @@ void flush_tlb_all(void)
on_each_cpu(do_flush_tlb_all, NULL, 1);
}
+void reset_lazy_tlbstate(void)
+{
+ int cpu = raw_smp_processor_id();
+
+ per_cpu(cpu_tlbstate, cpu).state = 0;
+ per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
+}
+
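/*
 * Note, not part of the patch: reset_lazy_tlbstate() takes over the
 * lazy-TLB reset that the cpu_uninit() deleted from cpu/common.c
 * above used to perform; it is now called from play_dead_common() in
 * the smpboot.c hunk above.
 */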
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 455f3fe67b4..356ed2dec3a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -915,15 +915,15 @@ LIST_HEAD(pgd_list);
void vmalloc_sync_all(void)
{
-#ifdef CONFIG_X86_32
- unsigned long start = VMALLOC_START & PGDIR_MASK;
unsigned long address;
+#ifdef CONFIG_X86_32
if (SHARED_KERNEL_PMD)
return;
- BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
- for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
+ for (address = VMALLOC_START & PMD_MASK;
+ address >= TASK_SIZE && address < FIXADDR_TOP;
+ address += PMD_SIZE) {
unsigned long flags;
struct page *page;
@@ -936,10 +936,8 @@ void vmalloc_sync_all(void)
spin_unlock_irqrestore(&pgd_lock, flags);
}
#else /* CONFIG_X86_64 */
- unsigned long start = VMALLOC_START & PGDIR_MASK;
- unsigned long address;
-
- for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
+ for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
+ address += PGDIR_SIZE) {
const pgd_t *pgd_ref = pgd_offset_k(address);
unsigned long flags;
struct page *page;
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 3815e425f47..d3e68465ace 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -27,4 +27,12 @@ config XEN_MAX_DOMAIN_MEMORY
config XEN_SAVE_RESTORE
bool
depends on PM
- default y
\ No newline at end of file
+ default y
+
+config XEN_DEBUG_FS
+ bool "Enable Xen debug and tuning parameters in debugfs"
+ depends on XEN && DEBUG_FS
+ default n
+ help
+ Enable statistics output and various tuning options in debugfs.
+ Enabling this option may incur a significant performance overhead.
\ No newline at end of file
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 59c1e539aed..313947940a1 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -1,4 +1,12 @@
-obj-y := enlighten.o setup.o multicalls.o mmu.o \
+ifdef CONFIG_FTRACE
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_spinlock.o = -pg
+CFLAGS_REMOVE_time.o = -pg
+CFLAGS_REMOVE_irq.o = -pg
+endif
+
+obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
time.o xen-asm_$(BITS).o grant-table.o suspend.o
-obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SMP) += smp.o spinlock.o
+obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
\ No newline at end of file
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c
new file mode 100644
index 00000000000..b53225d2cac
--- /dev/null
+++ b/arch/x86/xen/debugfs.c
@@ -0,0 +1,123 @@
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+#include "debugfs.h"
+
+static struct dentry *d_xen_debug;
+
+struct dentry * __init xen_init_debugfs(void)
+{
+ if (!d_xen_debug) {
+ d_xen_debug = debugfs_create_dir("xen", NULL);
+
+ if (!d_xen_debug)
+ pr_warning("Could not create 'xen' debugfs directory\n");
+ }
+
+ return d_xen_debug;
+}
+
+struct array_data
+{
+ void *array;
+ unsigned elements;
+};
+
+static int u32_array_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ return nonseekable_open(inode, file);
+}
+
+static size_t format_array(char *buf, size_t bufsize, const char *fmt,
+ u32 *array, unsigned array_size)
+{
+ size_t ret = 0;
+ unsigned i;
+
+ for(i = 0; i < array_size; i++) {
+ size_t len;
+
+ len = snprintf(buf, bufsize, fmt, array[i]);
+ len++; /* ' ' or '\n' */
+ ret += len;
+
+ if (buf) {
+ buf += len;
+ bufsize -= len;
+ buf[-1] = (i == array_size-1) ? '\n' : ' ';
+ }
+ }
+
+ ret++; /* \0 */
+ if (buf)
+ *buf = '\0';
+
+ return ret;
+}
+
+static char *format_array_alloc(const char *fmt, u32 *array, unsigned array_size)
+{
+ size_t len = format_array(NULL, 0, fmt, array, array_size);
+ char *ret;
+
+ ret = kmalloc(len, GFP_KERNEL);
+ if (ret == NULL)
+ return NULL;
+
+ format_array(ret, len, fmt, array, array_size);
+ return ret;
+}
+
+static ssize_t u32_array_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct array_data *data = inode->i_private;
+ size_t size;
+
+ if (*ppos == 0) {
+ if (file->private_data) {
+ kfree(file->private_data);
+ file->private_data = NULL;
+ }
+
+ file->private_data = format_array_alloc("%u", data->array, data->elements);
+ }
+
+ size = 0;
+ if (file->private_data)
+ size = strlen(file->private_data);
+
+ return simple_read_from_buffer(buf, len, ppos, file->private_data, size);
+}
+
+static int xen_array_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+
+ return 0;
+}
+
+static struct file_operations u32_array_fops = {
+ .owner = THIS_MODULE,
+ .open = u32_array_open,
+ .release= xen_array_release,
+ .read = u32_array_read,
+};
+
+struct dentry *xen_debugfs_create_u32_array(const char *name, mode_t mode,
+ struct dentry *parent,
+ u32 *array, unsigned elements)
+{
+ struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
+
+ if (data == NULL)
+ return NULL;
+
+ data->array = array;
+ data->elements = elements;
+
+ return debugfs_create_file(name, mode, parent, data, &u32_array_fops);
+}
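/*
 * Usage sketch, not part of the patch: format_array() follows the
 * two-pass snprintf idiom -- called with a NULL buffer it only
 * measures, so format_array_alloc() can size the allocation exactly
 * before formatting for real. A consumer exposes a histogram like
 * this ("my_histo" is a made-up example name):
 */
static u32 my_histo[16];

static int __init my_debugfs_init(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	xen_debugfs_create_u32_array("my_histo", 0444, d_xen,
				     my_histo, ARRAY_SIZE(my_histo));
	return 0;
}
fs_initcall(my_debugfs_init);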
diff --git a/arch/x86/xen/debugfs.h b/arch/x86/xen/debugfs.h
new file mode 100644
index 00000000000..e2813208483
--- /dev/null
+++ b/arch/x86/xen/debugfs.h
@@ -0,0 +1,10 @@
+#ifndef _XEN_DEBUGFS_H
+#define _XEN_DEBUGFS_H
+
+struct dentry * __init xen_init_debugfs(void);
+
+struct dentry *xen_debugfs_create_u32_array(const char *name, mode_t mode,
+ struct dentry *parent,
+ u32 *array, unsigned elements);
+
+#endif /* _XEN_DEBUGFS_H */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a4e201b47f6..8ca2f88bde1 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -30,7 +30,6 @@
#include <xen/interface/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
-#include <xen/interface/sched.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvc-console.h>
@@ -57,6 +56,9 @@ EXPORT_SYMBOL_GPL(hypercall_page);
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
+enum xen_domain_type xen_domain_type = XEN_NATIVE;
+EXPORT_SYMBOL_GPL(xen_domain_type);
+
/*
* Identity map, in addition to plain kernel map. This needs to be
* large enough to allocate page table pages to allocate the rest.
@@ -226,103 +228,68 @@ static unsigned long xen_get_debugreg(int reg)
return HYPERVISOR_get_debugreg(reg);
}
-static unsigned long xen_save_fl(void)
+static void xen_leave_lazy(void)
{
- struct vcpu_info *vcpu;
- unsigned long flags;
-
- vcpu = x86_read_percpu(xen_vcpu);
-
- /* flag has opposite sense of mask */
- flags = !vcpu->evtchn_upcall_mask;
-
- /* convert to IF type flag
- -0 -> 0x00000000
- -1 -> 0xffffffff
- */
- return (-flags) & X86_EFLAGS_IF;
+ paravirt_leave_lazy(paravirt_get_lazy_mode());
+ xen_mc_flush();
}
-static void xen_restore_fl(unsigned long flags)
+static unsigned long xen_store_tr(void)
{
- struct vcpu_info *vcpu;
-
- /* convert from IF type flag */
- flags = !(flags & X86_EFLAGS_IF);
-
- /* There's a one instruction preempt window here. We need to
- make sure we're don't switch CPUs between getting the vcpu
- pointer and updating the mask. */
- preempt_disable();
- vcpu = x86_read_percpu(xen_vcpu);
- vcpu->evtchn_upcall_mask = flags;
- preempt_enable_no_resched();
-
- /* Doesn't matter if we get preempted here, because any
- pending event will get dealt with anyway. */
-
- if (flags == 0) {
- preempt_check_resched();
- barrier(); /* unmask then check (avoid races) */
- if (unlikely(vcpu->evtchn_upcall_pending))
- force_evtchn_callback();
- }
+ return 0;
}
-static void xen_irq_disable(void)
+/*
+ * Set the page permissions for a particular virtual address. If the
+ * address is a vmalloc mapping (or other non-linear mapping), then
+ * find the linear mapping of the page and also set its protections to
+ * match.
+ */
+static void set_aliased_prot(void *v, pgprot_t prot)
{
- /* There's a one instruction preempt window here. We need to
- make sure we're don't switch CPUs between getting the vcpu
- pointer and updating the mask. */
- preempt_disable();
- x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
- preempt_enable_no_resched();
-}
+ int level;
+ pte_t *ptep;
+ pte_t pte;
+ unsigned long pfn;
+ struct page *page;
-static void xen_irq_enable(void)
-{
- struct vcpu_info *vcpu;
+ ptep = lookup_address((unsigned long)v, &level);
+ BUG_ON(ptep == NULL);
- /* We don't need to worry about being preempted here, since
- either a) interrupts are disabled, so no preemption, or b)
- the caller is confused and is trying to re-enable interrupts
- on an indeterminate processor. */
+ pfn = pte_pfn(*ptep);
+ page = pfn_to_page(pfn);
- vcpu = x86_read_percpu(xen_vcpu);
- vcpu->evtchn_upcall_mask = 0;
+ pte = pfn_pte(pfn, prot);
- /* Doesn't matter if we get preempted here, because any
- pending event will get dealt with anyway. */
+ if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
+ BUG();
- barrier(); /* unmask then check (avoid races) */
- if (unlikely(vcpu->evtchn_upcall_pending))
- force_evtchn_callback();
-}
+ if (!PageHighMem(page)) {
+ void *av = __va(PFN_PHYS(pfn));
-static void xen_safe_halt(void)
-{
- /* Blocking includes an implicit local_irq_enable(). */
- if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
- BUG();
+ if (av != v)
+ if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
+ BUG();
+ } else
+ kmap_flush_unused();
}
-static void xen_halt(void)
+static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
- if (irqs_disabled())
- HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
- else
- xen_safe_halt();
-}
+ const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
+ int i;
-static void xen_leave_lazy(void)
-{
- paravirt_leave_lazy(paravirt_get_lazy_mode());
- xen_mc_flush();
+ for(i = 0; i < entries; i += entries_per_page)
+ set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}
-static unsigned long xen_store_tr(void)
+static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
- return 0;
+ const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
+ int i;
+
+ for(i = 0; i < entries; i += entries_per_page)
+ set_aliased_prot(ldt + i, PAGE_KERNEL);
}
static void xen_set_ldt(const void *addr, unsigned entries)
@@ -425,8 +392,7 @@ static void xen_load_gs_index(unsigned int idx)
static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
const void *ptr)
{
- unsigned long lp = (unsigned long)&dt[entrynum];
- xmaddr_t mach_lp = virt_to_machine(lp);
+ xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
u64 entry = *(u64 *)ptr;
preempt_disable();
@@ -559,7 +525,7 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
}
static void xen_load_sp0(struct tss_struct *tss,
- struct thread_struct *thread)
+ struct thread_struct *thread)
{
struct multicall_space mcs = xen_mc_entry(0);
MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
@@ -803,6 +769,19 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
ret = -EFAULT;
break;
#endif
+
+ case MSR_STAR:
+ case MSR_CSTAR:
+ case MSR_LSTAR:
+ case MSR_SYSCALL_MASK:
+ case MSR_IA32_SYSENTER_CS:
+ case MSR_IA32_SYSENTER_ESP:
+ case MSR_IA32_SYSENTER_EIP:
+ /* Fast syscall setup is all done in hypercalls, so
+ these are all ignored. Stub them out here to stop
+ Xen console noise. */
+ break;
+
default:
ret = native_write_msr_safe(msr, low, high);
}
@@ -846,8 +825,8 @@ static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
SetPagePinned(page);
if (!PageHighMem(page)) {
- make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
- if (level == PT_PTE)
+ make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
+ if (level == PT_PTE && USE_SPLIT_PTLOCKS)
pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
} else
/* make sure there are no stray mappings of
@@ -915,7 +894,7 @@ static void xen_release_ptpage(u32 pfn, unsigned level)
if (PagePinned(page)) {
if (!PageHighMem(page)) {
- if (level == PT_PTE)
+ if (level == PT_PTE && USE_SPLIT_PTLOCKS)
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}
@@ -1220,6 +1199,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
.load_gs_index = xen_load_gs_index,
#endif
+ .alloc_ldt = xen_alloc_ldt,
+ .free_ldt = xen_free_ldt,
+
.store_gdt = native_store_gdt,
.store_idt = native_store_idt,
.store_tr = xen_store_tr,
@@ -1241,36 +1223,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
},
};
-static void __init __xen_init_IRQ(void)
-{
-#ifdef CONFIG_X86_64
- int i;
-
- /* Create identity vector->irq map */
- for(i = 0; i < NR_VECTORS; i++) {
- int cpu;
-
- for_each_possible_cpu(cpu)
- per_cpu(vector_irq, cpu)[i] = i;
- }
-#endif /* CONFIG_X86_64 */
-
- xen_init_IRQ();
-}
-
-static const struct pv_irq_ops xen_irq_ops __initdata = {
- .init_IRQ = __xen_init_IRQ,
- .save_fl = xen_save_fl,
- .restore_fl = xen_restore_fl,
- .irq_disable = xen_irq_disable,
- .irq_enable = xen_irq_enable,
- .safe_halt = xen_safe_halt,
- .halt = xen_halt,
-#ifdef CONFIG_X86_64
- .adjust_exception_frame = xen_adjust_exception_frame,
-#endif
-};
-
static const struct pv_apic_ops xen_apic_ops __initdata = {
#ifdef CONFIG_X86_LOCAL_APIC
.apic_write = xen_apic_write,
@@ -1664,6 +1616,8 @@ asmlinkage void __init xen_start_kernel(void)
if (!xen_start_info)
return;
+ xen_domain_type = XEN_PV_DOMAIN;
+
BUG_ON(memcmp(xen_start_info->magic, "xen-3", 5) != 0);
xen_setup_features();
@@ -1673,10 +1627,11 @@ asmlinkage void __init xen_start_kernel(void)
pv_init_ops = xen_init_ops;
pv_time_ops = xen_time_ops;
pv_cpu_ops = xen_cpu_ops;
- pv_irq_ops = xen_irq_ops;
pv_apic_ops = xen_apic_ops;
pv_mmu_ops = xen_mmu_ops;
+ xen_init_irq_ops();
+
if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
@@ -1700,7 +1655,7 @@ asmlinkage void __init xen_start_kernel(void)
/* Prevent unwanted bits from being set in PTEs. */
__supported_pte_mask &= ~_PAGE_GLOBAL;
- if (!is_initial_xendomain())
+ if (!xen_initial_domain())
__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
/* Don't do the full vcpu_info placement stuff until we have a
@@ -1735,7 +1690,7 @@ asmlinkage void __init xen_start_kernel(void)
boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);
- if (!is_initial_xendomain()) {
+ if (!xen_initial_domain()) {
add_preferred_console("xenboot", 0, NULL);
add_preferred_console("tty", 0, NULL);
add_preferred_console("hvc", 0, NULL);
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
new file mode 100644
index 00000000000..28b85ab8422
--- /dev/null
+++ b/arch/x86/xen/irq.c
@@ -0,0 +1,143 @@
+#include <linux/hardirq.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/sched.h>
+#include <xen/interface/vcpu.h>
+
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+#include "xen-ops.h"
+
+/*
+ * Force a proper event-channel callback from Xen after clearing the
+ * callback mask. We do this in a very simple manner, by making a call
+ * down into Xen. The pending flag will be checked by Xen on return.
+ */
+void xen_force_evtchn_callback(void)
+{
+ (void)HYPERVISOR_xen_version(0, NULL);
+}
+
+static void __init __xen_init_IRQ(void)
+{
+#ifdef CONFIG_X86_64
+ int i;
+
+ /* Create identity vector->irq map */
+ for(i = 0; i < NR_VECTORS; i++) {
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ per_cpu(vector_irq, cpu)[i] = i;
+ }
+#endif /* CONFIG_X86_64 */
+
+ xen_init_IRQ();
+}
+
+static unsigned long xen_save_fl(void)
+{
+ struct vcpu_info *vcpu;
+ unsigned long flags;
+
+ vcpu = x86_read_percpu(xen_vcpu);
+
+ /* flag has opposite sense of mask */
+ flags = !vcpu->evtchn_upcall_mask;
+
+ /* convert to IF type flag
+ -0 -> 0x00000000
+ -1 -> 0xffffffff
+ */
+ return (-flags) & X86_EFLAGS_IF;
+}
+
+static void xen_restore_fl(unsigned long flags)
+{
+ struct vcpu_info *vcpu;
+
+ /* convert from IF type flag */
+ flags = !(flags & X86_EFLAGS_IF);
+
+ /* There's a one instruction preempt window here. We need to
+ make sure we don't switch CPUs between getting the vcpu
+ pointer and updating the mask. */
+ preempt_disable();
+ vcpu = x86_read_percpu(xen_vcpu);
+ vcpu->evtchn_upcall_mask = flags;
+ preempt_enable_no_resched();
+
+ /* Doesn't matter if we get preempted here, because any
+ pending event will get dealt with anyway. */
+
+ if (flags == 0) {
+ preempt_check_resched();
+ barrier(); /* unmask then check (avoid races) */
+ if (unlikely(vcpu->evtchn_upcall_pending))
+ xen_force_evtchn_callback();
+ }
+}
+
+static void xen_irq_disable(void)
+{
+ /* There's a one instruction preempt window here. We need to
+ make sure we don't switch CPUs between getting the vcpu
+ pointer and updating the mask. */
+ preempt_disable();
+ x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
+ preempt_enable_no_resched();
+}
+
+static void xen_irq_enable(void)
+{
+ struct vcpu_info *vcpu;
+
+ /* We don't need to worry about being preempted here, since
+ either a) interrupts are disabled, so no preemption, or b)
+ the caller is confused and is trying to re-enable interrupts
+ on an indeterminate processor. */
+
+ vcpu = x86_read_percpu(xen_vcpu);
+ vcpu->evtchn_upcall_mask = 0;
+
+ /* Doesn't matter if we get preempted here, because any
+ pending event will get dealt with anyway. */
+
+ barrier(); /* unmask then check (avoid races) */
+ if (unlikely(vcpu->evtchn_upcall_pending))
+ xen_force_evtchn_callback();
+}
+
+static void xen_safe_halt(void)
+{
+ /* Blocking includes an implicit local_irq_enable(). */
+ if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
+ BUG();
+}
+
+static void xen_halt(void)
+{
+ if (irqs_disabled())
+ HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
+ else
+ xen_safe_halt();
+}
+
+static const struct pv_irq_ops xen_irq_ops __initdata = {
+ .init_IRQ = __xen_init_IRQ,
+ .save_fl = xen_save_fl,
+ .restore_fl = xen_restore_fl,
+ .irq_disable = xen_irq_disable,
+ .irq_enable = xen_irq_enable,
+ .safe_halt = xen_safe_halt,
+ .halt = xen_halt,
+#ifdef CONFIG_X86_64
+ .adjust_exception_frame = xen_adjust_exception_frame,
+#endif
+};
+
+void __init xen_init_irq_ops(void)
+{
+ pv_irq_ops = xen_irq_ops;
+}
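/*
 * Worked example of the conversion above, not part of the patch: the
 * event-channel mask has the opposite sense of EFLAGS.IF, so
 * xen_save_fl() maps
 *
 *   evtchn_upcall_mask == 0 (events on)  -> flags = !0 = 1
 *                                        -> (-1) & X86_EFLAGS_IF = X86_EFLAGS_IF
 *   evtchn_upcall_mask == 1 (events off) -> flags = !1 = 0
 *                                        -> (-0) & X86_EFLAGS_IF = 0
 *
 * and xen_restore_fl() inverts it again with !(flags & X86_EFLAGS_IF),
 * so native local_irq_save()/local_irq_restore() semantics hold.
 */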
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index aa37469da69..64e58681767 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -40,6 +40,7 @@
*/
#include <linux/sched.h>
#include <linux/highmem.h>
+#include <linux/debugfs.h>
#include <linux/bug.h>
#include <asm/pgtable.h>
@@ -57,6 +58,61 @@
#include "multicalls.h"
#include "mmu.h"
+#include "debugfs.h"
+
+#define MMU_UPDATE_HISTO 30
+
+#ifdef CONFIG_XEN_DEBUG_FS
+
+static struct {
+ u32 pgd_update;
+ u32 pgd_update_pinned;
+ u32 pgd_update_batched;
+
+ u32 pud_update;
+ u32 pud_update_pinned;
+ u32 pud_update_batched;
+
+ u32 pmd_update;
+ u32 pmd_update_pinned;
+ u32 pmd_update_batched;
+
+ u32 pte_update;
+ u32 pte_update_pinned;
+ u32 pte_update_batched;
+
+ u32 mmu_update;
+ u32 mmu_update_extended;
+ u32 mmu_update_histo[MMU_UPDATE_HISTO];
+
+ u32 prot_commit;
+ u32 prot_commit_batched;
+
+ u32 set_pte_at;
+ u32 set_pte_at_batched;
+ u32 set_pte_at_pinned;
+ u32 set_pte_at_current;
+ u32 set_pte_at_kernel;
+} mmu_stats;
+
+static u8 zero_stats;
+
+static inline void check_zero(void)
+{
+ if (unlikely(zero_stats)) {
+ memset(&mmu_stats, 0, sizeof(mmu_stats));
+ zero_stats = 0;
+ }
+}
+
+#define ADD_STATS(elem, val) \
+ do { check_zero(); mmu_stats.elem += (val); } while(0)
+
+#else /* !CONFIG_XEN_DEBUG_FS */
+
+#define ADD_STATS(elem, val) do { (void)(val); } while(0)
+
+#endif /* CONFIG_XEN_DEBUG_FS */
/*
* Just beyond the highest usermode address. STACK_TOP_MAX has a
@@ -229,25 +285,35 @@ void make_lowmem_page_readwrite(void *vaddr)
}
-static bool page_pinned(void *ptr)
+static bool xen_page_pinned(void *ptr)
{
struct page *page = virt_to_page(ptr);
return PagePinned(page);
}
-static void extend_mmu_update(const struct mmu_update *update)
+static void xen_extend_mmu_update(const struct mmu_update *update)
{
struct multicall_space mcs;
struct mmu_update *u;
mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
- if (mcs.mc != NULL)
+ if (mcs.mc != NULL) {
+ ADD_STATS(mmu_update_extended, 1);
+ ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);
+
mcs.mc->args[1]++;
- else {
+
+ if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
+ ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
+ else
+ ADD_STATS(mmu_update_histo[0], 1);
+ } else {
+ ADD_STATS(mmu_update, 1);
mcs = __xen_mc_entry(sizeof(*u));
MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+ ADD_STATS(mmu_update_histo[1], 1);
}
u = mcs.args;
@@ -265,7 +331,9 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
/* ptr may be ioremapped for 64-bit pagetable setup */
u.ptr = arbitrary_virt_to_machine(ptr).maddr;
u.val = pmd_val_ma(val);
- extend_mmu_update(&u);
+ xen_extend_mmu_update(&u);
+
+ ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
xen_mc_issue(PARAVIRT_LAZY_MMU);
@@ -274,13 +342,17 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
+ ADD_STATS(pmd_update, 1);
+
/* If page is not pinned, we can just update the entry
directly */
- if (!page_pinned(ptr)) {
+ if (!xen_page_pinned(ptr)) {
*ptr = val;
return;
}
+ ADD_STATS(pmd_update_pinned, 1);
+
xen_set_pmd_hyper(ptr, val);
}
@@ -300,12 +372,18 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
if (mm == &init_mm)
preempt_disable();
+ ADD_STATS(set_pte_at, 1);
+// ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
+ ADD_STATS(set_pte_at_current, mm == current->mm);
+ ADD_STATS(set_pte_at_kernel, mm == &init_mm);
+
if (mm == current->mm || mm == &init_mm) {
if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
struct multicall_space mcs;
mcs = xen_mc_entry(0);
MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
+ ADD_STATS(set_pte_at_batched, 1);
xen_mc_issue(PARAVIRT_LAZY_MMU);
goto out;
} else
@@ -334,7 +412,10 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
u.val = pte_val_ma(pte);
- extend_mmu_update(&u);
+ xen_extend_mmu_update(&u);
+
+ ADD_STATS(prot_commit, 1);
+ ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
xen_mc_issue(PARAVIRT_LAZY_MMU);
}
@@ -400,7 +481,9 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val)
/* ptr may be ioremapped for 64-bit pagetable setup */
u.ptr = arbitrary_virt_to_machine(ptr).maddr;
u.val = pud_val_ma(val);
- extend_mmu_update(&u);
+ xen_extend_mmu_update(&u);
+
+ ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
xen_mc_issue(PARAVIRT_LAZY_MMU);
@@ -409,18 +492,26 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val)
void xen_set_pud(pud_t *ptr, pud_t val)
{
+ ADD_STATS(pud_update, 1);
+
/* If page is not pinned, we can just update the entry
directly */
- if (!page_pinned(ptr)) {
+ if (!xen_page_pinned(ptr)) {
*ptr = val;
return;
}
+ ADD_STATS(pud_update_pinned, 1);
+
xen_set_pud_hyper(ptr, val);
}
void xen_set_pte(pte_t *ptep, pte_t pte)
{
+ ADD_STATS(pte_update, 1);
+// ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
+ ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
+
#ifdef CONFIG_X86_PAE
ptep->pte_high = pte.pte_high;
smp_wmb();
@@ -490,7 +581,7 @@ static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
u.ptr = virt_to_machine(ptr).maddr;
u.val = pgd_val_ma(val);
- extend_mmu_update(&u);
+ xen_extend_mmu_update(&u);
}
/*
@@ -517,17 +608,22 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
pgd_t *user_ptr = xen_get_user_pgd(ptr);
+ ADD_STATS(pgd_update, 1);
+
/* If page is not pinned, we can just update the entry
directly */
- if (!page_pinned(ptr)) {
+ if (!xen_page_pinned(ptr)) {
*ptr = val;
if (user_ptr) {
- WARN_ON(page_pinned(user_ptr));
+ WARN_ON(xen_page_pinned(user_ptr));
*user_ptr = val;
}
return;
}
+ ADD_STATS(pgd_update_pinned, 1);
+ ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
+
/* If it's pinned, then we can at least batch the kernel and
user updates together. */
xen_mc_batch();
@@ -555,8 +651,8 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
* For 64-bit, we must skip the Xen hole in the middle of the address
* space, just after the big x86-64 virtual hole.
*/
-static int pgd_walk(pgd_t *pgd, int (*func)(struct page *, enum pt_level),
- unsigned long limit)
+static int xen_pgd_walk(pgd_t *pgd, int (*func)(struct page *, enum pt_level),
+ unsigned long limit)
{
int flush = 0;
unsigned hole_low, hole_high;
@@ -590,8 +686,6 @@ static int pgd_walk(pgd_t *pgd, int (*func)(struct page *, enum pt_level),
pmdidx_limit = 0;
#endif
- flush |= (*func)(virt_to_page(pgd), PT_PGD);
-
for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
pud_t *pud;
@@ -637,16 +731,22 @@ static int pgd_walk(pgd_t *pgd, int (*func)(struct page *, enum pt_level),
}
}
}
+
out:
+ /* Do the top level last, so that the callbacks can use it as
+ a cue to do final things like tlb flushes. */
+ flush |= (*func)(virt_to_page(pgd), PT_PGD);
return flush;
}
-static spinlock_t *lock_pte(struct page *page)
+/* If we're using split pte locks, then take the page's lock and
+ return a pointer to it. Otherwise return NULL. */
+static spinlock_t *xen_pte_lock(struct page *page)
{
spinlock_t *ptl = NULL;
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
ptl = __pte_lockptr(page);
spin_lock(ptl);
#endif
@@ -654,7 +754,7 @@ static spinlock_t *lock_pte(struct page *page)
return ptl;
}
-static void do_unlock(void *v)
+static void xen_pte_unlock(void *v)
{
spinlock_t *ptl = v;
spin_unlock(ptl);
@@ -672,7 +772,7 @@ static void xen_do_pin(unsigned level, unsigned long pfn)
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}
-static int pin_page(struct page *page, enum pt_level level)
+static int xen_pin_page(struct page *page, enum pt_level level)
{
unsigned pgfl = TestSetPagePinned(page);
int flush;
@@ -691,21 +791,40 @@ static int pin_page(struct page *page, enum pt_level level)
flush = 0;
+ /*
+ * We need to hold the pagetable lock between the time
+ * we make the pagetable RO and when we actually pin
+ * it. If we don't, then other users may come in and
+ * attempt to update the pagetable by writing it,
+ * which will fail because the memory is RO but not
+ * pinned, so Xen won't do the trap'n'emulate.
+ *
+ * If we're using split pte locks, we can't hold the
+ * entire pagetable's worth of locks during the
+ * traverse, because we may wrap the preempt count (8
+ * bits). The solution is to mark RO and pin each PTE
+ * page while holding the lock. This means the number
+ * of locks we end up holding is never more than a
+ * batch size (~32 entries, at present).
+ *
+ * If we're not using split pte locks, we needn't pin
+ * the PTE pages independently, because we're
+ * protected by the overall pagetable lock.
+ */
ptl = NULL;
if (level == PT_PTE)
- ptl = lock_pte(page);
+ ptl = xen_pte_lock(page);
MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
pfn_pte(pfn, PAGE_KERNEL_RO),
level == PT_PGD ? UVMF_TLB_FLUSH : 0);
- if (level == PT_PTE)
+ if (ptl) {
xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
- if (ptl) {
/* Queue a deferred unlock for when this batch
is completed. */
- xen_mc_callback(do_unlock, ptl);
+ xen_mc_callback(xen_pte_unlock, ptl);
}
}
@@ -719,7 +838,7 @@ void xen_pgd_pin(pgd_t *pgd)
{
xen_mc_batch();
- if (pgd_walk(pgd, pin_page, USER_LIMIT)) {
+ if (xen_pgd_walk(pgd, xen_pin_page, USER_LIMIT)) {
/* re-enable interrupts for kmap_flush_unused */
xen_mc_issue(0);
kmap_flush_unused();
@@ -733,14 +852,14 @@ void xen_pgd_pin(pgd_t *pgd)
xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
if (user_pgd) {
- pin_page(virt_to_page(user_pgd), PT_PGD);
+ xen_pin_page(virt_to_page(user_pgd), PT_PGD);
xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(user_pgd)));
}
}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
/* Need to make sure unshared kernel PMD is pinnable */
- pin_page(virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), PT_PMD);
+ xen_pin_page(virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), PT_PMD);
#endif
xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
@@ -775,7 +894,7 @@ void xen_mm_pin_all(void)
* that's before we have page structures to store the bits. So do all
* the book-keeping now.
*/
-static __init int mark_pinned(struct page *page, enum pt_level level)
+static __init int xen_mark_pinned(struct page *page, enum pt_level level)
{
SetPagePinned(page);
return 0;
@@ -783,10 +902,10 @@ static __init int mark_pinned(struct page *page, enum pt_level level)
void __init xen_mark_init_mm_pinned(void)
{
- pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
+ xen_pgd_walk(init_mm.pgd, xen_mark_pinned, FIXADDR_TOP);
}
-static int unpin_page(struct page *page, enum pt_level level)
+static int xen_unpin_page(struct page *page, enum pt_level level)
{
unsigned pgfl = TestClearPagePinned(page);
@@ -796,10 +915,18 @@ static int unpin_page(struct page *page, enum pt_level level)
spinlock_t *ptl = NULL;
struct multicall_space mcs;
+ /*
+ * Do the converse to pin_page. If we're using split
+ * pte locks, we must hold the lock while the pte
+ * page is unpinned but still RO to prevent
+ * concurrent updates from seeing it in this
+ * partially-pinned state.
+ */
if (level == PT_PTE) {
- ptl = lock_pte(page);
+ ptl = xen_pte_lock(page);
- xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
+ if (ptl)
+ xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
}
mcs = __xen_mc_entry(0);
@@ -810,7 +937,7 @@ static int unpin_page(struct page *page, enum pt_level level)
if (ptl) {
/* unlock when batch completed */
- xen_mc_callback(do_unlock, ptl);
+ xen_mc_callback(xen_pte_unlock, ptl);
}
}
@@ -830,17 +957,17 @@ static void xen_pgd_unpin(pgd_t *pgd)
if (user_pgd) {
xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(user_pgd)));
- unpin_page(virt_to_page(user_pgd), PT_PGD);
+ xen_unpin_page(virt_to_page(user_pgd), PT_PGD);
}
}
#endif
#ifdef CONFIG_X86_PAE
/* Need to make sure unshared kernel PMD is unpinned */
- pin_page(virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), PT_PMD);
+ xen_unpin_page(virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), PT_PMD);
#endif
- pgd_walk(pgd, unpin_page, USER_LIMIT);
+ xen_pgd_walk(pgd, xen_unpin_page, USER_LIMIT);
xen_mc_issue(0);
}
@@ -907,7 +1034,7 @@ static void drop_other_mm_ref(void *info)
}
}
-static void drop_mm_ref(struct mm_struct *mm)
+static void xen_drop_mm_ref(struct mm_struct *mm)
{
cpumask_t mask;
unsigned cpu;
@@ -937,7 +1064,7 @@ static void drop_mm_ref(struct mm_struct *mm)
smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
-static void drop_mm_ref(struct mm_struct *mm)
+static void xen_drop_mm_ref(struct mm_struct *mm)
{
if (current->active_mm == mm)
load_cr3(swapper_pg_dir);
@@ -961,14 +1088,77 @@ static void drop_mm_ref(struct mm_struct *mm)
void xen_exit_mmap(struct mm_struct *mm)
{
get_cpu(); /* make sure we don't move around */
- drop_mm_ref(mm);
+ xen_drop_mm_ref(mm);
put_cpu();
spin_lock(&mm->page_table_lock);
/* pgd may not be pinned in the error exit path of execve */
- if (page_pinned(mm->pgd))
+ if (xen_page_pinned(mm->pgd))
xen_pgd_unpin(mm->pgd);
spin_unlock(&mm->page_table_lock);
}
+
+#ifdef CONFIG_XEN_DEBUG_FS
+
+static struct dentry *d_mmu_debug;
+
+static int __init xen_mmu_debugfs(void)
+{
+ struct dentry *d_xen = xen_init_debugfs();
+
+ if (d_xen == NULL)
+ return -ENOMEM;
+
+ d_mmu_debug = debugfs_create_dir("mmu", d_xen);
+
+ debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);
+
+ debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
+ debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
+ &mmu_stats.pgd_update_pinned);
+ debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
+ &mmu_stats.pgd_update_batched);
+
+ debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
+ debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
+ &mmu_stats.pud_update_pinned);
+ debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
+ &mmu_stats.pud_update_batched);
+
+ debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
+ debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
+ &mmu_stats.pmd_update_pinned);
+ debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
+ &mmu_stats.pmd_update_batched);
+
+ debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
+// debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
+// &mmu_stats.pte_update_pinned);
+ debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
+ &mmu_stats.pte_update_batched);
+
+ debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
+ debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
+ &mmu_stats.mmu_update_extended);
+ xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
+ mmu_stats.mmu_update_histo, MMU_UPDATE_HISTO);
+
+ debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
+ debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
+ &mmu_stats.set_pte_at_batched);
+ debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
+ &mmu_stats.set_pte_at_current);
+ debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
+ &mmu_stats.set_pte_at_kernel);
+
+ debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
+ debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
+ &mmu_stats.prot_commit_batched);
+
+ return 0;
+}
+fs_initcall(xen_mmu_debugfs);
+
+#endif /* CONFIG_XEN_DEBUG_FS */
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 9efd1c6c977..8ea8a0d0b0d 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -21,16 +21,20 @@
*/
#include <linux/percpu.h>
#include <linux/hardirq.h>
+#include <linux/debugfs.h>
#include <asm/xen/hypercall.h>
#include "multicalls.h"
+#include "debugfs.h"
+
+#define MC_BATCH 32
#define MC_DEBUG 1
-#define MC_BATCH 32
#define MC_ARGS (MC_BATCH * 16)
+
struct mc_buffer {
struct multicall_entry entries[MC_BATCH];
#if MC_DEBUG
@@ -47,6 +51,76 @@ struct mc_buffer {
static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
+/* flush reasons 0- slots, 1- args, 2- callbacks */
+enum flush_reasons
+{
+ FL_SLOTS,
+ FL_ARGS,
+ FL_CALLBACKS,
+
+ FL_N_REASONS
+};
+
+#ifdef CONFIG_XEN_DEBUG_FS
+#define NHYPERCALLS 40 /* not really */
+
+static struct {
+ unsigned histo[MC_BATCH+1];
+
+ unsigned issued;
+ unsigned arg_total;
+ unsigned hypercalls;
+ unsigned histo_hypercalls[NHYPERCALLS];
+
+ unsigned flush[FL_N_REASONS];
+} mc_stats;
+
+static u8 zero_stats;
+
+static inline void check_zero(void)
+{
+ if (unlikely(zero_stats)) {
+ memset(&mc_stats, 0, sizeof(mc_stats));
+ zero_stats = 0;
+ }
+}
+
+static void mc_add_stats(const struct mc_buffer *mc)
+{
+ int i;
+
+ check_zero();
+
+ mc_stats.issued++;
+ mc_stats.hypercalls += mc->mcidx;
+ mc_stats.arg_total += mc->argidx;
+
+ mc_stats.histo[mc->mcidx]++;
+ for(i = 0; i < mc->mcidx; i++) {
+ unsigned op = mc->entries[i].op;
+ if (op < NHYPERCALLS)
+ mc_stats.histo_hypercalls[op]++;
+ }
+}
+
+static void mc_stats_flush(enum flush_reasons idx)
+{
+ check_zero();
+
+ mc_stats.flush[idx]++;
+}
+
+#else /* !CONFIG_XEN_DEBUG_FS */
+
+static inline void mc_add_stats(const struct mc_buffer *mc)
+{
+}
+
+static inline void mc_stats_flush(enum flush_reasons idx)
+{
+}
+#endif /* CONFIG_XEN_DEBUG_FS */
+
void xen_mc_flush(void)
{
struct mc_buffer *b = &__get_cpu_var(mc_buffer);
@@ -60,6 +134,8 @@ void xen_mc_flush(void)
something in the middle */
local_irq_save(flags);
+ mc_add_stats(b);
+
if (b->mcidx) {
#if MC_DEBUG
memcpy(b->debug, b->entries,
@@ -115,6 +191,7 @@ struct multicall_space __xen_mc_entry(size_t args)
if (b->mcidx == MC_BATCH ||
(argidx + args) > MC_ARGS) {
+ mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
xen_mc_flush();
argidx = roundup(b->argidx, sizeof(u64));
}
@@ -158,10 +235,44 @@ void xen_mc_callback(void (*fn)(void *), void *data)
struct mc_buffer *b = &__get_cpu_var(mc_buffer);
struct callback *cb;
- if (b->cbidx == MC_BATCH)
+ if (b->cbidx == MC_BATCH) {
+ mc_stats_flush(FL_CALLBACKS);
xen_mc_flush();
+ }
cb = &b->callbacks[b->cbidx++];
cb->fn = fn;
cb->data = data;
}
+
+#ifdef CONFIG_XEN_DEBUG_FS
+
+static struct dentry *d_mc_debug;
+
+static int __init xen_mc_debugfs(void)
+{
+ struct dentry *d_xen = xen_init_debugfs();
+
+ if (d_xen == NULL)
+ return -ENOMEM;
+
+ d_mc_debug = debugfs_create_dir("multicalls", d_xen);
+
+ debugfs_create_u8("zero_stats", 0644, d_mc_debug, &zero_stats);
+
+ debugfs_create_u32("batches", 0444, d_mc_debug, &mc_stats.issued);
+ debugfs_create_u32("hypercalls", 0444, d_mc_debug, &mc_stats.hypercalls);
+ debugfs_create_u32("arg_total", 0444, d_mc_debug, &mc_stats.arg_total);
+
+ xen_debugfs_create_u32_array("batch_histo", 0444, d_mc_debug,
+ mc_stats.histo, MC_BATCH);
+ xen_debugfs_create_u32_array("hypercall_histo", 0444, d_mc_debug,
+ mc_stats.histo_hypercalls, NHYPERCALLS);
+ xen_debugfs_create_u32_array("flush_reasons", 0444, d_mc_debug,
+ mc_stats.flush, FL_N_REASONS);
+
+ return 0;
+}
+fs_initcall(xen_mc_debugfs);
+
+#endif /* CONFIG_XEN_DEBUG_FS */
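/*
 * Sketch of the batching pattern these counters instrument, not part
 * of the patch; it mirrors callers such as xen_set_pmd_hyper() in the
 * mmu.c hunks above (xen_extend_mmu_update is static to mmu.c, so
 * this is illustrative only):
 */
static void example_batched_update(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	xen_mc_batch();			/* open a batch on this cpu */

	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);	/* queue; flushes when slots or args run out */

	xen_mc_issue(PARAVIRT_LAZY_MMU);	/* flush now unless in lazy MMU mode */
}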
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d8faf79a0a1..d77da613b1d 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -11,11 +11,8 @@
* useful topology information for the kernel to make use of. As a
* result, all CPUs are treated as if they're single-core and
* single-threaded.
- *
- * This does not handle HOTPLUG_CPU yet.
*/
#include <linux/sched.h>
-#include <linux/kernel_stat.h>
#include <linux/err.h>
#include <linux/smp.h>
@@ -36,8 +33,6 @@
#include "xen-ops.h"
#include "mmu.h"
-static void __cpuinit xen_init_lock_cpu(int cpu);
-
cpumask_t xen_cpu_initialized_map;
static DEFINE_PER_CPU(int, resched_irq);
@@ -64,11 +59,12 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static __cpuinit void cpu_bringup_and_idle(void)
+static __cpuinit void cpu_bringup(void)
{
int cpu = smp_processor_id();
cpu_init();
+ touch_softlockup_watchdog();
preempt_disable();
xen_enable_sysenter();
@@ -89,6 +85,11 @@ static __cpuinit void cpu_bringup_and_idle(void)
local_irq_enable();
wmb(); /* make sure everything is out */
+}
+
+static __cpuinit void cpu_bringup_and_idle(void)
+{
+ cpu_bringup();
cpu_idle();
}
@@ -212,8 +213,6 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
cpu_set(cpu, cpu_present_map);
}
-
- //init_xenbus_allowed_cpumask();
}
static __cpuinit int
@@ -281,12 +280,6 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
struct task_struct *idle = idle_task(cpu);
int rc;
-#if 0
- rc = cpu_up_check(cpu);
- if (rc)
- return rc;
-#endif
-
#ifdef CONFIG_X86_64
/* Allocate node local memory for AP pdas */
WARN_ON(cpu == 0);
@@ -339,6 +332,60 @@ static void xen_smp_cpus_done(unsigned int max_cpus)
{
}
+#ifdef CONFIG_HOTPLUG_CPU
+static int xen_cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+ if (cpu == 0)
+ return -EBUSY;
+
+ cpu_disable_common();
+
+ load_cr3(swapper_pg_dir);
+ return 0;
+}
+
+static void xen_cpu_die(unsigned int cpu)
+{
+ while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
+ current->state = TASK_UNINTERRUPTIBLE;
+ schedule_timeout(HZ/10);
+ }
+ unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+ xen_uninit_lock_cpu(cpu);
+ xen_teardown_timer(cpu);
+
+ if (num_online_cpus() == 1)
+ alternatives_smp_switch(0);
+}
+
+static void xen_play_dead(void)
+{
+ play_dead_common();
+ HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
+ cpu_bringup();
+}
+
+#else /* !CONFIG_HOTPLUG_CPU */
+static int xen_cpu_disable(void)
+{
+ return -ENOSYS;
+}
+
+static void xen_cpu_die(unsigned int cpu)
+{
+ BUG();
+}
+
+static void xen_play_dead(void)
+{
+ BUG();
+}
+
+#endif
static void stop_self(void *v)
{
int cpu = smp_processor_id();
@@ -419,176 +466,16 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-struct xen_spinlock {
- unsigned char lock; /* 0 -> free; 1 -> locked */
- unsigned short spinners; /* count of waiting cpus */
-};
-
-static int xen_spin_is_locked(struct raw_spinlock *lock)
-{
- struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
- return xl->lock != 0;
-}
-
-static int xen_spin_is_contended(struct raw_spinlock *lock)
-{
- struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
- /* Not strictly true; this is only the count of contended
- lock-takers entering the slow path. */
- return xl->spinners != 0;
-}
-
-static int xen_spin_trylock(struct raw_spinlock *lock)
-{
- struct xen_spinlock *xl = (struct xen_spinlock *)lock;
- u8 old = 1;
-
- asm("xchgb %b0,%1"
- : "+q" (old), "+m" (xl->lock) : : "memory");
-
- return old == 0;
-}
-
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
-
-static inline void spinning_lock(struct xen_spinlock *xl)
-{
- __get_cpu_var(lock_spinners) = xl;
- wmb(); /* set lock of interest before count */
- asm(LOCK_PREFIX " incw %0"
- : "+m" (xl->spinners) : : "memory");
-}
-
-static inline void unspinning_lock(struct xen_spinlock *xl)
-{
- asm(LOCK_PREFIX " decw %0"
- : "+m" (xl->spinners) : : "memory");
- wmb(); /* decrement count before clearing lock */
- __get_cpu_var(lock_spinners) = NULL;
-}
-
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
-{
- struct xen_spinlock *xl = (struct xen_spinlock *)lock;
- int irq = __get_cpu_var(lock_kicker_irq);
- int ret;
-
- /* If kicker interrupts not initialized yet, just spin */
- if (irq == -1)
- return 0;
-
- /* announce we're spinning */
- spinning_lock(xl);
-
- /* clear pending */
- xen_clear_irq_pending(irq);
-
- /* check again make sure it didn't become free while
- we weren't looking */
- ret = xen_spin_trylock(lock);
- if (ret)
- goto out;
-
- /* block until irq becomes pending */
- xen_poll_irq(irq);
- kstat_this_cpu.irqs[irq]++;
-
-out:
- unspinning_lock(xl);
- return ret;
-}
-
-static void xen_spin_lock(struct raw_spinlock *lock)
-{
- struct xen_spinlock *xl = (struct xen_spinlock *)lock;
- int timeout;
- u8 oldval;
-
- do {
- timeout = 1 << 10;
-
- asm("1: xchgb %1,%0\n"
- " testb %1,%1\n"
- " jz 3f\n"
- "2: rep;nop\n"
- " cmpb $0,%0\n"
- " je 1b\n"
- " dec %2\n"
- " jnz 2b\n"
- "3:\n"
- : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
- : "1" (1)
- : "memory");
-
- } while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
-}
-
-static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
-{
- int cpu;
-
- for_each_online_cpu(cpu) {
- /* XXX should mix up next cpu selection */
- if (per_cpu(lock_spinners, cpu) == xl) {
- xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
- break;
- }
- }
-}
-
-static void xen_spin_unlock(struct raw_spinlock *lock)
-{
- struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
- smp_wmb(); /* make sure no writes get moved after unlock */
- xl->lock = 0; /* release lock */
-
- /* make sure unlock happens before kick */
- barrier();
-
- if (unlikely(xl->spinners))
- xen_spin_unlock_slow(xl);
-}
-
-static __cpuinit void xen_init_lock_cpu(int cpu)
-{
- int irq;
- const char *name;
-
- name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
- irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
- cpu,
- xen_reschedule_interrupt,
- IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
- name,
- NULL);
-
- if (irq >= 0) {
- disable_irq(irq); /* make sure it's never delivered */
- per_cpu(lock_kicker_irq, cpu) = irq;
- }
-
- printk("cpu %d spinlock event irq %d\n", cpu, irq);
-}
-
-static void __init xen_init_spinlocks(void)
-{
- pv_lock_ops.spin_is_locked = xen_spin_is_locked;
- pv_lock_ops.spin_is_contended = xen_spin_is_contended;
- pv_lock_ops.spin_lock = xen_spin_lock;
- pv_lock_ops.spin_trylock = xen_spin_trylock;
- pv_lock_ops.spin_unlock = xen_spin_unlock;
-}
-
static const struct smp_ops xen_smp_ops __initdata = {
.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
.smp_prepare_cpus = xen_smp_prepare_cpus,
- .cpu_up = xen_cpu_up,
.smp_cpus_done = xen_smp_cpus_done,
+ .cpu_up = xen_cpu_up,
+ .cpu_die = xen_cpu_die,
+ .cpu_disable = xen_cpu_disable,
+ .play_dead = xen_play_dead,
+
.smp_send_stop = xen_smp_send_stop,
.smp_send_reschedule = xen_smp_send_reschedule,
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
new file mode 100644
index 00000000000..dd71e3a021c
--- /dev/null
+++ b/arch/x86/xen/spinlock.c
@@ -0,0 +1,428 @@
+/*
+ * Split spinlock implementation out into its own file, so it can be
+ * compiled in an FTRACE-compatible way.
+ */
+#include <linux/kernel_stat.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/log2.h>
+
+#include <asm/paravirt.h>
+
+#include <xen/interface/xen.h>
+#include <xen/events.h>
+
+#include "xen-ops.h"
+#include "debugfs.h"
+
+#ifdef CONFIG_XEN_DEBUG_FS
+static struct xen_spinlock_stats
+{
+ u64 taken;
+ u32 taken_slow;
+ u32 taken_slow_nested;
+ u32 taken_slow_pickup;
+ u32 taken_slow_spurious;
+ u32 taken_slow_irqenable;
+
+ u64 released;
+ u32 released_slow;
+ u32 released_slow_kicked;
+
+#define HISTO_BUCKETS 30
+ u32 histo_spin_total[HISTO_BUCKETS+1];
+ u32 histo_spin_spinning[HISTO_BUCKETS+1];
+ u32 histo_spin_blocked[HISTO_BUCKETS+1];
+
+ u64 time_total;
+ u64 time_spinning;
+ u64 time_blocked;
+} spinlock_stats;
+
+static u8 zero_stats;
+
+static unsigned lock_timeout = 1 << 10;
+#define TIMEOUT lock_timeout
+
+static inline void check_zero(void)
+{
+ if (unlikely(zero_stats)) {
+ memset(&spinlock_stats, 0, sizeof(spinlock_stats));
+ zero_stats = 0;
+ }
+}
+
+#define ADD_STATS(elem, val) \
+ do { check_zero(); spinlock_stats.elem += (val); } while(0)
+
+static inline u64 spin_time_start(void)
+{
+ return xen_clocksource_read();
+}
+
+static void __spin_time_accum(u64 delta, u32 *array)
+{
+ unsigned index = ilog2(delta);
+
+ check_zero();
+
+ if (index < HISTO_BUCKETS)
+ array[index]++;
+ else
+ array[HISTO_BUCKETS]++;
+}
+
+static inline void spin_time_accum_spinning(u64 start)
+{
+ u32 delta = xen_clocksource_read() - start;
+
+ __spin_time_accum(delta, spinlock_stats.histo_spin_spinning);
+ spinlock_stats.time_spinning += delta;
+}
+
+static inline void spin_time_accum_total(u64 start)
+{
+ u32 delta = xen_clocksource_read() - start;
+
+ __spin_time_accum(delta, spinlock_stats.histo_spin_total);
+ spinlock_stats.time_total += delta;
+}
+
+static inline void spin_time_accum_blocked(u64 start)
+{
+ u32 delta = xen_clocksource_read() - start;
+
+ __spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
+ spinlock_stats.time_blocked += delta;
+}
+#else /* !CONFIG_XEN_DEBUG_FS */
+#define TIMEOUT (1 << 10)
+#define ADD_STATS(elem, val) do { (void)(val); } while(0)
+
+static inline u64 spin_time_start(void)
+{
+ return 0;
+}
+
+static inline void spin_time_accum_total(u64 start)
+{
+}
+static inline void spin_time_accum_spinning(u64 start)
+{
+}
+static inline void spin_time_accum_blocked(u64 start)
+{
+}
+#endif /* CONFIG_XEN_DEBUG_FS */
+
+struct xen_spinlock {
+ unsigned char lock; /* 0 -> free; 1 -> locked */
+ unsigned short spinners; /* count of waiting cpus */
+};
+
+static int xen_spin_is_locked(struct raw_spinlock *lock)
+{
+ struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+ return xl->lock != 0;
+}
+
+static int xen_spin_is_contended(struct raw_spinlock *lock)
+{
+ struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+ /* Not strictly true; this is only the count of contended
+ lock-takers entering the slow path. */
+ return xl->spinners != 0;
+}
+
+static int xen_spin_trylock(struct raw_spinlock *lock)
+{
+ struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+ u8 old = 1;
+
+ asm("xchgb %b0,%1"
+ : "+q" (old), "+m" (xl->lock) : : "memory");
+
+ return old == 0;
+}
+
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
+
+/*
+ * Mark a cpu as interested in a lock. Returns the CPU's previous
+ * lock of interest, in case we got preempted by an interrupt.
+ */
+static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
+{
+ struct xen_spinlock *prev;
+
+ prev = __get_cpu_var(lock_spinners);
+ __get_cpu_var(lock_spinners) = xl;
+
+ wmb(); /* set lock of interest before count */
+
+ asm(LOCK_PREFIX " incw %0"
+ : "+m" (xl->spinners) : : "memory");
+
+ return prev;
+}
+
+/*
+ * Mark a cpu as no longer interested in a lock. Restores previous
+ * lock of interest (NULL for none).
+ */
+static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
+{
+ asm(LOCK_PREFIX " decw %0"
+ : "+m" (xl->spinners) : : "memory");
+ wmb(); /* decrement count before restoring lock */
+ __get_cpu_var(lock_spinners) = prev;
+}
+
+static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
+{
+ struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+ struct xen_spinlock *prev;
+ int irq = __get_cpu_var(lock_kicker_irq);
+ int ret;
+ unsigned long flags;
+ u64 start;
+
+ /* If kicker interrupts not initialized yet, just spin */
+ if (irq == -1)
+ return 0;
+
+ start = spin_time_start();
+
+ /* announce we're spinning */
+ prev = spinning_lock(xl);
+
+ flags = __raw_local_save_flags();
+ if (irq_enable) {
+ ADD_STATS(taken_slow_irqenable, 1);
+ raw_local_irq_enable();
+ }
+
+ ADD_STATS(taken_slow, 1);
+ ADD_STATS(taken_slow_nested, prev != NULL);
+
+ do {
+ /* clear pending */
+ xen_clear_irq_pending(irq);
+
+ /* check again to make sure it didn't become free while
+ we weren't looking */
+ ret = xen_spin_trylock(lock);
+ if (ret) {
+ ADD_STATS(taken_slow_pickup, 1);
+
+ /*
+ * If we interrupted another spinlock while it
+ * was blocking, make sure it doesn't block
+ * without rechecking the lock.
+ */
+ if (prev != NULL)
+ xen_set_irq_pending(irq);
+ goto out;
+ }
+
+ /*
+ * Block until irq becomes pending. If we're
+ * interrupted at this point (after the trylock but
+ * before entering the block), then the nested lock
+ * handler guarantees that the irq will be left
+ * pending if there's any chance the lock became free;
+ * xen_poll_irq() returns immediately if the irq is
+ * pending.
+ */
+ xen_poll_irq(irq);
+ ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
+ } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
+
+ kstat_this_cpu.irqs[irq]++;
+
+out:
+ raw_local_irq_restore(flags);
+ unspinning_lock(xl, prev);
+ spin_time_accum_blocked(start);
+
+ return ret;
+}
+
+static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
+{
+ struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+ unsigned timeout;
+ u8 oldval;
+ u64 start_spin;
+
+ ADD_STATS(taken, 1);
+
+ start_spin = spin_time_start();
+
+ do {
+ u64 start_spin_fast = spin_time_start();
+
+ timeout = TIMEOUT;
+
+ asm("1: xchgb %1,%0\n"
+ " testb %1,%1\n"
+ " jz 3f\n"
+ "2: rep;nop\n"
+ " cmpb $0,%0\n"
+ " je 1b\n"
+ " dec %2\n"
+ " jnz 2b\n"
+ "3:\n"
+ : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
+ : "1" (1)
+ : "memory");
+
+ spin_time_accum_spinning(start_spin_fast);
+
+ } while (unlikely(oldval != 0 &&
+ (TIMEOUT == ~0 || !xen_spin_lock_slow(lock, irq_enable))));
+
+ spin_time_accum_total(start_spin);
+}
+
+static void xen_spin_lock(struct raw_spinlock *lock)
+{
+ __xen_spin_lock(lock, false);
+}
+
+static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+{
+ __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
+}
+
+static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
+{
+ int cpu;
+
+ ADD_STATS(released_slow, 1);
+
+ for_each_online_cpu(cpu) {
+ /* XXX should mix up next cpu selection */
+ if (per_cpu(lock_spinners, cpu) == xl) {
+ ADD_STATS(released_slow_kicked, 1);
+ xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+ break;
+ }
+ }
+}
+
+static void xen_spin_unlock(struct raw_spinlock *lock)
+{
+ struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+ ADD_STATS(released, 1);
+
+ smp_wmb(); /* make sure no writes get moved after unlock */
+ xl->lock = 0; /* release lock */
+
+ /* make sure unlock happens before kick */
+ barrier();
+
+ if (unlikely(xl->spinners))
+ xen_spin_unlock_slow(xl);
+}
+
+static irqreturn_t dummy_handler(int irq, void *dev_id)
+{
+ BUG();
+ return IRQ_HANDLED;
+}
+
+void __cpuinit xen_init_lock_cpu(int cpu)
+{
+ int irq;
+ const char *name;
+
+ name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
+ irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
+ cpu,
+ dummy_handler,
+ IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+ name,
+ NULL);
+
+ if (irq >= 0) {
+ disable_irq(irq); /* make sure it's never delivered */
+ per_cpu(lock_kicker_irq, cpu) = irq;
+ }
+
+ printk("cpu %d spinlock event irq %d\n", cpu, irq);
+}
+
+void xen_uninit_lock_cpu(int cpu)
+{
+ unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
+}
+
+void __init xen_init_spinlocks(void)
+{
+ pv_lock_ops.spin_is_locked = xen_spin_is_locked;
+ pv_lock_ops.spin_is_contended = xen_spin_is_contended;
+ pv_lock_ops.spin_lock = xen_spin_lock;
+ pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
+ pv_lock_ops.spin_trylock = xen_spin_trylock;
+ pv_lock_ops.spin_unlock = xen_spin_unlock;
+}
+
+#ifdef CONFIG_XEN_DEBUG_FS
+
+static struct dentry *d_spin_debug;
+
+static int __init xen_spinlock_debugfs(void)
+{
+ struct dentry *d_xen = xen_init_debugfs();
+
+ if (d_xen == NULL)
+ return -ENOMEM;
+
+ d_spin_debug = debugfs_create_dir("spinlocks", d_xen);
+
+ debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
+
+ debugfs_create_u32("timeout", 0644, d_spin_debug, &lock_timeout);
+
+ debugfs_create_u64("taken", 0444, d_spin_debug, &spinlock_stats.taken);
+ debugfs_create_u32("taken_slow", 0444, d_spin_debug,
+ &spinlock_stats.taken_slow);
+ debugfs_create_u32("taken_slow_nested", 0444, d_spin_debug,
+ &spinlock_stats.taken_slow_nested);
+ debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
+ &spinlock_stats.taken_slow_pickup);
+ debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
+ &spinlock_stats.taken_slow_spurious);
+ debugfs_create_u32("taken_slow_irqenable", 0444, d_spin_debug,
+ &spinlock_stats.taken_slow_irqenable);
+
+ debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released);
+ debugfs_create_u32("released_slow", 0444, d_spin_debug,
+ &spinlock_stats.released_slow);
+ debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
+ &spinlock_stats.released_slow_kicked);
+
+ debugfs_create_u64("time_spinning", 0444, d_spin_debug,
+ &spinlock_stats.time_spinning);
+ debugfs_create_u64("time_blocked", 0444, d_spin_debug,
+ &spinlock_stats.time_blocked);
+ debugfs_create_u64("time_total", 0444, d_spin_debug,
+ &spinlock_stats.time_total);
+
+ xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
+ spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
+ xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
+ spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
+ xen_debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
+ spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
+
+ return 0;
+}
+fs_initcall(xen_spinlock_debugfs);
+
+#endif /* CONFIG_XEN_DEBUG_FS */
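[Commentary: the locking protocol in this new file (a one-byte lock taken with xchg, plus a count of slow-path waiters checked on unlock) can be approximated in userspace. The sketch below uses C11 atomics in place of the LOCK-prefixed asm and a pthread condition variable standing in for the event-channel kick/poll pair; all demo_* names are invented and this is an approximation of the idea, not the kernel code.]

/* cc -std=c11 -pthread demo_spinlock.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

struct demo_spinlock {
	atomic_uchar lock;          /* 0 -> free; 1 -> locked */
	atomic_ushort spinners;     /* waiters that entered the slow path */
	pthread_mutex_t mtx;        /* mtx+kick emulate the per-cpu event */
	pthread_cond_t kick;        /* channel used by xen_poll_irq() */
};

static struct demo_spinlock sl = {
	.mtx = PTHREAD_MUTEX_INITIALIZER,
	.kick = PTHREAD_COND_INITIALIZER,
};

static bool demo_trylock(struct demo_spinlock *xl)
{
	/* like xen_spin_trylock(): grab the byte; old value 0 means success */
	return atomic_exchange(&xl->lock, 1) == 0;
}

static void demo_lock(struct demo_spinlock *xl)
{
	/* fast path: bounded spin, as in the 1<<10 timeout loop */
	for (int timeout = 1 << 10; timeout; timeout--)
		if (demo_trylock(xl))
			return;

	/* slow path: announce interest, recheck, then block until kicked,
	 * mirroring spinning_lock() + xen_spin_lock_slow() */
	atomic_fetch_add(&xl->spinners, 1);
	pthread_mutex_lock(&xl->mtx);
	while (!demo_trylock(xl))
		pthread_cond_wait(&xl->kick, &xl->mtx);
	pthread_mutex_unlock(&xl->mtx);
	atomic_fetch_sub(&xl->spinners, 1);
}

static void demo_unlock(struct demo_spinlock *xl)
{
	atomic_store(&xl->lock, 0);           /* release the byte */
	if (atomic_load(&xl->spinners)) {     /* slow-path waiters? kick one */
		pthread_mutex_lock(&xl->mtx);
		pthread_cond_signal(&xl->kick);
		pthread_mutex_unlock(&xl->mtx);
	}
}

static long counter;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		demo_lock(&sl);
		counter++;
		demo_unlock(&sl);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("counter = %ld (expect 400000)\n", counter);
	return 0;
}

[Unlike the real slow path there is no spurious-wakeup accounting here, and the kick wakes a waiter chosen by the pthread implementation rather than by scanning lock_spinners.]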
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 685b77470fc..004ba86326a 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -30,8 +30,6 @@
#define TIMER_SLOP 100000
#define NS_PER_TICK (1000000000LL / HZ)
-static cycle_t xen_clocksource_read(void);
-
/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
@@ -213,7 +211,7 @@ unsigned long xen_tsc_khz(void)
return xen_khz;
}
-static cycle_t xen_clocksource_read(void)
+cycle_t xen_clocksource_read(void)
{
struct pvclock_vcpu_time_info *src;
cycle_t ret;
@@ -452,6 +450,14 @@ void xen_setup_timer(int cpu)
setup_runstate_info(cpu);
}
+void xen_teardown_timer(int cpu)
+{
+ struct clock_event_device *evt;
+ BUG_ON(cpu == 0);
+ evt = &per_cpu(xen_clock_events, cpu);
+ unbind_from_irqhandler(evt->irq, NULL);
+}
+
void xen_setup_cpu_clockevents(void)
{
BUG_ON(preemptible());
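[Commentary: xen_clocksource_read() loses its static qualifier so the spinlock code above can timestamp its spinning and blocking phases. The bucketing those timestamps feed, spin_time_start() plus the ilog2 histogram in __spin_time_accum(), looks roughly like this userspace stand-in, with CLOCK_MONOTONIC replacing the pvclock read; demo names only.]

/* cc -std=c11 demo_histo.c   (older glibc needs -lrt for clock_gettime) */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define DEMO_BUCKETS 30
static uint32_t histo[DEMO_BUCKETS + 1];

static uint64_t demo_time_start(void)        /* role of spin_time_start() */
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void demo_time_accum(uint64_t start)  /* role of __spin_time_accum() */
{
	uint64_t delta = demo_time_start() - start;
	unsigned index = 0;

	while (delta >>= 1)                  /* integer log2, as ilog2() does */
		index++;
	histo[index < DEMO_BUCKETS ? index : DEMO_BUCKETS]++;
}

int main(void)
{
	for (int i = 0; i < 1000; i++) {
		uint64_t t = demo_time_start();
		demo_time_accum(t);   /* measures nearly back-to-back reads */
	}
	for (int i = 0; i <= DEMO_BUCKETS; i++)
		if (histo[i])
			printf("2^%d ns: %u\n", i, histo[i]);
	return 0;
}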
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 2497a30f41d..42786f59d9c 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -298,7 +298,7 @@ check_events:
push %eax
push %ecx
push %edx
- call force_evtchn_callback
+ call xen_force_evtchn_callback
pop %edx
pop %ecx
pop %eax
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 7f58304fafb..3b9bda46487 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -122,7 +122,7 @@ check_events:
push %r9
push %r10
push %r11
- call force_evtchn_callback
+ call xen_force_evtchn_callback
pop %r11
pop %r10
pop %r9
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index dd3c23152a2..d7422dc2a55 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -2,6 +2,7 @@
#define XEN_OPS_H
#include <linux/init.h>
+#include <linux/clocksource.h>
#include <linux/irqreturn.h>
#include <xen/xen-ops.h>
@@ -31,7 +32,10 @@ void xen_vcpu_restore(void);
void __init xen_build_dynamic_phys_to_machine(void);
+void xen_init_irq_ops(void);
void xen_setup_timer(int cpu);
+void xen_teardown_timer(int cpu);
+cycle_t xen_clocksource_read(void);
void xen_setup_cpu_clockevents(void);
unsigned long xen_tsc_khz(void);
void __init xen_time_init(void);
@@ -50,6 +54,10 @@ void __init xen_setup_vcpu_info_placement(void);
#ifdef CONFIG_SMP
void xen_smp_init(void);
+void __init xen_init_spinlocks(void);
+__cpuinit void xen_init_lock_cpu(int cpu);
+void xen_uninit_lock_cpu(int cpu);
+
extern cpumask_t xen_cpu_initialized_map;
#else
static inline void xen_smp_init(void) {}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3ca643cafcc..d5e75325515 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1032,7 +1032,7 @@ static struct xenbus_driver blkfront = {
static int __init xlblk_init(void)
{
- if (!is_running_on_xen())
+ if (!xen_domain())
return -ENODEV;
if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index 6b70aa66a58..538ceea5e7d 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -108,8 +108,8 @@ static int __init xen_init(void)
{
struct hvc_struct *hp;
- if (!is_running_on_xen() ||
- is_initial_xendomain() ||
+ if (!xen_pv_domain() ||
+ xen_initial_domain() ||
!xen_start_info->console.domU.evtchn)
return -ENODEV;
@@ -142,7 +142,7 @@ static void __exit xen_fini(void)
static int xen_cons_init(void)
{
- if (!is_running_on_xen())
+ if (!xen_pv_domain())
return 0;
hvc_instantiate(HVC_COOKIE, 0, &hvc_ops);
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
index 9ce3b3baf3a..3ab6362f043 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/xen-kbdfront.c
@@ -335,11 +335,11 @@ static struct xenbus_driver xenkbd = {
static int __init xenkbd_init(void)
{
- if (!is_running_on_xen())
+ if (!xen_domain())
return -ENODEV;
/* Nothing to do if running in dom0. */
- if (is_initial_xendomain())
+ if (xen_initial_domain())
return -ENODEV;
return xenbus_register_frontend(&xenkbd);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c749bdba214..3c3dd403f5d 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1794,10 +1794,10 @@ static struct xenbus_driver netfront = {
static int __init netif_init(void)
{
- if (!is_running_on_xen())
+ if (!xen_domain())
return -ENODEV;
- if (is_initial_xendomain())
+ if (xen_initial_domain())
return 0;
printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
@@ -1809,7 +1809,7 @@ module_init(netif_init);
static void __exit netif_exit(void)
{
- if (is_initial_xendomain())
+ if (xen_initial_domain())
return;
xenbus_unregister_driver(&netfront);
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 47ed39b52f9..a463b3dd837 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -680,11 +680,11 @@ static struct xenbus_driver xenfb = {
static int __init xenfb_init(void)
{
- if (!is_running_on_xen())
+ if (!xen_domain())
return -ENODEV;
/* Nothing to do if running in dom0. */
- if (is_initial_xendomain())
+ if (xen_initial_domain())
return -ENODEV;
return xenbus_register_frontend(&xenfb);
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 363286c5429..d2a8fdf0e19 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,4 +1,5 @@
obj-y += grant-table.o features.o events.o manage.o
obj-y += xenbus/
+obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
obj-$(CONFIG_XEN_BALLOON) += balloon.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 2e15da5459c..a51f3e17a5f 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -226,9 +226,8 @@ static int increase_reservation(unsigned long nr_pages)
}
set_xen_guest_handle(reservation.extent_start, frame_list);
- reservation.nr_extents = nr_pages;
- rc = HYPERVISOR_memory_op(
- XENMEM_populate_physmap, &reservation);
+ reservation.nr_extents = nr_pages;
+ rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
if (rc < nr_pages) {
if (rc > 0) {
int ret;
@@ -236,7 +235,7 @@ static int increase_reservation(unsigned long nr_pages)
/* We hit the Xen hard limit: reprobe. */
reservation.nr_extents = rc;
ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
- &reservation);
+ &reservation);
BUG_ON(ret != rc);
}
if (rc >= 0)
@@ -420,7 +419,7 @@ static int __init balloon_init(void)
unsigned long pfn;
struct page *page;
- if (!is_running_on_xen())
+ if (!xen_pv_domain())
return -ENODEV;
pr_info("xen_balloon: Initialising balloon driver.\n");
@@ -464,136 +463,13 @@ static void balloon_exit(void)
module_exit(balloon_exit);
-static void balloon_update_driver_allowance(long delta)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&balloon_lock, flags);
- balloon_stats.driver_pages += delta;
- spin_unlock_irqrestore(&balloon_lock, flags);
-}
-
-static int dealloc_pte_fn(
- pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
-{
- unsigned long mfn = pte_mfn(*pte);
- int ret;
- struct xen_memory_reservation reservation = {
- .nr_extents = 1,
- .extent_order = 0,
- .domid = DOMID_SELF
- };
- set_xen_guest_handle(reservation.extent_start, &mfn);
- set_pte_at(&init_mm, addr, pte, __pte_ma(0ull));
- set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
- ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
- BUG_ON(ret != 1);
- return 0;
-}
-
-static struct page **alloc_empty_pages_and_pagevec(int nr_pages)
-{
- unsigned long vaddr, flags;
- struct page *page, **pagevec;
- int i, ret;
-
- pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL);
- if (pagevec == NULL)
- return NULL;
-
- for (i = 0; i < nr_pages; i++) {
- page = pagevec[i] = alloc_page(GFP_KERNEL);
- if (page == NULL)
- goto err;
-
- vaddr = (unsigned long)page_address(page);
-
- scrub_page(page);
-
- spin_lock_irqsave(&balloon_lock, flags);
-
- if (xen_feature(XENFEAT_auto_translated_physmap)) {
- unsigned long gmfn = page_to_pfn(page);
- struct xen_memory_reservation reservation = {
- .nr_extents = 1,
- .extent_order = 0,
- .domid = DOMID_SELF
- };
- set_xen_guest_handle(reservation.extent_start, &gmfn);
- ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
- &reservation);
- if (ret == 1)
- ret = 0; /* success */
- } else {
- ret = apply_to_page_range(&init_mm, vaddr, PAGE_SIZE,
- dealloc_pte_fn, NULL);
- }
-
- if (ret != 0) {
- spin_unlock_irqrestore(&balloon_lock, flags);
- __free_page(page);
- goto err;
- }
-
- totalram_pages = --balloon_stats.current_pages;
-
- spin_unlock_irqrestore(&balloon_lock, flags);
- }
-
- out:
- schedule_work(&balloon_worker);
- flush_tlb_all();
- return pagevec;
-
- err:
- spin_lock_irqsave(&balloon_lock, flags);
- while (--i >= 0)
- balloon_append(pagevec[i]);
- spin_unlock_irqrestore(&balloon_lock, flags);
- kfree(pagevec);
- pagevec = NULL;
- goto out;
-}
-
-static void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
-{
- unsigned long flags;
- int i;
-
- if (pagevec == NULL)
- return;
-
- spin_lock_irqsave(&balloon_lock, flags);
- for (i = 0; i < nr_pages; i++) {
- BUG_ON(page_count(pagevec[i]) != 1);
- balloon_append(pagevec[i]);
- }
- spin_unlock_irqrestore(&balloon_lock, flags);
-
- kfree(pagevec);
-
- schedule_work(&balloon_worker);
-}
-
-static void balloon_release_driver_page(struct page *page)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&balloon_lock, flags);
- balloon_append(page);
- balloon_stats.driver_pages--;
- spin_unlock_irqrestore(&balloon_lock, flags);
-
- schedule_work(&balloon_worker);
-}
-
-
-#define BALLOON_SHOW(name, format, args...) \
- static ssize_t show_##name(struct sys_device *dev, \
- char *buf) \
- { \
- return sprintf(buf, format, ##args); \
- } \
+#define BALLOON_SHOW(name, format, args...) \
+ static ssize_t show_##name(struct sys_device *dev, \
+ struct sysdev_attribute *attr, \
+ char *buf) \
+ { \
+ return sprintf(buf, format, ##args); \
+ } \
static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
@@ -604,7 +480,8 @@ BALLOON_SHOW(hard_limit_kb,
(balloon_stats.hard_limit!=~0UL) ? PAGES2KB(balloon_stats.hard_limit) : 0);
BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages));
-static ssize_t show_target_kb(struct sys_device *dev, char *buf)
+static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
}
@@ -614,19 +491,14 @@ static ssize_t store_target_kb(struct sys_device *dev,
const char *buf,
size_t count)
{
- char memstring[64], *endchar;
+ char *endchar;
unsigned long long target_bytes;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (count <= 1)
- return -EBADMSG; /* runt */
- if (count > sizeof(memstring))
- return -EFBIG; /* too long */
- strcpy(memstring, buf);
+ target_bytes = memparse(buf, &endchar);
- target_bytes = memparse(memstring, &endchar);
balloon_set_new_target(target_bytes >> PAGE_SHIFT);
return count;
@@ -694,20 +566,4 @@ static int register_balloon(struct sys_device *sysdev)
return error;
}
-static void unregister_balloon(struct sys_device *sysdev)
-{
- int i;
-
- sysfs_remove_group(&sysdev->kobj, &balloon_info_group);
- for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++)
- sysdev_remove_file(sysdev, balloon_attrs[i]);
- sysdev_unregister(sysdev);
- sysdev_class_unregister(&balloon_sysdev_class);
-}
-
-static void balloon_sysfs_exit(void)
-{
- unregister_balloon(&balloon_sysdev);
-}
-
MODULE_LICENSE("GPL");
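[Commentary: the store_target_kb() rewrite above depends on the memparse() constification at the end of this patch (lib/cmdline.c and kernel.h): with a const char * prototype the sysfs buffer can be parsed in place, so the 64-byte bounce buffer and its runt/overflow checks go away. A userspace sketch of memparse()'s contract (decimal or hex number, optional K/M/G suffix); an approximation, not a copy of the kernel source.]

#include <stdio.h>
#include <stdlib.h>

/* Sketch of memparse(): parse a number plus optional K/M/G suffix and
 * leave *retptr just past what was consumed. */
static unsigned long long demo_memparse(const char *ptr, char **retptr)
{
	char *endptr;
	unsigned long long ret = strtoull(ptr, &endptr, 0);

	switch (*endptr) {
	case 'G': case 'g':
		ret <<= 10;	/* fall through */
	case 'M': case 'm':
		ret <<= 10;	/* fall through */
	case 'K': case 'k':
		ret <<= 10;
		endptr++;
	default:
		break;
	}
	if (retptr)
		*retptr = endptr;
	return ret;
}

int main(void)
{
	printf("%llu\n", demo_memparse("512M", NULL));   /* 536870912 */
	printf("%llu\n", demo_memparse("0x10K", NULL));  /* 16384 */
	return 0;
}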
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
new file mode 100644
index 00000000000..565280ec1c6
--- /dev/null
+++ b/drivers/xen/cpu_hotplug.c
@@ -0,0 +1,90 @@
+#include <linux/notifier.h>
+
+#include <xen/xenbus.h>
+
+#include <asm-x86/xen/hypervisor.h>
+#include <asm/cpu.h>
+
+static void enable_hotplug_cpu(int cpu)
+{
+ if (!cpu_present(cpu))
+ arch_register_cpu(cpu);
+
+ cpu_set(cpu, cpu_present_map);
+}
+
+static void disable_hotplug_cpu(int cpu)
+{
+ if (cpu_present(cpu))
+ arch_unregister_cpu(cpu);
+
+ cpu_clear(cpu, cpu_present_map);
+}
+
+static void vcpu_hotplug(unsigned int cpu)
+{
+ int err;
+ char dir[32], state[32];
+
+ if (!cpu_possible(cpu))
+ return;
+
+ sprintf(dir, "cpu/%u", cpu);
+ err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
+ if (err != 1) {
+ printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
+ return;
+ }
+
+ if (strcmp(state, "online") == 0) {
+ enable_hotplug_cpu(cpu);
+ } else if (strcmp(state, "offline") == 0) {
+ (void)cpu_down(cpu);
+ disable_hotplug_cpu(cpu);
+ } else {
+ printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
+ state, cpu);
+ }
+}
+
+static void handle_vcpu_hotplug_event(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ unsigned int cpu;
+ char *cpustr;
+ const char *node = vec[XS_WATCH_PATH];
+
+ cpustr = strstr(node, "cpu/");
+ if (cpustr != NULL) {
+ sscanf(cpustr, "cpu/%u", &cpu);
+ vcpu_hotplug(cpu);
+ }
+}
+
+static int setup_cpu_watcher(struct notifier_block *notifier,
+ unsigned long event, void *data)
+{
+ static struct xenbus_watch cpu_watch = {
+ .node = "cpu",
+ .callback = handle_vcpu_hotplug_event};
+
+ (void)register_xenbus_watch(&cpu_watch);
+
+ return NOTIFY_DONE;
+}
+
+static int __init setup_vcpu_hotplug_event(void)
+{
+ static struct notifier_block xsn_cpu = {
+ .notifier_call = setup_cpu_watcher };
+
+ if (!xen_pv_domain())
+ return -ENODEV;
+
+ register_xenstore_notifier(&xsn_cpu);
+
+ return 0;
+}
+
+arch_initcall(setup_vcpu_hotplug_event);
+
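[Commentary: the watch callback above dispatches by scanning the xenstore path for "cpu/" and reading the advertised availability. A standalone sketch of that parsing and dispatch; printf stubs replace cpu_down() and the arch_{register,unregister}_cpu() calls, and the state string is passed in rather than read via xenbus_scanf(). Not the xenbus API.]

#include <stdio.h>
#include <string.h>

static void demo_vcpu_hotplug(unsigned int cpu, const char *state)
{
	if (strcmp(state, "online") == 0)
		printf("cpu%u: mark present, allow bring-up\n", cpu);
	else if (strcmp(state, "offline") == 0)
		printf("cpu%u: cpu_down + unregister\n", cpu);
	else
		printf("cpu%u: unknown state '%s'\n", cpu, state);
}

static void demo_watch_fired(const char *node, const char *state)
{
	const char *cpustr = strstr(node, "cpu/");
	unsigned int cpu;

	if (cpustr && sscanf(cpustr, "cpu/%u", &cpu) == 1)
		demo_vcpu_hotplug(cpu, state);
}

int main(void)
{
	demo_watch_fired("cpu/3/availability", "offline");
	demo_watch_fired("cpu/1/availability", "online");
	return 0;
}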
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 0e0c28574af..c3290bc186a 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -84,17 +84,6 @@ static int irq_bindcount[NR_IRQS];
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)
-/*
- * Force a proper event-channel callback from Xen after clearing the
- * callback mask. We do this in a very simple manner, by making a call
- * down into Xen. The pending flag will be checked by Xen on return.
- */
-void force_evtchn_callback(void)
-{
- (void)HYPERVISOR_xen_version(0, NULL);
-}
-EXPORT_SYMBOL_GPL(force_evtchn_callback);
-
static struct irq_chip xen_dynamic_chip;
/* Constructor for packed IRQ information. */
@@ -175,6 +164,12 @@ static inline void set_evtchn(int port)
sync_set_bit(port, &s->evtchn_pending[0]);
}
+static inline int test_evtchn(int port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ return sync_test_bit(port, &s->evtchn_pending[0]);
+}
+
/**
* notify_remote_via_irq - send event to remote end of event channel via irq
@@ -365,6 +360,10 @@ static void unbind_from_irq(unsigned int irq)
per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
[index_from_irq(irq)] = -1;
break;
+ case IRQT_IPI:
+ per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
+ [index_from_irq(irq)] = -1;
+ break;
default:
break;
}
@@ -743,6 +742,25 @@ void xen_clear_irq_pending(int irq)
clear_evtchn(evtchn);
}
+void xen_set_irq_pending(int irq)
+{
+ int evtchn = evtchn_from_irq(irq);
+
+ if (VALID_EVTCHN(evtchn))
+ set_evtchn(evtchn);
+}
+
+bool xen_test_irq_pending(int irq)
+{
+ int evtchn = evtchn_from_irq(irq);
+ bool ret = false;
+
+ if (VALID_EVTCHN(evtchn))
+ ret = test_evtchn(evtchn);
+
+ return ret;
+}
+
/* Poll waiting for an irq to become pending. In the usual case, the
irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
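[Commentary: xen_set_irq_pending()/xen_test_irq_pending() exist for the nested-spinlock case: a lock-taker that interrupts another spinner and may have consumed its kick re-arms the pending bit, so the interrupted xen_poll_irq() returns immediately instead of blocking forever. The shape of that contract, reduced to a boolean flag; purely illustrative, with invented names.]

#include <stdbool.h>
#include <stdio.h>

static bool demo_evtchn_pending;   /* stands in for the shared-info bitmap */

static void demo_clear_pending(void) { demo_evtchn_pending = false; }
static void demo_set_pending(void)   { demo_evtchn_pending = true; }

static void demo_poll(void)
{
	/* xen_poll_irq() returns at once if the event is already pending */
	if (demo_evtchn_pending)
		puts("poll: returns immediately (event left pending)");
	else
		puts("poll: would block until the unlocker kicks us");
}

int main(void)
{
	demo_clear_pending();   /* outer spinner clears before blocking */
	/* ...interrupt: a nested spinner runs, takes the lock, and per the
	 * spinlock.c comment re-arms the event for the outer poller: */
	demo_set_pending();
	demo_poll();
	return 0;
}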
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index e9e11168616..06592b9da83 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -508,7 +508,7 @@ static int __devinit gnttab_init(void)
unsigned int max_nr_glist_frames, nr_glist_frames;
unsigned int nr_init_grefs;
- if (!is_running_on_xen())
+ if (!xen_domain())
return -ENODEV;
nr_grant_frames = 1;
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 57ceb5346b7..7f24a98a446 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -814,7 +814,7 @@ static int __init xenbus_probe_init(void)
DPRINTK("");
err = -ENODEV;
- if (!is_running_on_xen())
+ if (!xen_domain())
goto out_error;
/* Register ourselves with the kernel bus subsystem */
@@ -829,7 +829,7 @@ static int __init xenbus_probe_init(void)
/*
* Domain0 doesn't have a store_evtchn or store_mfn yet.
*/
- if (is_initial_xendomain()) {
+ if (xen_initial_domain()) {
/* dom0 not yet supported */
} else {
xenstored_ready = 1;
@@ -846,7 +846,7 @@ static int __init xenbus_probe_init(void)
goto out_unreg_back;
}
- if (!is_initial_xendomain())
+ if (!xen_initial_domain())
xenbus_probe(NULL);
return 0;
@@ -937,7 +937,7 @@ static void wait_for_devices(struct xenbus_driver *xendrv)
unsigned long timeout = jiffies + 10*HZ;
struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
- if (!ready_to_wait_for_devices || !is_running_on_xen())
+ if (!ready_to_wait_for_devices || !xen_domain())
return;
while (exists_disconnected_device(drv)) {
diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h
index a44c4dc7059..06f786f4b4f 100644
--- a/include/asm-x86/desc.h
+++ b/include/asm-x86/desc.h
@@ -24,6 +24,11 @@ static inline void fill_ldt(struct desc_struct *desc,
desc->d = info->seg_32bit;
desc->g = info->limit_in_pages;
desc->base2 = (info->base_addr & 0xff000000) >> 24;
+ /*
+ * Don't allow setting of the lm bit. It is useless anyway
+ * because 64bit system calls require __USER_CS:
+ */
+ desc->l = 0;
}
extern struct desc_ptr idt_descr;
@@ -97,7 +102,15 @@ static inline int desc_empty(const void *ptr)
native_write_gdt_entry(dt, entry, desc, type)
#define write_idt_entry(dt, entry, g) \
native_write_idt_entry(dt, entry, g)
-#endif
+
+static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+{
+}
+
+static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+{
+}
+#endif /* CONFIG_PARAVIRT */
static inline void native_write_idt_entry(gate_desc *idt, int entry,
const gate_desc *gate)
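[Commentary: with CONFIG_PARAVIRT off, paravirt_alloc_ldt()/paravirt_free_ldt() are empty inlines, so callers such as alloc_ldt() stay unconditional and native builds pay nothing. The same compile-time hook pattern in miniature; names invented, a printf standing in for the hypervisor notification.]

#include <stdio.h>

#define DEMO_PARAVIRT 1   /* flip to 0 for the "native" build */

#if DEMO_PARAVIRT
static inline void demo_paravirt_alloc_ldt(void *ldt, unsigned int entries)
{
	printf("tell hypervisor: ldt=%p entries=%u\n", ldt, entries);
}
#else
static inline void demo_paravirt_alloc_ldt(void *ldt, unsigned int entries)
{
	(void)ldt;        /* native: empty, optimized away */
	(void)entries;
}
#endif

int main(void)
{
	int ldt[16];
	demo_paravirt_alloc_ldt(ldt, 16);   /* caller needs no #ifdef */
	return 0;
}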
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index fbbde93f12d..8e9b1266898 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -124,6 +124,9 @@ struct pv_cpu_ops {
int entrynum, const void *desc, int size);
void (*write_idt_entry)(gate_desc *,
int entrynum, const gate_desc *gate);
+ void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
+ void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
+
void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
void (*set_iopl_mask)(unsigned mask);
@@ -330,6 +333,7 @@ struct pv_lock_ops {
int (*spin_is_locked)(struct raw_spinlock *lock);
int (*spin_is_contended)(struct raw_spinlock *lock);
void (*spin_lock)(struct raw_spinlock *lock);
+ void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
int (*spin_trylock)(struct raw_spinlock *lock);
void (*spin_unlock)(struct raw_spinlock *lock);
};
@@ -824,6 +828,16 @@ do { \
(aux) = __aux; \
} while (0)
+static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+{
+ PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
+}
+
+static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+{
+ PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
+}
+
static inline void load_TR_desc(void)
{
PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
@@ -1401,6 +1415,12 @@ static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}
+static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+ unsigned long flags)
+{
+ PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
+}
+
static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
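[Commentary: spin_lock_flags hands the pv backend the caller's saved EFLAGS so it can decide whether the slow path may re-enable interrupts while blocking (counted as taken_slow_irqenable in the stats above). The decision in isolation, assuming only that EFLAGS bit 9 is IF; demo names.]

#include <stdbool.h>
#include <stdio.h>

#define DEMO_EFLAGS_IF 0x200

static bool demo_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & DEMO_EFLAGS_IF);
}

/* mirrors xen_spin_lock_flags() -> __xen_spin_lock(lock, irq_enable) */
static void demo_spin_lock_flags(unsigned long flags)
{
	bool irq_enable = !demo_irqs_disabled_flags(flags);

	if (irq_enable)
		puts("slow path: may re-enable irqs while blocking");
	else
		puts("slow path: caller had irqs off, keep them off");
}

int main(void)
{
	demo_spin_lock_flags(DEMO_EFLAGS_IF);  /* caller had interrupts on */
	demo_spin_lock_flags(0);               /* caller had interrupts off */
	return 0;
}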
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 3c877f74f27..30b5146cc43 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -47,12 +47,16 @@ extern struct {
struct smp_ops {
void (*smp_prepare_boot_cpu)(void);
void (*smp_prepare_cpus)(unsigned max_cpus);
- int (*cpu_up)(unsigned cpu);
void (*smp_cpus_done)(unsigned max_cpus);
void (*smp_send_stop)(void);
void (*smp_send_reschedule)(int cpu);
+ int (*cpu_up)(unsigned cpu);
+ int (*cpu_disable)(void);
+ void (*cpu_die)(unsigned int cpu);
+ void (*play_dead)(void);
+
void (*send_call_func_ipi)(cpumask_t mask);
void (*send_call_func_single_ipi)(int cpu);
};
@@ -91,6 +95,21 @@ static inline int __cpu_up(unsigned int cpu)
return smp_ops.cpu_up(cpu);
}
+static inline int __cpu_disable(void)
+{
+ return smp_ops.cpu_disable();
+}
+
+static inline void __cpu_die(unsigned int cpu)
+{
+ smp_ops.cpu_die(cpu);
+}
+
+static inline void play_dead(void)
+{
+ smp_ops.play_dead();
+}
+
static inline void smp_send_reschedule(int cpu)
{
smp_ops.smp_send_reschedule(cpu);
@@ -106,16 +125,19 @@ static inline void arch_send_call_function_ipi(cpumask_t mask)
smp_ops.send_call_func_ipi(mask);
}
+void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
int native_cpu_up(unsigned int cpunum);
+int native_cpu_disable(void);
+void native_cpu_die(unsigned int cpu);
+void native_play_dead(void);
+void play_dead_common(void);
+
void native_send_call_func_ipi(cpumask_t mask);
void native_send_call_func_single_ipi(int cpu);
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
-
void smp_store_cpu_info(int id);
#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
@@ -200,9 +222,5 @@ static inline int hard_smp_processor_id(void)
#endif /* CONFIG_X86_LOCAL_APIC */
-#ifdef CONFIG_HOTPLUG_CPU
-extern void cpu_uninit(void);
-#endif
-
#endif /* __ASSEMBLY__ */
#endif
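[Commentary: __cpu_disable(), __cpu_die() and play_dead() stop being native-only externs and become inline dispatches through smp_ops, which is what lets the Xen code earlier in the patch plug in xen_cpu_disable()/xen_cpu_die()/xen_play_dead(). A compressed illustration of the refactor; all names here are invented.]

#include <stdio.h>

struct demo_smp_ops {
	int  (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
};

static int native_disable(void) { puts("native: migrate irqs away"); return 0; }
static void native_die(unsigned int cpu) { printf("native: wait for cpu%u\n", cpu); }

static struct demo_smp_ops ops = {
	.cpu_disable = native_disable,
	.cpu_die     = native_die,
};

/* the generic wrappers the hotplug core calls */
static int demo_cpu_disable(void) { return ops.cpu_disable(); }
static void demo_cpu_die(unsigned int cpu) { ops.cpu_die(cpu); }

static int pv_disable(void) { puts("pv backend: tear down event channels"); return 0; }

int main(void)
{
	demo_cpu_disable();            /* native path */
	ops.cpu_disable = pv_disable;  /* a guest swaps in its own hook */
	demo_cpu_disable();
	demo_cpu_die(1);
	return 0;
}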
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index e39c790dbfd..b755ea86367 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -182,8 +182,6 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
}
#endif
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-
#ifdef CONFIG_PARAVIRT
/*
* Define virtualization-friendly old-style lock byte lock, for use in
@@ -272,6 +270,13 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
__ticket_spin_unlock(lock);
}
+
+static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+ unsigned long flags)
+{
+ __raw_spin_lock(lock);
+}
+
#endif /* CONFIG_PARAVIRT */
static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h
index 35c76ceb9f4..0e7bbb54911 100644
--- a/include/asm-x86/tlbflush.h
+++ b/include/asm-x86/tlbflush.h
@@ -119,6 +119,10 @@ static inline void native_flush_tlb_others(const cpumask_t *cpumask,
{
}
+static inline void reset_lazy_tlbstate(void)
+{
+}
+
#else /* SMP */
#include <asm/smp.h>
@@ -151,6 +155,12 @@ struct tlb_state {
char __cacheline_padding[L1_CACHE_BYTES-8];
};
DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+
+void reset_lazy_tlbstate(void);
+#else
+static inline void reset_lazy_tlbstate(void)
+{
+}
#endif
#endif /* SMP */
diff --git a/include/asm-x86/xen/hypervisor.h b/include/asm-x86/xen/hypervisor.h
index 04ee0610014..fca066febc3 100644
--- a/include/asm-x86/xen/hypervisor.h
+++ b/include/asm-x86/xen/hypervisor.h
@@ -54,7 +54,6 @@
/* arch/i386/kernel/setup.c */
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
-#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
/* arch/i386/mach-xen/evtchn.c */
/* Force a proper event-channel callback from Xen. */
@@ -67,6 +66,17 @@ u64 jiffies_to_st(unsigned long jiffies);
#define MULTI_UVMFLAGS_INDEX 3
#define MULTI_UVMDOMID_INDEX 4
-#define is_running_on_xen() (xen_start_info ? 1 : 0)
+enum xen_domain_type {
+ XEN_NATIVE,
+ XEN_PV_DOMAIN,
+ XEN_HVM_DOMAIN,
+};
+
+extern enum xen_domain_type xen_domain_type;
+
+#define xen_domain() (xen_domain_type != XEN_NATIVE)
+#define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN)
+#define xen_initial_domain() (xen_pv_domain() && xen_start_info->flags & SIF_INITDOMAIN)
+#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
#endif /* __HYPERVISOR_H__ */
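[Commentary: the boolean is_running_on_xen()/is_initial_xendomain() tests are replaced by one enum plus derived predicates, which is what lets HVM guests be told apart from PV ones. Note how the frontend drivers earlier in the patch switch to xen_domain() (any Xen guest) while pv-only code such as the balloon asks xen_pv_domain(). The shape of the scheme in a demo translation unit; demo_* names are illustrative.]

#include <stdio.h>

enum demo_domain_type { DEMO_NATIVE, DEMO_PV_DOMAIN, DEMO_HVM_DOMAIN };

static enum demo_domain_type demo_domain_type = DEMO_PV_DOMAIN; /* set at boot */

#define demo_xen_domain()     (demo_domain_type != DEMO_NATIVE)
#define demo_xen_pv_domain()  (demo_domain_type == DEMO_PV_DOMAIN)
#define demo_xen_hvm_domain() (demo_domain_type == DEMO_HVM_DOMAIN)

int main(void)
{
	printf("xen? %d  pv? %d  hvm? %d\n",
	       demo_xen_domain(), demo_xen_pv_domain(), demo_xen_hvm_domain());
	return 0;
}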
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2651f805ba6..75d81f157d2 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -182,7 +182,7 @@ extern int vsscanf(const char *, const char *, va_list)
extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
-extern unsigned long long memparse(char *ptr, char **retptr);
+extern unsigned long long memparse(const char *ptr, char **retptr);
extern int core_kernel_text(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 72a15dc26bb..4194bf8e4f6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -919,7 +919,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
/*
* We tuck a spinlock to guard each pagetable page into its struct page,
* at page->private, with BUILD_BUG_ON to make sure that this will not
@@ -932,14 +932,14 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
} while (0)
#define pte_lock_deinit(page) ((page)->mapping = NULL)
#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
-#else
+#else /* !USE_SPLIT_PTLOCKS */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
*/
#define pte_lock_init(page) do {} while (0)
#define pte_lock_deinit(page) do {} while (0)
#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* USE_SPLIT_PTLOCKS */
static inline void pgtable_page_ctor(struct page *page)
{
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index bf334138c7c..9d49fa36bbe 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -21,11 +21,13 @@
struct address_space;
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#define USE_SPLIT_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+
+#if USE_SPLIT_PTLOCKS
typedef atomic_long_t mm_counter_t;
-#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#else /* !USE_SPLIT_PTLOCKS */
typedef unsigned long mm_counter_t;
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* !USE_SPLIT_PTLOCKS */
/*
* Each physical page in the system has a struct page associated with
@@ -65,7 +67,7 @@ struct page {
* see PAGE_MAPPING_ANON below.
*/
};
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
spinlock_t ptl;
#endif
struct kmem_cache *slab; /* SLUB: Pointer to slab */
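[Commentary: rather than repeating the NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS comparison at every site, mm_types.h now computes it once as USE_SPLIT_PTLOCKS and the other headers test the named macro. The same centralize-the-policy move in a toy translation unit; values invented.]

#include <stdio.h>

#define DEMO_NR_CPUS            8
#define DEMO_SPLIT_PTLOCK_CPUS  4

/* one place computes the policy... */
#define DEMO_USE_SPLIT_PTLOCKS (DEMO_NR_CPUS >= DEMO_SPLIT_PTLOCK_CPUS)

/* ...every conditional tests the name */
#if DEMO_USE_SPLIT_PTLOCKS
typedef long demo_mm_counter_t;           /* stands in for atomic_long_t */
#else
typedef unsigned long demo_mm_counter_t;  /* plain counter, lock-protected */
#endif

int main(void)
{
	demo_mm_counter_t c = 0;
	printf("split ptlocks: %d, counter=%ld\n",
	       (int)DEMO_USE_SPLIT_PTLOCKS, (long)c);
	return 0;
}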
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3d9120c5ad1..272c35309df 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -352,7 +352,7 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
/*
* The mm counters are not protected by its page_table_lock,
* so must be incremented atomically.
@@ -363,7 +363,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
-#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#else /* !USE_SPLIT_PTLOCKS */
/*
* The mm counters are protected by its page_table_lock,
* so can be incremented directly.
@@ -374,7 +374,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#define inc_mm_counter(mm, member) (mm)->_##member++
#define dec_mm_counter(mm, member) (mm)->_##member--
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* !USE_SPLIT_PTLOCKS */
#define get_mm_rss(mm) \
(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
diff --git a/include/xen/events.h b/include/xen/events.h
index 4680ff3fbc9..0d5f1adc036 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -46,6 +46,8 @@ extern void xen_irq_resume(void);
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq);
+void xen_set_irq_pending(int irq);
+bool xen_test_irq_pending(int irq);
/* Poll waiting for an irq to become pending. In the usual case, the
irq will be disabled so it won't deliver an interrupt. */
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 5ba8a942a47..f5f3ad8b62f 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -126,7 +126,7 @@ char *get_options(const char *str, int nints, int *ints)
* megabyte, or one gigabyte, respectively.
*/
-unsigned long long memparse(char *ptr, char **retptr)
+unsigned long long memparse(const char *ptr, char **retptr)
{
char *endptr; /* local pointer to end of parsed string */