From 1a960b402a51d80abf54e3f8e4972374ffe5f22d Mon Sep 17 00:00:00 2001 From: Jason Yeh Date: Wed, 23 Jul 2008 23:05:53 +0200 Subject: Oprofile Multiplexing Patch This patch introduces multiplexing support for the Oprofile kernel module. It basically adds a new function pointer in oprofile_operator allowing each architecture to supply its callback to switch between different sets of event when the timer expires. Userspace tools can modify the time slice through /dev/oprofile/time_slice. It also modifies the number of counters exposed to the userspace through /dev/oprofile. For example, the number of counters for AMD CPUs are changed to 32 and multiplexed in the sets of 4. Signed-off-by: Jason Yeh Signed-off-by: Robert Richter Cc: oprofile-list Signed-off-by: Ingo Molnar --- arch/x86/oprofile/nmi_int.c | 100 +++++++++++++++++++++++++++++++++++--- arch/x86/oprofile/op_counter.h | 3 +- arch/x86/oprofile/op_model_amd.c | 76 +++++++++++++++++------------ arch/x86/oprofile/op_model_p4.c | 4 ++ arch/x86/oprofile/op_model_ppro.c | 2 + arch/x86/oprofile/op_x86_model.h | 3 ++ 6 files changed, 149 insertions(+), 39 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 287513a0981..2a65fe7680a 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -23,12 +23,18 @@ #include "op_counter.h" #include "op_x86_model.h" +DEFINE_PER_CPU(int, switch_index); + static struct op_x86_model_spec const *model; static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); static DEFINE_PER_CPU(unsigned long, saved_lvtpc); static int nmi_start(void); static void nmi_stop(void); +static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs); +static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs); +static void nmi_cpu_stop(void *dummy); +static void nmi_cpu_start(void *dummy); /* 0 == registered but off, 1 == registered and on */ static int nmi_enabled = 0; @@ -81,6 +87,47 @@ static void exit_sysfs(void) #define exit_sysfs() do { } while (0) #endif /* CONFIG_PM */ +static void nmi_cpu_switch(void *dummy) +{ + int cpu = smp_processor_id(); + int si = per_cpu(switch_index, cpu); + struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); + + nmi_cpu_stop(NULL); + nmi_cpu_save_mpx_registers(msrs); + + /* move to next set */ + si += model->num_hardware_counters; + if ((si > model->num_counters) || (counter_config[si].count == 0)) + per_cpu(switch_index, smp_processor_id()) = 0; + else + per_cpu(switch_index, smp_processor_id()) = si; + + nmi_cpu_restore_mpx_registers(msrs); + model->setup_ctrs(msrs); + nmi_cpu_start(NULL); +} + +/* + * Quick check to see if multiplexing is necessary. + * The check should be sufficient since counters are used + * in ordre. + */ +static int nmi_multiplex_on(void) +{ + return counter_config[model->num_hardware_counters].count ? 
0 : -EINVAL; +} + +static int nmi_switch_event(void) +{ + if (nmi_multiplex_on() < 0) + return -EINVAL; + + on_each_cpu(nmi_cpu_switch, NULL, 0, 1); + + return 0; +} + static int profile_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { @@ -144,11 +191,10 @@ static void free_msrs(void) static int allocate_msrs(void) { - int success = 1; + int i, success = 1; size_t controls_size = sizeof(struct op_msr) * model->num_controls; size_t counters_size = sizeof(struct op_msr) * model->num_counters; - int i; for_each_possible_cpu(i) { per_cpu(cpu_msrs, i).counters = kmalloc(counters_size, GFP_KERNEL); @@ -156,8 +202,8 @@ static int allocate_msrs(void) success = 0; break; } - per_cpu(cpu_msrs, i).controls = kmalloc(controls_size, - GFP_KERNEL); + per_cpu(cpu_msrs, i).controls = + kmalloc(controls_size, GFP_KERNEL); if (!per_cpu(cpu_msrs, i).controls) { success = 0; break; @@ -201,7 +247,8 @@ static int nmi_setup(void) return err; } - /* We need to serialize save and setup for HT because the subset + /* + * We need to serialize save and setup for HT because the subset * of msrs are distinct for save and setup operations */ @@ -217,7 +264,6 @@ static int nmi_setup(void) per_cpu(cpu_msrs, 0).controls, sizeof(struct op_msr) * model->num_controls); } - } on_each_cpu(nmi_save_registers, NULL, 1); on_each_cpu(nmi_cpu_setup, NULL, 1); @@ -225,7 +271,41 @@ static int nmi_setup(void) return 0; } -static void nmi_restore_registers(struct op_msrs *msrs) +static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs) +{ + unsigned int si = __get_cpu_var(switch_index); + unsigned int const nr_ctrs = model->num_hardware_counters; + struct op_msr *counters = &msrs->counters[si]; + unsigned int i; + + for (i = 0; i < nr_ctrs; ++i) { + int offset = i + si; + if (counters[offset].addr) { + rdmsr(counters[offset].addr, + counters[offset].multiplex.low, + counters[offset].multiplex.high); + } + } +} + +static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs) +{ + unsigned int si = __get_cpu_var(switch_index); + unsigned int const nr_ctrs = model->num_hardware_counters; + struct op_msr *counters = &msrs->counters[si]; + unsigned int i; + + for (i = 0; i < nr_ctrs; ++i) { + int offset = i + si; + if (counters[offset].addr) { + wrmsr(counters[offset].addr, + counters[offset].multiplex.low, + counters[offset].multiplex.high); + } + } +} + +static void nmi_cpu_restore_registers(struct op_msrs *msrs) { unsigned int const nr_ctrs = model->num_counters; unsigned int const nr_ctrls = model->num_controls; @@ -265,7 +345,8 @@ static void nmi_cpu_shutdown(void *dummy) apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); apic_write(APIC_LVTERR, v); - nmi_restore_registers(msrs); + nmi_cpu_restore_registers(msrs); + __get_cpu_var(switch_index) = 0; } static void nmi_shutdown(void) @@ -328,6 +409,7 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root) oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask); oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel); oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user); + counter_config[i].save_count_low = 0; } return 0; @@ -469,12 +551,14 @@ int __init op_nmi_init(struct oprofile_operations *ops) } /* default values, can be overwritten by model */ + __get_cpu_var(switch_index) = 0; ops->create_files = nmi_create_files; ops->setup = nmi_setup; ops->shutdown = nmi_shutdown; ops->start = nmi_start; ops->stop = nmi_stop; ops->cpu_type = 
cpu_type; + ops->switch_events = nmi_switch_event; if (model->init) ret = model->init(ops); diff --git a/arch/x86/oprofile/op_counter.h b/arch/x86/oprofile/op_counter.h index 2880b15c467..786d6e01cf7 100644 --- a/arch/x86/oprofile/op_counter.h +++ b/arch/x86/oprofile/op_counter.h @@ -10,13 +10,14 @@ #ifndef OP_COUNTER_H #define OP_COUNTER_H -#define OP_MAX_COUNTER 8 +#define OP_MAX_COUNTER 32 /* Per-perfctr configuration as set via * oprofilefs. */ struct op_counter_config { unsigned long count; + unsigned long save_count_low; unsigned long enabled; unsigned long event; unsigned long kernel; diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index d9faf607b3a..bbf2b68bcc5 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -23,8 +24,10 @@ #include "op_x86_model.h" #include "op_counter.h" -#define NUM_COUNTERS 4 -#define NUM_CONTROLS 4 +#define NUM_COUNTERS 32 +#define NUM_HARDWARE_COUNTERS 4 +#define NUM_CONTROLS 32 +#define NUM_HARDWARE_CONTROLS 4 #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) @@ -48,6 +51,7 @@ #define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8)) static unsigned long reset_value[NUM_COUNTERS]; +DECLARE_PER_CPU(int, switch_index); #ifdef CONFIG_OPROFILE_IBS @@ -130,15 +134,17 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs) int i; for (i = 0; i < NUM_COUNTERS; i++) { - if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) - msrs->counters[i].addr = MSR_K7_PERFCTR0 + i; + int hw_counter = i % NUM_HARDWARE_COUNTERS; + if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + hw_counter)) + msrs->counters[i].addr = MSR_K7_PERFCTR0 + hw_counter; else msrs->counters[i].addr = 0; } for (i = 0; i < NUM_CONTROLS; i++) { - if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) - msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i; + int hw_control = i % NUM_HARDWARE_CONTROLS; + if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + hw_control)) + msrs->controls[i].addr = MSR_K7_EVNTSEL0 + hw_control; else msrs->controls[i].addr = 0; } @@ -150,8 +156,16 @@ static void op_amd_setup_ctrs(struct op_msrs const * const msrs) unsigned int low, high; int i; + for (i = 0; i < NUM_HARDWARE_CONTROLS; ++i) { + int offset = i + __get_cpu_var(switch_index); + if (counter_config[offset].enabled) + reset_value[offset] = counter_config[offset].count; + else + reset_value[offset] = 0; + } + /* clear all counters */ - for (i = 0 ; i < NUM_CONTROLS; ++i) { + for (i = 0 ; i < NUM_HARDWARE_CONTROLS; ++i) { if (unlikely(!CTRL_IS_RESERVED(msrs, i))) continue; CTRL_READ(low, high, msrs, i); @@ -161,34 +175,31 @@ static void op_amd_setup_ctrs(struct op_msrs const * const msrs) } /* avoid a false detection of ctr overflows in NMI handler */ - for (i = 0; i < NUM_COUNTERS; ++i) { + for (i = 0; i < NUM_HARDWARE_COUNTERS; ++i) { if (unlikely(!CTR_IS_RESERVED(msrs, i))) continue; CTR_WRITE(1, msrs, i); } /* enable active counters */ - for (i = 0; i < NUM_COUNTERS; ++i) { - if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { - reset_value[i] = counter_config[i].count; - - CTR_WRITE(counter_config[i].count, msrs, i); + for (i = 0; i < NUM_HARDWARE_COUNTERS; ++i) { + int offset = i + __get_cpu_var(switch_index); + if ((counter_config[offset].enabled) && (CTR_IS_RESERVED(msrs, i))) { + CTR_WRITE(counter_config[offset].count, msrs, i); CTRL_READ(low, high, msrs, i); 
CTRL_CLEAR_LO(low); CTRL_CLEAR_HI(high); CTRL_SET_ENABLE(low); - CTRL_SET_USR(low, counter_config[i].user); - CTRL_SET_KERN(low, counter_config[i].kernel); - CTRL_SET_UM(low, counter_config[i].unit_mask); - CTRL_SET_EVENT_LOW(low, counter_config[i].event); - CTRL_SET_EVENT_HIGH(high, counter_config[i].event); + CTRL_SET_USR(low, counter_config[offset].user); + CTRL_SET_KERN(low, counter_config[offset].kernel); + CTRL_SET_UM(low, counter_config[offset].unit_mask); + CTRL_SET_EVENT_LOW(low, counter_config[offset].event); + CTRL_SET_EVENT_HIGH(high, counter_config[offset].event); CTRL_SET_HOST_ONLY(high, 0); CTRL_SET_GUEST_ONLY(high, 0); CTRL_WRITE(low, high, msrs, i); - } else { - reset_value[i] = 0; } } } @@ -276,13 +287,14 @@ static int op_amd_check_ctrs(struct pt_regs * const regs, unsigned int low, high; int i; - for (i = 0 ; i < NUM_COUNTERS; ++i) { - if (!reset_value[i]) + for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { + int offset = i + __get_cpu_var(switch_index); + if (!reset_value[offset]) continue; CTR_READ(low, high, msrs, i); if (CTR_OVERFLOWED(low)) { - oprofile_add_sample(regs, i); - CTR_WRITE(reset_value[i], msrs, i); + oprofile_add_sample(regs, offset); + CTR_WRITE(reset_value[offset], msrs, i); } } @@ -298,8 +310,10 @@ static void op_amd_start(struct op_msrs const * const msrs) { unsigned int low, high; int i; - for (i = 0 ; i < NUM_COUNTERS ; ++i) { - if (reset_value[i]) { + + for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { + int offset = i + __get_cpu_var(switch_index); + if (reset_value[offset]) { CTRL_READ(low, high, msrs, i); CTRL_SET_ACTIVE(low); CTRL_WRITE(low, high, msrs, i); @@ -329,8 +343,8 @@ static void op_amd_stop(struct op_msrs const * const msrs) /* Subtle: stop on all counters to avoid race with * setting our pm callback */ - for (i = 0 ; i < NUM_COUNTERS ; ++i) { - if (!reset_value[i]) + for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { + if (!reset_value[i + per_cpu(switch_index, smp_processor_id())]) continue; CTRL_READ(low, high, msrs, i); CTRL_SET_INACTIVE(low); @@ -356,11 +370,11 @@ static void op_amd_shutdown(struct op_msrs const * const msrs) { int i; - for (i = 0 ; i < NUM_COUNTERS ; ++i) { + for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { if (CTR_IS_RESERVED(msrs, i)) release_perfctr_nmi(MSR_K7_PERFCTR0 + i); } - for (i = 0 ; i < NUM_CONTROLS ; ++i) { + for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { if (CTRL_IS_RESERVED(msrs, i)) release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); } @@ -534,6 +548,8 @@ struct op_x86_model_spec const op_amd_spec = { .exit = op_amd_exit, .num_counters = NUM_COUNTERS, .num_controls = NUM_CONTROLS, + .num_hardware_counters = NUM_HARDWARE_COUNTERS, + .num_hardware_controls = NUM_HARDWARE_CONTROLS, .fill_in_addresses = &op_amd_fill_in_addresses, .setup_ctrs = &op_amd_setup_ctrs, .check_ctrs = &op_amd_check_ctrs, diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 56b4757a1f4..e641545d479 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c @@ -701,6 +701,8 @@ static void p4_shutdown(struct op_msrs const * const msrs) struct op_x86_model_spec const op_p4_ht2_spec = { .num_counters = NUM_COUNTERS_HT2, .num_controls = NUM_CONTROLS_HT2, + .num_hardware_counters = NUM_COUNTERS_HT2, + .num_hardware_controls = NUM_CONTROLS_HT2, .fill_in_addresses = &p4_fill_in_addresses, .setup_ctrs = &p4_setup_ctrs, .check_ctrs = &p4_check_ctrs, @@ -713,6 +715,8 @@ struct op_x86_model_spec const op_p4_ht2_spec = { struct op_x86_model_spec const op_p4_spec = { .num_counters = 
NUM_COUNTERS_NON_HT, .num_controls = NUM_CONTROLS_NON_HT, + .num_hardware_counters = NUM_COUNTERS_NON_HT, + .num_hardware_controls = NUM_CONTROLS_NON_HT, .fill_in_addresses = &p4_fill_in_addresses, .setup_ctrs = &p4_setup_ctrs, .check_ctrs = &p4_check_ctrs, diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index eff431f6c57..e5811aa480e 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -183,6 +183,8 @@ static void ppro_shutdown(struct op_msrs const * const msrs) struct op_x86_model_spec const op_ppro_spec = { .num_counters = NUM_COUNTERS, .num_controls = NUM_CONTROLS, + .num_hardware_counters = NUM_COUNTERS, + .num_hardware_controls = NUM_CONTROLS, .fill_in_addresses = &ppro_fill_in_addresses, .setup_ctrs = &ppro_setup_ctrs, .check_ctrs = &ppro_check_ctrs, diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index 05a0261ba0c..e07ba107637 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -19,6 +19,7 @@ struct op_saved_msr { struct op_msr { unsigned long addr; struct op_saved_msr saved; + struct op_saved_msr multiplex; }; struct op_msrs { @@ -34,6 +35,8 @@ struct pt_regs; struct op_x86_model_spec { int (*init)(struct oprofile_operations *ops); void (*exit)(void); + unsigned int const num_hardware_counters; + unsigned int const num_hardware_controls; unsigned int const num_counters; unsigned int const num_controls; void (*fill_in_addresses)(struct op_msrs * const msrs); -- cgit v1.2.3 From 7e7b43892b87b6be259479ef4de14029dcb4012f Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Thu, 24 Jul 2008 16:00:16 +0200 Subject: x86/oprofile: fix on_each_cpu build error Signed-off-by: Robert Richter Cc: oprofile-list Cc: Jason Yeh Signed-off-by: Ingo Molnar --- arch/x86/oprofile/nmi_int.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 2a65fe7680a..fb4902bc6f1 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -123,7 +123,7 @@ static int nmi_switch_event(void) if (nmi_multiplex_on() < 0) return -EINVAL; - on_each_cpu(nmi_cpu_switch, NULL, 0, 1); + on_each_cpu(nmi_cpu_switch, NULL, 1); return 0; } -- cgit v1.2.3 From beb20d52d03a51218827fb4a36a4b583debb03f9 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Sep 2008 14:55:57 -0700 Subject: hrtimer: convert kvm to the new hrtimer apis In order to be able to do range hrtimers we need to use accessor functions to the "expire" member of the hrtimer struct. This patch converts KVM to these accessors. Signed-off-by: Arjan van de Ven --- arch/x86/kvm/i8254.c | 6 +++--- arch/x86/kvm/lapic.c | 6 ++---- 2 files changed, 5 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index c0f7872a912..1bf8f57a304 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -205,8 +205,8 @@ static int __pit_timer_fn(struct kvm_kpit_state *ps) wake_up_interruptible(&vcpu0->wq); } - pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period); - pt->scheduled = ktime_to_ns(pt->timer.expires); + hrtimer_add_expires_ns(&pt->timer, pt->period); + pt->scheduled = ktime_to_ns(hrtimer_get_expires(&pt->timer)); return (pt->period == 0 ? 
0 : 1); } @@ -246,7 +246,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu) timer = &pit->pit_state.pit_timer.timer; if (hrtimer_cancel(timer)) - hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS); + hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } static void destroy_pit_timer(struct kvm_kpit_timer *pt) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 73f43de69f6..a5b61de6adf 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -953,9 +953,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic) } if (apic_lvtt_period(apic)) { result = 1; - apic->timer.dev.expires = ktime_add_ns( - apic->timer.dev.expires, - apic->timer.period); + hrtimer_add_expires_ns(&apic->timer.dev, apic->timer.period); } return result; } @@ -1124,7 +1122,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) timer = &apic->timer.dev; if (hrtimer_cancel(timer)) - hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS); + hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 23446d1dc3d4f42a2b0fb82d4a098f9179ba486d Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Sep 2008 15:18:10 -0700 Subject: hrtimer: convert powerpc/oprofile to the new hrtimer apis In order to be able to do range hrtimers we need to use accessor functions to the "expire" member of the hrtimer struct. This patch converts powerpc/oprofile to these accessors. Signed-off-by: Arjan van de Ven --- arch/powerpc/oprofile/cell/spu_profiler.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c index 380d7e21753..02ffe060db5 100644 --- a/arch/powerpc/oprofile/cell/spu_profiler.c +++ b/arch/powerpc/oprofile/cell/spu_profiler.c @@ -196,7 +196,7 @@ int start_spu_profiling(unsigned int cycles_reset) pr_debug("timer resolution: %lu\n", TICK_NSEC); kt = ktime_set(0, profiling_interval); hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - timer.expires = kt; + hrtimer_set_expires(&timer, kt); timer.function = profile_spus; /* Allocate arrays for collecting SPU PC samples */ -- cgit v1.2.3 From 18dd36af0010dd70c8634cdca0f99b47b5036c60 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Sep 2008 15:19:11 -0700 Subject: hrtimer: convert kvm-ia64 to the new hrtimer apis In order to be able to do range hrtimers we need to use accessor functions to the "expire" member of the hrtimer struct. This patch converts KVM-ia64 to these accessors. Signed-off-by: Arjan van de Ven --- arch/ia64/kvm/kvm-ia64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 7a37d06376b..cf8eae1855e 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1112,7 +1112,7 @@ static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu) struct hrtimer *p_ht = &vcpu->arch.hlt_timer; if (hrtimer_cancel(p_ht)) - hrtimer_start(p_ht, p_ht->expires, HRTIMER_MODE_ABS); + hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS); } static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data) -- cgit v1.2.3 From 2f86c3e67d6423d6d23ee2f737ad4f0730435742 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 17 Sep 2008 16:34:11 +0100 Subject: uwb: add the UWB stack (build system) The Kbuild and Kconfig files. 
Signed-off-by: Greg Kroah-Hartman Signed-off-by: David Vrabel --- arch/arm/Kconfig | 2 ++ arch/cris/Kconfig | 2 ++ arch/h8300/Kconfig | 2 ++ 3 files changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 70dba166890..8eedbfa52f0 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1220,6 +1220,8 @@ source "drivers/hid/Kconfig" source "drivers/usb/Kconfig" +source "drivers/uwb/Kconfig" + source "drivers/mmc/Kconfig" source "drivers/leds/Kconfig" diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index 9389d38f222..cb66c4da25c 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig @@ -677,6 +677,8 @@ source "fs/Kconfig" source "drivers/usb/Kconfig" +source "drivers/uwb/Kconfig" + source "arch/cris/Kconfig.debug" source "security/Kconfig" diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index 396ab059efa..2b413325e88 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -213,6 +213,8 @@ source "drivers/hwmon/Kconfig" source "drivers/usb/Kconfig" +source "drivers/uwb/Kconfig" + endmenu source "fs/Kconfig" -- cgit v1.2.3 From 45f197ade73ba95681b9803680c75352fc0a1c0a Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Sat, 20 Sep 2008 12:58:40 +0200 Subject: x86, oprofile: BUG: using smp_processor_id() in preemptible code Add __raw access before setting per cpu variable switch_index, to avoid the following BUG: [ 449.166827] BUG: using smp_processor_id() in preemptible [00000000] code: modprobe/6998 [ 449.166848] caller is op_nmi_init+0xf0/0x2b0 [oprofile] [ 449.166855] Pid: 6998, comm: modprobe Not tainted 2.6.27-rc5-mm1 #29 [ 449.166860] Call Trace: [ 449.166872] [] debug_smp_processor_id+0xd7/0xe0 [ 449.166887] [] op_nmi_init+0xf0/0x2b0 [oprofile] [ 449.166902] [] oprofile_init+0x0/0x60 [oprofile] [ 449.166915] [] oprofile_arch_init+0x9/0x30 [oprofile] [ 449.166928] [] oprofile_init+0x1e/0x60 [oprofile] [ 449.166937] [] _stext+0x3b/0x160 [ 449.166946] [] __mutex_unlock_slowpath+0xe5/0x190 [ 449.166955] [] trace_hardirqs_on_caller+0xca/0x140 [ 449.166965] [] sys_init_module+0xdc/0x210 [ 449.166972] [] system_call_fastpath+0x16/0x1b Signed-off-by: Andrea Righi Acked-by: Robert Richter Signed-off-by: Ingo Molnar --- arch/x86/oprofile/nmi_int.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index fb4902bc6f1..4108d02c529 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -551,7 +551,7 @@ int __init op_nmi_init(struct oprofile_operations *ops) } /* default values, can be overwritten by model */ - __get_cpu_var(switch_index) = 0; + __raw_get_cpu_var(switch_index) = 0; ops->create_files = nmi_create_files; ops->setup = nmi_setup; ops->shutdown = nmi_shutdown; -- cgit v1.2.3 From 2fd47094f92fa2bdbf99be33294a7b6b97785a70 Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Mon, 1 Sep 2008 14:27:03 +0200 Subject: CPUFREQ: powernow-k8: Try to detect old BIOS, not supporting CPU freq on a recent AMD CPUs. 
Make use of FW_BUG interface to give vendors and users the ability to automatically check for powernow-k8 related BIOS bugs by: dmesg |grep "Firmware Bug" Signed-off-by: Thomas Renninger Signed-off-by: Andi Kleen Signed-off-by: Len Brown --- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 42 ++++++++++++++++++------------- 1 file changed, 24 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 84bb395038d..4e0c6abd7ca 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -45,7 +45,6 @@ #endif #define PFX "powernow-k8: " -#define BFX PFX "BIOS error: " #define VERSION "version 2.20.00" #include "powernow-k8.h" @@ -536,35 +535,40 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 for (j = 0; j < data->numps; j++) { if (pst[j].vid > LEAST_VID) { - printk(KERN_ERR PFX "vid %d invalid : 0x%x\n", j, pst[j].vid); + printk(KERN_ERR FW_BUG PFX "vid %d invalid : 0x%x\n", + j, pst[j].vid); return -EINVAL; } if (pst[j].vid < data->rvo) { /* vid + rvo >= 0 */ - printk(KERN_ERR BFX "0 vid exceeded with pstate %d\n", j); + printk(KERN_ERR FW_BUG PFX "0 vid exceeded with pstate" + " %d\n", j); return -ENODEV; } if (pst[j].vid < maxvid + data->rvo) { /* vid + rvo >= maxvid */ - printk(KERN_ERR BFX "maxvid exceeded with pstate %d\n", j); + printk(KERN_ERR FW_BUG PFX "maxvid exceeded with pstate" + " %d\n", j); return -ENODEV; } if (pst[j].fid > MAX_FID) { - printk(KERN_ERR BFX "maxfid exceeded with pstate %d\n", j); + printk(KERN_ERR FW_BUG PFX "maxfid exceeded with pstate" + " %d\n", j); return -ENODEV; } if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) { /* Only first fid is allowed to be in "low" range */ - printk(KERN_ERR BFX "two low fids - %d : 0x%x\n", j, pst[j].fid); + printk(KERN_ERR FW_BUG PFX "two low fids - %d : " + "0x%x\n", j, pst[j].fid); return -EINVAL; } if (pst[j].fid < lastfid) lastfid = pst[j].fid; } if (lastfid & 1) { - printk(KERN_ERR BFX "lastfid invalid\n"); + printk(KERN_ERR FW_BUG PFX "lastfid invalid\n"); return -EINVAL; } if (lastfid > LO_FID_TABLE_TOP) - printk(KERN_INFO BFX "first fid not from lo freq table\n"); + printk(KERN_INFO FW_BUG PFX "first fid not from lo freq table\n"); return 0; } @@ -672,13 +676,13 @@ static int find_psb_table(struct powernow_k8_data *data) dprintk("table vers: 0x%x\n", psb->tableversion); if (psb->tableversion != PSB_VERSION_1_4) { - printk(KERN_ERR BFX "PSB table is not v1.4\n"); + printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n"); return -ENODEV; } dprintk("flags: 0x%x\n", psb->flags1); if (psb->flags1) { - printk(KERN_ERR BFX "unknown flags\n"); + printk(KERN_ERR FW_BUG PFX "unknown flags\n"); return -ENODEV; } @@ -705,7 +709,7 @@ static int find_psb_table(struct powernow_k8_data *data) } } if (cpst != 1) { - printk(KERN_ERR BFX "numpst must be 1\n"); + printk(KERN_ERR FW_BUG PFX "numpst must be 1\n"); return -ENODEV; } @@ -1130,17 +1134,19 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) "ACPI Processor module before starting this " "driver.\n"); #else - printk(KERN_ERR PFX "Your BIOS does not provide ACPI " - "_PSS objects in a way that Linux understands. " - "Please report this to the Linux ACPI maintainers" - " and complain to your BIOS vendor.\n"); + printk(KERN_ERR FW_BUG PFX "Your BIOS does not provide" + " ACPI _PSS objects in a way that Linux " + "understands. 
Please report this to the Linux " + "ACPI maintainers and complain to your BIOS " + "vendor.\n"); #endif kfree(data); return -ENODEV; } if (pol->cpu != 0) { - printk(KERN_ERR PFX "No ACPI _PSS objects for CPU other than " - "CPU0. Complain to your BIOS vendor.\n"); + printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " + "CPU other than CPU0. Complain to your BIOS " + "vendor.\n"); kfree(data); return -ENODEV; } @@ -1193,7 +1199,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) /* min/max the cpu is capable of */ if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { - printk(KERN_ERR PFX "invalid powernow_table\n"); + printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n"); powernow_k8_cpu_exit_acpi(data); kfree(data->powernow_table); kfree(data); -- cgit v1.2.3 From 4c168eaf7ea39f25a45a3d8c7eebc3fedb633a1d Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 24 Sep 2008 11:08:52 +0200 Subject: Revert "Oprofile Multiplexing Patch" Reverting commit 1a960b402a51d80abf54e3f8e4972374ffe5f22d for the main branch. Multiplexing will be tracked on a separate feature branch. Conflicts: arch/x86/oprofile/nmi_int.c --- arch/x86/oprofile/nmi_int.c | 100 +++----------------------------------- arch/x86/oprofile/op_counter.h | 3 +- arch/x86/oprofile/op_model_amd.c | 76 ++++++++++++----------------- arch/x86/oprofile/op_model_p4.c | 4 -- arch/x86/oprofile/op_model_ppro.c | 2 - arch/x86/oprofile/op_x86_model.h | 3 -- 6 files changed, 39 insertions(+), 149 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 4108d02c529..287513a0981 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -23,18 +23,12 @@ #include "op_counter.h" #include "op_x86_model.h" -DEFINE_PER_CPU(int, switch_index); - static struct op_x86_model_spec const *model; static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); static DEFINE_PER_CPU(unsigned long, saved_lvtpc); static int nmi_start(void); static void nmi_stop(void); -static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs); -static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs); -static void nmi_cpu_stop(void *dummy); -static void nmi_cpu_start(void *dummy); /* 0 == registered but off, 1 == registered and on */ static int nmi_enabled = 0; @@ -87,47 +81,6 @@ static void exit_sysfs(void) #define exit_sysfs() do { } while (0) #endif /* CONFIG_PM */ -static void nmi_cpu_switch(void *dummy) -{ - int cpu = smp_processor_id(); - int si = per_cpu(switch_index, cpu); - struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); - - nmi_cpu_stop(NULL); - nmi_cpu_save_mpx_registers(msrs); - - /* move to next set */ - si += model->num_hardware_counters; - if ((si > model->num_counters) || (counter_config[si].count == 0)) - per_cpu(switch_index, smp_processor_id()) = 0; - else - per_cpu(switch_index, smp_processor_id()) = si; - - nmi_cpu_restore_mpx_registers(msrs); - model->setup_ctrs(msrs); - nmi_cpu_start(NULL); -} - -/* - * Quick check to see if multiplexing is necessary. - * The check should be sufficient since counters are used - * in ordre. - */ -static int nmi_multiplex_on(void) -{ - return counter_config[model->num_hardware_counters].count ? 
0 : -EINVAL; -} - -static int nmi_switch_event(void) -{ - if (nmi_multiplex_on() < 0) - return -EINVAL; - - on_each_cpu(nmi_cpu_switch, NULL, 1); - - return 0; -} - static int profile_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { @@ -191,10 +144,11 @@ static void free_msrs(void) static int allocate_msrs(void) { - int i, success = 1; + int success = 1; size_t controls_size = sizeof(struct op_msr) * model->num_controls; size_t counters_size = sizeof(struct op_msr) * model->num_counters; + int i; for_each_possible_cpu(i) { per_cpu(cpu_msrs, i).counters = kmalloc(counters_size, GFP_KERNEL); @@ -202,8 +156,8 @@ static int allocate_msrs(void) success = 0; break; } - per_cpu(cpu_msrs, i).controls = - kmalloc(controls_size, GFP_KERNEL); + per_cpu(cpu_msrs, i).controls = kmalloc(controls_size, + GFP_KERNEL); if (!per_cpu(cpu_msrs, i).controls) { success = 0; break; @@ -247,8 +201,7 @@ static int nmi_setup(void) return err; } - /* - * We need to serialize save and setup for HT because the subset + /* We need to serialize save and setup for HT because the subset * of msrs are distinct for save and setup operations */ @@ -264,6 +217,7 @@ static int nmi_setup(void) per_cpu(cpu_msrs, 0).controls, sizeof(struct op_msr) * model->num_controls); } + } on_each_cpu(nmi_save_registers, NULL, 1); on_each_cpu(nmi_cpu_setup, NULL, 1); @@ -271,41 +225,7 @@ static int nmi_setup(void) return 0; } -static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs) -{ - unsigned int si = __get_cpu_var(switch_index); - unsigned int const nr_ctrs = model->num_hardware_counters; - struct op_msr *counters = &msrs->counters[si]; - unsigned int i; - - for (i = 0; i < nr_ctrs; ++i) { - int offset = i + si; - if (counters[offset].addr) { - rdmsr(counters[offset].addr, - counters[offset].multiplex.low, - counters[offset].multiplex.high); - } - } -} - -static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs) -{ - unsigned int si = __get_cpu_var(switch_index); - unsigned int const nr_ctrs = model->num_hardware_counters; - struct op_msr *counters = &msrs->counters[si]; - unsigned int i; - - for (i = 0; i < nr_ctrs; ++i) { - int offset = i + si; - if (counters[offset].addr) { - wrmsr(counters[offset].addr, - counters[offset].multiplex.low, - counters[offset].multiplex.high); - } - } -} - -static void nmi_cpu_restore_registers(struct op_msrs *msrs) +static void nmi_restore_registers(struct op_msrs *msrs) { unsigned int const nr_ctrs = model->num_counters; unsigned int const nr_ctrls = model->num_controls; @@ -345,8 +265,7 @@ static void nmi_cpu_shutdown(void *dummy) apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); apic_write(APIC_LVTERR, v); - nmi_cpu_restore_registers(msrs); - __get_cpu_var(switch_index) = 0; + nmi_restore_registers(msrs); } static void nmi_shutdown(void) @@ -409,7 +328,6 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root) oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask); oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel); oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user); - counter_config[i].save_count_low = 0; } return 0; @@ -551,14 +469,12 @@ int __init op_nmi_init(struct oprofile_operations *ops) } /* default values, can be overwritten by model */ - __raw_get_cpu_var(switch_index) = 0; ops->create_files = nmi_create_files; ops->setup = nmi_setup; ops->shutdown = nmi_shutdown; ops->start = nmi_start; ops->stop = nmi_stop; ops->cpu_type = 
cpu_type; - ops->switch_events = nmi_switch_event; if (model->init) ret = model->init(ops); diff --git a/arch/x86/oprofile/op_counter.h b/arch/x86/oprofile/op_counter.h index 786d6e01cf7..2880b15c467 100644 --- a/arch/x86/oprofile/op_counter.h +++ b/arch/x86/oprofile/op_counter.h @@ -10,14 +10,13 @@ #ifndef OP_COUNTER_H #define OP_COUNTER_H -#define OP_MAX_COUNTER 32 +#define OP_MAX_COUNTER 8 /* Per-perfctr configuration as set via * oprofilefs. */ struct op_counter_config { unsigned long count; - unsigned long save_count_low; unsigned long enabled; unsigned long event; unsigned long kernel; diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index bbf2b68bcc5..d9faf607b3a 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include @@ -24,10 +23,8 @@ #include "op_x86_model.h" #include "op_counter.h" -#define NUM_COUNTERS 32 -#define NUM_HARDWARE_COUNTERS 4 -#define NUM_CONTROLS 32 -#define NUM_HARDWARE_CONTROLS 4 +#define NUM_COUNTERS 4 +#define NUM_CONTROLS 4 #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) @@ -51,7 +48,6 @@ #define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8)) static unsigned long reset_value[NUM_COUNTERS]; -DECLARE_PER_CPU(int, switch_index); #ifdef CONFIG_OPROFILE_IBS @@ -134,17 +130,15 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs) int i; for (i = 0; i < NUM_COUNTERS; i++) { - int hw_counter = i % NUM_HARDWARE_COUNTERS; - if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + hw_counter)) - msrs->counters[i].addr = MSR_K7_PERFCTR0 + hw_counter; + if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) + msrs->counters[i].addr = MSR_K7_PERFCTR0 + i; else msrs->counters[i].addr = 0; } for (i = 0; i < NUM_CONTROLS; i++) { - int hw_control = i % NUM_HARDWARE_CONTROLS; - if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + hw_control)) - msrs->controls[i].addr = MSR_K7_EVNTSEL0 + hw_control; + if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) + msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i; else msrs->controls[i].addr = 0; } @@ -156,16 +150,8 @@ static void op_amd_setup_ctrs(struct op_msrs const * const msrs) unsigned int low, high; int i; - for (i = 0; i < NUM_HARDWARE_CONTROLS; ++i) { - int offset = i + __get_cpu_var(switch_index); - if (counter_config[offset].enabled) - reset_value[offset] = counter_config[offset].count; - else - reset_value[offset] = 0; - } - /* clear all counters */ - for (i = 0 ; i < NUM_HARDWARE_CONTROLS; ++i) { + for (i = 0 ; i < NUM_CONTROLS; ++i) { if (unlikely(!CTRL_IS_RESERVED(msrs, i))) continue; CTRL_READ(low, high, msrs, i); @@ -175,31 +161,34 @@ static void op_amd_setup_ctrs(struct op_msrs const * const msrs) } /* avoid a false detection of ctr overflows in NMI handler */ - for (i = 0; i < NUM_HARDWARE_COUNTERS; ++i) { + for (i = 0; i < NUM_COUNTERS; ++i) { if (unlikely(!CTR_IS_RESERVED(msrs, i))) continue; CTR_WRITE(1, msrs, i); } /* enable active counters */ - for (i = 0; i < NUM_HARDWARE_COUNTERS; ++i) { - int offset = i + __get_cpu_var(switch_index); - if ((counter_config[offset].enabled) && (CTR_IS_RESERVED(msrs, i))) { - CTR_WRITE(counter_config[offset].count, msrs, i); + for (i = 0; i < NUM_COUNTERS; ++i) { + if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { + reset_value[i] = counter_config[i].count; + + CTR_WRITE(counter_config[i].count, msrs, i); CTRL_READ(low, high, msrs, i); 
CTRL_CLEAR_LO(low); CTRL_CLEAR_HI(high); CTRL_SET_ENABLE(low); - CTRL_SET_USR(low, counter_config[offset].user); - CTRL_SET_KERN(low, counter_config[offset].kernel); - CTRL_SET_UM(low, counter_config[offset].unit_mask); - CTRL_SET_EVENT_LOW(low, counter_config[offset].event); - CTRL_SET_EVENT_HIGH(high, counter_config[offset].event); + CTRL_SET_USR(low, counter_config[i].user); + CTRL_SET_KERN(low, counter_config[i].kernel); + CTRL_SET_UM(low, counter_config[i].unit_mask); + CTRL_SET_EVENT_LOW(low, counter_config[i].event); + CTRL_SET_EVENT_HIGH(high, counter_config[i].event); CTRL_SET_HOST_ONLY(high, 0); CTRL_SET_GUEST_ONLY(high, 0); CTRL_WRITE(low, high, msrs, i); + } else { + reset_value[i] = 0; } } } @@ -287,14 +276,13 @@ static int op_amd_check_ctrs(struct pt_regs * const regs, unsigned int low, high; int i; - for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { - int offset = i + __get_cpu_var(switch_index); - if (!reset_value[offset]) + for (i = 0 ; i < NUM_COUNTERS; ++i) { + if (!reset_value[i]) continue; CTR_READ(low, high, msrs, i); if (CTR_OVERFLOWED(low)) { - oprofile_add_sample(regs, offset); - CTR_WRITE(reset_value[offset], msrs, i); + oprofile_add_sample(regs, i); + CTR_WRITE(reset_value[i], msrs, i); } } @@ -310,10 +298,8 @@ static void op_amd_start(struct op_msrs const * const msrs) { unsigned int low, high; int i; - - for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { - int offset = i + __get_cpu_var(switch_index); - if (reset_value[offset]) { + for (i = 0 ; i < NUM_COUNTERS ; ++i) { + if (reset_value[i]) { CTRL_READ(low, high, msrs, i); CTRL_SET_ACTIVE(low); CTRL_WRITE(low, high, msrs, i); @@ -343,8 +329,8 @@ static void op_amd_stop(struct op_msrs const * const msrs) /* Subtle: stop on all counters to avoid race with * setting our pm callback */ - for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { - if (!reset_value[i + per_cpu(switch_index, smp_processor_id())]) + for (i = 0 ; i < NUM_COUNTERS ; ++i) { + if (!reset_value[i]) continue; CTRL_READ(low, high, msrs, i); CTRL_SET_INACTIVE(low); @@ -370,11 +356,11 @@ static void op_amd_shutdown(struct op_msrs const * const msrs) { int i; - for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { + for (i = 0 ; i < NUM_COUNTERS ; ++i) { if (CTR_IS_RESERVED(msrs, i)) release_perfctr_nmi(MSR_K7_PERFCTR0 + i); } - for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { + for (i = 0 ; i < NUM_CONTROLS ; ++i) { if (CTRL_IS_RESERVED(msrs, i)) release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); } @@ -548,8 +534,6 @@ struct op_x86_model_spec const op_amd_spec = { .exit = op_amd_exit, .num_counters = NUM_COUNTERS, .num_controls = NUM_CONTROLS, - .num_hardware_counters = NUM_HARDWARE_COUNTERS, - .num_hardware_controls = NUM_HARDWARE_CONTROLS, .fill_in_addresses = &op_amd_fill_in_addresses, .setup_ctrs = &op_amd_setup_ctrs, .check_ctrs = &op_amd_check_ctrs, diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index cacba61ffba..43ac5af338d 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c @@ -700,8 +700,6 @@ static void p4_shutdown(struct op_msrs const * const msrs) struct op_x86_model_spec const op_p4_ht2_spec = { .num_counters = NUM_COUNTERS_HT2, .num_controls = NUM_CONTROLS_HT2, - .num_hardware_counters = NUM_COUNTERS_HT2, - .num_hardware_controls = NUM_CONTROLS_HT2, .fill_in_addresses = &p4_fill_in_addresses, .setup_ctrs = &p4_setup_ctrs, .check_ctrs = &p4_check_ctrs, @@ -714,8 +712,6 @@ struct op_x86_model_spec const op_p4_ht2_spec = { struct op_x86_model_spec const op_p4_spec = { .num_counters = 
NUM_COUNTERS_NON_HT, .num_controls = NUM_CONTROLS_NON_HT, - .num_hardware_counters = NUM_COUNTERS_NON_HT, - .num_hardware_controls = NUM_CONTROLS_NON_HT, .fill_in_addresses = &p4_fill_in_addresses, .setup_ctrs = &p4_setup_ctrs, .check_ctrs = &p4_check_ctrs, diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index e5811aa480e..eff431f6c57 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -183,8 +183,6 @@ static void ppro_shutdown(struct op_msrs const * const msrs) struct op_x86_model_spec const op_ppro_spec = { .num_counters = NUM_COUNTERS, .num_controls = NUM_CONTROLS, - .num_hardware_counters = NUM_COUNTERS, - .num_hardware_controls = NUM_CONTROLS, .fill_in_addresses = &ppro_fill_in_addresses, .setup_ctrs = &ppro_setup_ctrs, .check_ctrs = &ppro_check_ctrs, diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index e07ba107637..05a0261ba0c 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -19,7 +19,6 @@ struct op_saved_msr { struct op_msr { unsigned long addr; struct op_saved_msr saved; - struct op_saved_msr multiplex; }; struct op_msrs { @@ -35,8 +34,6 @@ struct pt_regs; struct op_x86_model_spec { int (*init)(struct oprofile_operations *ops); void (*exit)(void); - unsigned int const num_hardware_counters; - unsigned int const num_hardware_controls; unsigned int const num_counters; unsigned int const num_controls; void (*fill_in_addresses)(struct op_msrs * const msrs); -- cgit v1.2.3 From cfb361f13c8136de78c406745abc4e4456e6d480 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 18 Sep 2008 15:49:14 +0800 Subject: [IA64] utrace syscall.h support for ia64 Add asm/syscall.h for IA64. Utrace requires this. Signed-off-by: Shaohua Li Signed-off-by: Tony Luck --- arch/ia64/include/asm/ptrace.h | 6 +++ arch/ia64/include/asm/syscall.h | 92 +++++++++++++++++++++++++++++++++++++++++ arch/ia64/kernel/ptrace.c | 65 +++++++++++++++++++++++++++++ 3 files changed, 163 insertions(+) create mode 100644 arch/ia64/include/asm/syscall.h (limited to 'arch') diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h index 15f8dcfe6ee..14055c636ad 100644 --- a/arch/ia64/include/asm/ptrace.h +++ b/arch/ia64/include/asm/ptrace.h @@ -240,6 +240,12 @@ struct switch_stack { */ # define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri) +static inline unsigned long user_stack_pointer(struct pt_regs *regs) +{ + /* FIXME: should this be bspstore + nr_dirty regs? */ + return regs->ar_bspstore; +} + #define regs_return_value(regs) ((regs)->r8) /* Conserve space in histogram by encoding slot bits in address diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h new file mode 100644 index 00000000000..3fd4fa6c48d --- /dev/null +++ b/arch/ia64/include/asm/syscall.h @@ -0,0 +1,92 @@ +/* + * Access to user system call parameters and results + * + * Copyright (C) 2008 Intel Corp. Shaohua Li + * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. + * + * See asm-generic/syscall.h for descriptions of what we must do here. 
+ */ + +#ifndef _ASM_SYSCALL_H +#define _ASM_SYSCALL_H 1 + +#include +#include + +static inline long syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + BUG_ON(IS_IA32_PROCESS(regs)); + + if ((long)regs->cr_ifs < 0) /* Not a syscall */ + return -1; + return regs->r15; +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + BUG_ON(IS_IA32_PROCESS(regs)); + /* do nothing */ +} + +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + BUG_ON(IS_IA32_PROCESS(regs)); + + return regs->r10 == -1 ? regs->r8:0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + BUG_ON(IS_IA32_PROCESS(regs)); + + return regs->r8; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + BUG_ON(IS_IA32_PROCESS(regs)); + + if (error) { + /* error < 0, but ia64 uses > 0 return value */ + regs->r8 = -error; + regs->r10 = -1; + } else { + regs->r8 = val; + regs->r10 = 0; + } +} + +extern void ia64_syscall_get_set_arguments(struct task_struct *task, + struct pt_regs *regs, unsigned int i, unsigned int n, + unsigned long *args, int rw); +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned int i, unsigned int n, + unsigned long *args) +{ + BUG_ON(IS_IA32_PROCESS(regs)); + BUG_ON(i + n > 6); + + ia64_syscall_get_set_arguments(task, regs, i, n, args, 0); +} + +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned int i, unsigned int n, + unsigned long *args) +{ + BUG_ON(IS_IA32_PROCESS(regs)); + BUG_ON(i + n > 6); + + ia64_syscall_get_set_arguments(task, regs, i, n, args, 1); +} +#endif /* _ASM_SYSCALL_H */ diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 2a9943b5947..12b1e9f0b7a 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -2199,3 +2199,68 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *tsk) #endif return &user_ia64_view; } + +struct syscall_get_set_args { + unsigned int i; + unsigned int n; + unsigned long *args; + struct pt_regs *regs; + int rw; +}; + +static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data) +{ + struct syscall_get_set_args *args = data; + struct pt_regs *pt = args->regs; + unsigned long *krbs, cfm, ndirty; + int i, count; + + if (unw_unwind_to_user(info) < 0) + return; + + cfm = pt->cr_ifs; + krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8; + ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19)); + + count = 0; + if (in_syscall(pt)) + count = min_t(int, args->n, cfm & 0x7f); + + for (i = 0; i < count; i++) { + if (args->rw) + *ia64_rse_skip_regs(krbs, ndirty + i + args->i) = + args->args[i]; + else + args->args[i] = *ia64_rse_skip_regs(krbs, + ndirty + i + args->i); + } + + if (!args->rw) { + while (i < args->n) { + args->args[i] = 0; + i++; + } + } +} + +void ia64_syscall_get_set_arguments(struct task_struct *task, + struct pt_regs *regs, unsigned int i, unsigned int n, + unsigned long *args, int rw) +{ + struct syscall_get_set_args data = { + .i = i, + .n = n, + .args = args, + .regs = regs, + .rw = rw, + }; + + if (task == current) + unw_init_running(syscall_get_set_args_cb, &data); + else { + struct unw_frame_info ufi; + memset(&ufi, 0, sizeof(ufi)); + unw_init_from_blocked_task(&ufi, task); + syscall_get_set_args_cb(&ufi, &data); + } +} -- cgit v1.2.3 From 
f14488ccfe0f41207e40520fab60dce356ed9e57 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 6 Oct 2008 10:43:06 -0700 Subject: [IA64] utrace use generic trace hook Make IA64 use generic trace hook in some paths. Signed-off-by: Shaohua Li Signed-off-by: Tony Luck --- arch/ia64/ia32/ia32_entry.S | 5 ++++ arch/ia64/include/asm/thread_info.h | 3 --- arch/ia64/kernel/entry.S | 5 ++++ arch/ia64/kernel/perfmon.c | 7 +++--- arch/ia64/kernel/process.c | 21 +++++------------ arch/ia64/kernel/ptrace.c | 47 +++++++++++-------------------------- arch/ia64/kernel/signal.c | 8 +++++++ 7 files changed, 41 insertions(+), 55 deletions(-) (limited to 'arch') diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S index ff88c48c5d1..b905dcb791f 100644 --- a/arch/ia64/ia32/ia32_entry.S +++ b/arch/ia64/ia32/ia32_entry.S @@ -108,6 +108,11 @@ GLOBAL_ENTRY(ia32_trace_syscall) ;; st8 [r2]=r3 // initialize return code to -ENOSYS br.call.sptk.few rp=syscall_trace_enter // give parent a chance to catch syscall args + cmp.lt p6,p0=r8,r0 // check tracehook + adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8 + ;; +(p6) st8.spill [r2]=r8 // store return value in slot for r8 +(p6) br.spnt.few .ret4 .ret2: // Need to reload arguments (they may be changed by the tracing process) adds r2=IA64_PT_REGS_R1_OFFSET+16,sp // r2 = &pt_regs.r1 adds r3=IA64_PT_REGS_R13_OFFSET+16,sp // r3 = &pt_regs.r13 diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index 7c60fcdd2ef..ae6922626bf 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h @@ -87,9 +87,6 @@ struct thread_info { #define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER)) #define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER) -#define tsk_set_notify_resume(tsk) \ - set_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME) -extern void tsk_clear_notify_resume(struct task_struct *tsk); #endif /* !__ASSEMBLY */ /* diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 0dd6c1419d8..7ef0c594f5e 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -534,6 +534,11 @@ GLOBAL_ENTRY(ia64_trace_syscall) stf.spill [r16]=f10 stf.spill [r17]=f11 br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args + cmp.lt p6,p0=r8,r0 // check tracehook + adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8 + adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10 + mov r10=0 +(p6) br.cond.sptk strace_error // syscall failed -> adds r16=PT(F6)+16,sp adds r17=PT(F7)+16,sp ;; diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index fc8f3509df2..ada4605d122 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -3684,7 +3685,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) PFM_SET_WORK_PENDING(task, 1); - tsk_set_notify_resume(task); + set_notify_resume(task); /* * XXX: send reschedule if task runs on another CPU @@ -5044,8 +5045,6 @@ pfm_handle_work(void) PFM_SET_WORK_PENDING(current, 0); - tsk_clear_notify_resume(current); - regs = task_pt_regs(current); /* @@ -5414,7 +5413,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str * when coming from ctxsw, current still points to the * previous task, therefore we must work with task and not current. 
*/ - tsk_set_notify_resume(task); + set_notify_resume(task); } /* * defer until state is changed (shorten spin window). the context is locked diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 3ab8373103e..341a0319a5b 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -160,21 +161,6 @@ show_regs (struct pt_regs *regs) show_stack(NULL, NULL); } -void tsk_clear_notify_resume(struct task_struct *tsk) -{ -#ifdef CONFIG_PERFMON - if (tsk->thread.pfm_needs_checking) - return; -#endif - if (test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_RSE)) - return; - clear_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME); -} - -/* - * do_notify_resume_user(): - * Called from notify_resume_user at entry.S, with interrupts disabled. - */ void do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) { @@ -203,6 +189,11 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) ia64_do_signal(scr, in_syscall); } + if (test_thread_flag(TIF_NOTIFY_RESUME)) { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(&scr->pt); + } + /* copy user rbs to kernel rbs */ if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) { local_irq_enable(); /* force interrupt enable */ diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 12b1e9f0b7a..92c9689b7d9 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -603,7 +604,7 @@ void ia64_ptrace_stop(void) { if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE)) return; - tsk_set_notify_resume(current); + set_notify_resume(current); unw_init_running(do_sync_rbs, ia64_sync_user_rbs); } @@ -613,7 +614,6 @@ void ia64_ptrace_stop(void) void ia64_sync_krbs(void) { clear_tsk_thread_flag(current, TIF_RESTORE_RSE); - tsk_clear_notify_resume(current); unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs); } @@ -644,7 +644,7 @@ ptrace_attach_sync_user_rbs (struct task_struct *child) spin_lock_irq(&child->sighand->siglock); if (child->state == TASK_STOPPED && !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) { - tsk_set_notify_resume(child); + set_notify_resume(child); child->state = TASK_TRACED; stopped = 1; @@ -1232,37 +1232,16 @@ arch_ptrace (struct task_struct *child, long request, long addr, long data) } -static void -syscall_trace (void) -{ - /* - * The 0x80 provides a way for the tracing parent to - * distinguish between a syscall stop and SIGTRAP delivery. - */ - ptrace_notify(SIGTRAP - | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); - - /* - * This isn't the same as continuing with a signal, but it - * will do for normal use. strace only continues with a - * signal if the stopping signal is not SIGTRAP. -brl - */ - if (current->exit_code) { - send_sig(current->exit_code, current, 1); - current->exit_code = 0; - } -} - /* "asmlinkage" so the input arguments are preserved... 
*/ -asmlinkage void +asmlinkage long syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, long arg4, long arg5, long arg6, long arg7, struct pt_regs regs) { - if (test_thread_flag(TIF_SYSCALL_TRACE) - && (current->ptrace & PT_PTRACED)) - syscall_trace(); + if (test_thread_flag(TIF_SYSCALL_TRACE)) + if (tracehook_report_syscall_entry(®s)) + return -ENOSYS; /* copy user rbs to kernel rbs */ if (test_thread_flag(TIF_RESTORE_RSE)) @@ -1283,6 +1262,7 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3); } + return 0; } /* "asmlinkage" so the input arguments are preserved... */ @@ -1292,6 +1272,8 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3, long arg4, long arg5, long arg6, long arg7, struct pt_regs regs) { + int step; + if (unlikely(current->audit_context)) { int success = AUDITSC_RESULT(regs.r10); long result = regs.r8; @@ -1301,10 +1283,9 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3, audit_syscall_exit(success, result); } - if ((test_thread_flag(TIF_SYSCALL_TRACE) - || test_thread_flag(TIF_SINGLESTEP)) - && (current->ptrace & PT_PTRACED)) - syscall_trace(); + step = test_thread_flag(TIF_SINGLESTEP); + if (step || test_thread_flag(TIF_SYSCALL_TRACE)) + tracehook_report_syscall_exit(®s, step); /* copy user rbs to kernel rbs */ if (test_thread_flag(TIF_RESTORE_RSE)) @@ -1940,7 +1921,7 @@ gpregs_writeback(struct task_struct *target, { if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE)) return 0; - tsk_set_notify_resume(target); + set_notify_resume(target); return do_regset_call(do_gpregs_writeback, target, regset, 0, 0, NULL, NULL); } diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 19c5a78636f..e12500a9c44 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -439,6 +440,13 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse sigaddset(¤t->blocked, sig); recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); + + /* + * Let tracing know that we've done the handler setup. + */ + tracehook_signal_handler(sig, info, ka, &scr->pt, + test_thread_flag(TIF_SINGLESTEP)); + return 1; } -- cgit v1.2.3 From 9690ad031290d86979b284bd6243313f58271bcc Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Wed, 1 Oct 2008 13:57:14 -0700 Subject: [IA64] utrace Enable trace hook config Signed-off-by: Shaohua Li Signed-off-by: Tony Luck --- arch/ia64/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 48e496fe1e7..a36b014ee66 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -21,6 +21,7 @@ config IA64 select HAVE_KRETPROBES select HAVE_DMA_ATTRS select HAVE_KVM + select HAVE_ARCH_TRACEHOOK default y help The Itanium Processor Family is Intel's 64-bit successor to -- cgit v1.2.3 From 680973edf122fd95735ecfc077cf79645d2e5081 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 18 Sep 2008 15:50:26 +0800 Subject: [IA64] utrace Convert compat ptrace to use compat_sys_ptrace Convert IA64 32-bit ptrace to use compat_sys_ptrace. 
Signed-off-by: Shaohua Li Signed-off-by: Tony Luck --- arch/ia64/ia32/ia32_entry.S | 2 +- arch/ia64/ia32/sys_ia32.c | 83 +++----------------------------------- arch/ia64/include/asm/ptrace.h | 2 + arch/ia64/include/asm/syscall.h | 89 ++++++++++++++++++++++++++++++++++++----- 4 files changed, 89 insertions(+), 87 deletions(-) (limited to 'arch') diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S index b905dcb791f..a73ec0089d4 100644 --- a/arch/ia64/ia32/ia32_entry.S +++ b/arch/ia64/ia32/ia32_entry.S @@ -204,7 +204,7 @@ ia32_syscall_table: data8 sys_setuid /* 16-bit version */ data8 sys_getuid /* 16-bit version */ data8 compat_sys_stime /* 25 */ - data8 sys32_ptrace + data8 compat_sys_ptrace data8 sys32_alarm data8 sys_ni_syscall data8 sys32_pause diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index bf196cbb379..5df5e4c90e4 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -1300,25 +1300,6 @@ sys32_waitpid (int pid, unsigned int *stat_addr, int options) return compat_sys_wait4(pid, stat_addr, options, NULL); } -static unsigned int -ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val) -{ - size_t copied; - unsigned int ret; - - copied = access_process_vm(child, addr, val, sizeof(*val), 0); - return (copied != sizeof(ret)) ? -EIO : 0; -} - -static unsigned int -ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val) -{ - - if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val)) - return -EIO; - return 0; -} - /* * The order in which registers are stored in the ptrace regs structure */ @@ -1616,49 +1597,15 @@ restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __u return 0; } -asmlinkage long -sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data) +long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + compat_ulong_t caddr, compat_ulong_t cdata) { - struct task_struct *child; - unsigned int value, tmp; + unsigned long addr = caddr; + unsigned long data = cdata; + unsigned int tmp; long i, ret; - lock_kernel(); - if (request == PTRACE_TRACEME) { - ret = ptrace_traceme(); - goto out; - } - - child = ptrace_get_task_struct(pid); - if (IS_ERR(child)) { - ret = PTR_ERR(child); - goto out; - } - - if (request == PTRACE_ATTACH) { - ret = sys_ptrace(request, pid, addr, data); - goto out_tsk; - } - - ret = ptrace_check_attach(child, request == PTRACE_KILL); - if (ret < 0) - goto out_tsk; - switch (request) { - case PTRACE_PEEKTEXT: - case PTRACE_PEEKDATA: /* read word at location addr */ - ret = ia32_peek(child, addr, &value); - if (ret == 0) - ret = put_user(value, (unsigned int __user *) compat_ptr(data)); - else - ret = -EIO; - goto out_tsk; - - case PTRACE_POKETEXT: - case PTRACE_POKEDATA: /* write the word at location addr */ - ret = ia32_poke(child, addr, data); - goto out_tsk; - case PTRACE_PEEKUSR: /* read word at addr in USER area */ ret = -EIO; if ((addr & 3) || addr > 17*sizeof(int)) @@ -1723,27 +1670,9 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data) compat_ptr(data)); break; - case PTRACE_GETEVENTMSG: - ret = put_user(child->ptrace_message, (unsigned int __user *) compat_ptr(data)); - break; - - case PTRACE_SYSCALL: /* continue, stop after next syscall */ - case PTRACE_CONT: /* restart after signal. 
*/ - case PTRACE_KILL: - case PTRACE_SINGLESTEP: /* execute chile for one instruction */ - case PTRACE_DETACH: /* detach a process */ - ret = sys_ptrace(request, pid, addr, data); - break; - default: - ret = ptrace_request(child, request, addr, data); - break; - + return compat_ptrace_request(child, request, caddr, cdata); } - out_tsk: - put_task_struct(child); - out: - unlock_kernel(); return ret; } diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h index 14055c636ad..6417c1ecb44 100644 --- a/arch/ia64/include/asm/ptrace.h +++ b/arch/ia64/include/asm/ptrace.h @@ -325,6 +325,8 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) #define arch_has_block_step() (1) extern void user_enable_block_step(struct task_struct *); +#define __ARCH_WANT_COMPAT_SYS_PTRACE + #endif /* !__KERNEL__ */ /* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */ diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h index 3fd4fa6c48d..2f758a42f94 100644 --- a/arch/ia64/include/asm/syscall.h +++ b/arch/ia64/include/asm/syscall.h @@ -19,24 +19,35 @@ static inline long syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { - BUG_ON(IS_IA32_PROCESS(regs)); - if ((long)regs->cr_ifs < 0) /* Not a syscall */ return -1; + +#ifdef CONFIG_IA32_SUPPORT + if (IS_IA32_PROCESS(regs)) + return regs->r1; +#endif + return regs->r15; } static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { - BUG_ON(IS_IA32_PROCESS(regs)); +#ifdef CONFIG_IA32_SUPPORT + if (IS_IA32_PROCESS(regs)) + regs->r8 = regs->r1; +#endif + /* do nothing */ } static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) { - BUG_ON(IS_IA32_PROCESS(regs)); +#ifdef CONFIG_IA32_SUPPORT + if (IS_IA32_PROCESS(regs)) + return regs->r8; +#endif return regs->r10 == -1 ? regs->r8:0; } @@ -44,8 +55,6 @@ static inline long syscall_get_error(struct task_struct *task, static inline long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) { - BUG_ON(IS_IA32_PROCESS(regs)); - return regs->r8; } @@ -53,7 +62,12 @@ static inline void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, int error, long val) { - BUG_ON(IS_IA32_PROCESS(regs)); +#ifdef CONFIG_IA32_SUPPORT + if (IS_IA32_PROCESS(regs)) { + regs->r8 = (long) error ? 
error : val; + return; + } +#endif if (error) { /* error < 0, but ia64 uses > 0 return value */ @@ -73,9 +87,39 @@ static inline void syscall_get_arguments(struct task_struct *task, unsigned int i, unsigned int n, unsigned long *args) { - BUG_ON(IS_IA32_PROCESS(regs)); BUG_ON(i + n > 6); +#ifdef CONFIG_IA32_SUPPORT + if (IS_IA32_PROCESS(regs)) { + switch (i + n) { + case 6: + if (!n--) break; + *args++ = regs->r13; + case 5: + if (!n--) break; + *args++ = regs->r15; + case 4: + if (!n--) break; + *args++ = regs->r14; + case 3: + if (!n--) break; + *args++ = regs->r10; + case 2: + if (!n--) break; + *args++ = regs->r9; + case 1: + if (!n--) break; + *args++ = regs->r11; + case 0: + if (!n--) break; + default: + BUG(); + break; + } + + return; + } +#endif ia64_syscall_get_set_arguments(task, regs, i, n, args, 0); } @@ -84,9 +128,36 @@ static inline void syscall_set_arguments(struct task_struct *task, unsigned int i, unsigned int n, unsigned long *args) { - BUG_ON(IS_IA32_PROCESS(regs)); BUG_ON(i + n > 6); +#ifdef CONFIG_IA32_SUPPORT + if (IS_IA32_PROCESS(regs)) { + switch (i + n) { + case 6: + if (!n--) break; + regs->r13 = *args++; + case 5: + if (!n--) break; + regs->r15 = *args++; + case 4: + if (!n--) break; + regs->r14 = *args++; + case 3: + if (!n--) break; + regs->r10 = *args++; + case 2: + if (!n--) break; + regs->r9 = *args++; + case 1: + if (!n--) break; + regs->r11 = *args++; + case 0: + if (!n--) break; + } + + return; + } +#endif ia64_syscall_get_set_arguments(task, regs, i, n, args, 1); } #endif /* _ASM_SYSCALL_H */ -- cgit v1.2.3 From 80a4b18d19bf1f7b88a261088c00a0d6b310a722 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 6 Oct 2008 13:01:53 -0700 Subject: select: fix alpha OSF wrapper ... alpha calls the core select code from inside it's architecture code for emulating OSF; this patch makes it compile again Signed-off-by: Arjan van de Ven --- arch/alpha/kernel/osf_sys.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 8509dad3120..8e19acbf288 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -986,10 +986,12 @@ asmlinkage int osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval32 __user *tvp) { - s64 timeout = MAX_SCHEDULE_TIMEOUT; + struct timespec end_time, *to = NULL; if (tvp) { time_t sec, usec; + to = &end_time; + if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp)) || __get_user(sec, &tvp->tv_sec) || __get_user(usec, &tvp->tv_usec)) { @@ -999,14 +1001,13 @@ osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, if (sec < 0 || usec < 0) return -EINVAL; - if ((unsigned long) sec < MAX_SELECT_SECONDS) { - timeout = (usec + 1000000/HZ - 1) / (1000000/HZ); - timeout += sec * (unsigned long) HZ; - } + if (poll_select_set_timeout(to, sec, usec * NSEC_PER_USEC)) + return -EINVAL; + } /* OSF does not copy back the remaining time. */ - return core_sys_select(n, inp, outp, exp, &timeout); + return core_sys_select(n, inp, outp, exp, to); } struct rusage32 { -- cgit v1.2.3 From d0d0f7432c9cbd52cb2f31d499f8292b13a7ecac Mon Sep 17 00:00:00 2001 From: Matt Mackall Date: Thu, 9 Oct 2008 12:41:50 -0500 Subject: x86: remove magic number from ACPI sleep stack buffer x86_64 SMP suspend to RAM uses a 10k temporary stack for saving the kernel state, but only 4k of it is used. Shrink it to 4k. 
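Though the change is one line, the point is maintenance: once the stack top is derived from sizeof(temp_stack), resizing the buffer (as the very next patch does) cannot leave this assignment stale. A toy illustration of the pattern, not the sleep.c code itself (resume_stack is a made-up name):

/* illustration only: state the buffer size once and derive everything
 * else from it; x86 stacks grow down, so the top is buffer + size */
static char resume_stack[4096];

static void *resume_stack_top(void)
{
	return resume_stack + sizeof(resume_stack);
}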
Signed-off-by: Matt Mackall Acked-by: Pavel Machek Signed-off-by: Len Brown --- arch/x86/kernel/acpi/sleep.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 426e5d91b63..29cf3403abe 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -97,7 +97,7 @@ int acpi_save_state_mem(void) #else /* CONFIG_64BIT */ header->trampoline_segment = setup_trampoline() >> 4; #ifdef CONFIG_SMP - stack_start.sp = temp_stack + 4096; + stack_start.sp = temp_stack + sizeof(temp_stack); #endif initial_code = (unsigned long)wakeup_long64; saved_magic = 0x123456789abcdef0; -- cgit v1.2.3 From 5000cadcf3188e935dae28c4fc7e24639704ea55 Mon Sep 17 00:00:00 2001 From: Matt Mackall Date: Thu, 9 Oct 2008 11:56:21 -0500 Subject: x86: trim ACPI sleep stack buffer x86_64 SMP suspend to RAM uses a 10k temporary stack for saving the kernel state, but only 4k of it is used. Shrink it to 4k. Signed-off-by: Matt Mackall Signed-off-by: Len Brown --- arch/x86/kernel/acpi/sleep.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 29cf3403abe..55d10cbe65b 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -21,7 +21,7 @@ unsigned long acpi_realmode_flags; static unsigned long acpi_realmode; #if defined(CONFIG_SMP) && defined(CONFIG_64BIT) -static char temp_stack[10240]; +static char temp_stack[4096]; #endif /** -- cgit v1.2.3 From ee297533279a802eac8b1cbea8e65b24b36a1aac Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 24 Sep 2008 19:04:31 -0700 Subject: ACPI: don't load acpi_cpufreq if acpi=off Signed-off-by: Yinghai Lu Signed-off-by: Len Brown --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index dd097b83583..9943b4c8774 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -779,6 +779,9 @@ static int __init acpi_cpufreq_init(void) { int ret; + if (acpi_disabled) + return 0; + dprintk("acpi_cpufreq_init\n"); ret = acpi_cpufreq_early_init(); -- cgit v1.2.3 From 27663c5855b10af9ec67bc7dfba001426ba21222 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Fri, 10 Oct 2008 02:22:59 -0400 Subject: ACPI: Change acpi_evaluate_integer to support 64-bit on 32-bit kernels As of version 2.0, ACPI can return 64-bit integers. The current acpi_evaluate_integer only supports 64-bit integers on 64-bit platforms. Change the argument to take a pointer to an acpi_integer so we support 64-bit integers on all platforms. 
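A hedged sketch of what a caller looks like under the new convention (read_adr and dev_handle are illustrative names, not taken from this patch); as the note below explains, the type is ultimately spelled unsigned long long rather than acpi_integer, so the value is 64 bits wide even on 32-bit kernels:

#include <linux/errno.h>
#include <linux/acpi.h>

static int read_adr(acpi_handle dev_handle, unsigned long long *adr)
{
	acpi_status status;

	/* _ADR may legitimately exceed 32 bits as of ACPI 2.0 */
	status = acpi_evaluate_integer(dev_handle, "_ADR", NULL, adr);
	return ACPI_FAILURE(status) ? -ENODEV : 0;
}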
lenb: replaced use of "acpi_integer" with "unsigned long long" lenb: fixed bug in acpi_thermal_trips_update() Signed-off-by: Matthew Wilcox Signed-off-by: Len Brown --- arch/ia64/sn/kernel/io_acpi_init.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c index 6568942a95f..a15baacaba4 100644 --- a/arch/ia64/sn/kernel/io_acpi_init.c +++ b/arch/ia64/sn/kernel/io_acpi_init.c @@ -232,7 +232,7 @@ exit: static unsigned int get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle) { - unsigned long adr; + unsigned long long adr; acpi_handle child; unsigned int devfn; int function; @@ -292,8 +292,8 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle) static acpi_status find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv) { - unsigned long bbn = -1; - unsigned long adr; + unsigned long long bbn = -1; + unsigned long long adr; acpi_handle parent = NULL; acpi_status status; unsigned int devfn; @@ -348,7 +348,7 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info, unsigned int host_devfn; struct sn_pcidev_match pcidev_match; acpi_handle rootbus_handle; - unsigned long segment; + unsigned long long segment; acpi_status status; rootbus_handle = PCI_CONTROLLER(dev)->acpi_handle; -- cgit v1.2.3 From 1d3ba686ed706a0e8563878b2ec415e125178607 Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Mon, 13 Oct 2008 12:42:25 +0200 Subject: avr32: Kconfig: Remove pointless if around atstk1000 include The contents of the ATSTK1000 Kconfig file itself is completely conditional, so including it conditionally makes no sense and only adds clutter. Signed-off-by: Haavard Skinnemoen --- arch/avr32/Kconfig | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index 7c239a91627..a90d85d44fc 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -121,9 +121,7 @@ config BOARD_ATNGW100 select CPU_AT32AP7000 endchoice -if BOARD_ATSTK1000 source "arch/avr32/boards/atstk1000/Kconfig" -endif choice prompt "Boot loader type" -- cgit v1.2.3 From a3bee42f058c2f9fe281df942eff397924630a12 Mon Sep 17 00:00:00 2001 From: Hans-Christian Egtvedt Date: Tue, 1 Jul 2008 08:29:27 +0000 Subject: avr32: Add support for EVKLCD10X addon boards This patch lets the user enable support for EVKLCD100 and EVKLCD101 (refered to by EVKLCD10X). By enabling EVKLCD10X support the LCD controller and AC97 controller platform devices are added. The user can also choose between the EVKLCD100 (QVGA display) and the EVKLCD101 (VGA display), this is added to automagically select the correct panel timing and resolution parameters. Enabling support for EVKLCD10X addon board will cripple the MCI platform device a bit since they share two GPIO lines (detect and write-protect). These two lines are disabled when EVKLCD10X is enabled. The default configurations are based upon ATNGW100, but with added AC97C and LCDC driver. Virtual terminal is also enabled by default for EVKLCD10X boards. Verified on hardware with a NGW100 + EVKLCD100/101. 
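The GPIO conflict mentioned above comes down to a compile-time guard in the board setup. The fragment below is only a sketch of that shape; the header path, the mci0_data name and the detect_pin/wp_pin fields are assumptions modeled on the NGW100 board code of this period, not lines from this patch. When the EVKLCD10X option is enabled, the MCI device is registered with no card-detect or write-protect pin so the panel can use those GPIOs.

#include <linux/init.h>
#include <mach/board.h>		/* assumed location of mci_platform_data */

static struct mci_platform_data mci0_data __initdata;

/* called from the board's device-registration path */
static void __init board_setup_mci(void)
{
#ifdef CONFIG_BOARD_ATNGW100_EVKLCD10X
	/* these two GPIOs are wired to the EVKLCD10X panel, so the MCI
	 * driver must not claim them; a negative value means "no pin" */
	mci0_data.detect_pin = -1;
	mci0_data.wp_pin = -1;
#else
	/* the board's real card-detect and write-protect GPIOs would be
	 * filled in here */
#endif
	at32_add_device_mci(0, &mci0_data);
}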
Signed-off-by: Hans-Christian Egtvedt Signed-off-by: Haavard Skinnemoen --- arch/avr32/Kconfig | 1 + arch/avr32/boards/atngw100/Kconfig | 35 + arch/avr32/boards/atngw100/Makefile | 3 +- arch/avr32/boards/atngw100/evklcd10x.c | 155 +++ arch/avr32/boards/atngw100/setup.c | 5 + arch/avr32/configs/atngw100_evklcd100_defconfig | 1264 +++++++++++++++++++++++ arch/avr32/configs/atngw100_evklcd101_defconfig | 1264 +++++++++++++++++++++++ 7 files changed, 2726 insertions(+), 1 deletion(-) create mode 100644 arch/avr32/boards/atngw100/Kconfig create mode 100644 arch/avr32/boards/atngw100/evklcd10x.c create mode 100644 arch/avr32/configs/atngw100_evklcd100_defconfig create mode 100644 arch/avr32/configs/atngw100_evklcd101_defconfig (limited to 'arch') diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index a90d85d44fc..5c6d429505e 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -122,6 +122,7 @@ config BOARD_ATNGW100 endchoice source "arch/avr32/boards/atstk1000/Kconfig" +source "arch/avr32/boards/atngw100/Kconfig" choice prompt "Boot loader type" diff --git a/arch/avr32/boards/atngw100/Kconfig b/arch/avr32/boards/atngw100/Kconfig new file mode 100644 index 00000000000..b3f99477bbe --- /dev/null +++ b/arch/avr32/boards/atngw100/Kconfig @@ -0,0 +1,35 @@ +# NGW100 customization + +if BOARD_ATNGW100 + +config BOARD_ATNGW100_EVKLCD10X + bool "Add support for EVKLCD10X addon board" + help + This enables support for the EVKLCD100 (QVGA) or EVKLCD101 (VGA) + addon board for the NGW100. By enabling this the LCD controller and + AC97 controller is added as platform devices. + + This choice disables the detect pin and the write-protect pin for the + MCI platform device, since it conflicts with the LCD platform device. + The MCI pins can be reenabled by editing the "add device function" but + this may break the setup for other displays that use these pins. + + Choose 'Y' here if you have a EVKLCD100/101 connected to the NGW100. + +choice + prompt "LCD panel resolution on EVKLCD10X" + depends on BOARD_ATNGW100_EVKLCD10X + default BOARD_ATNGW100_EVKLCD10X_VGA + +config BOARD_ATNGW100_EVKLCD10X_QVGA + bool "QVGA (320x240)" + +config BOARD_ATNGW100_EVKLCD10X_VGA + bool "VGA (640x480)" + +config BOARD_ATNGW100_EVKLCD10X_POW_QVGA + bool "Powertip QVGA (320x240)" + +endchoice + +endif # BOARD_ATNGW100 diff --git a/arch/avr32/boards/atngw100/Makefile b/arch/avr32/boards/atngw100/Makefile index c740aa11675..6376f5322e4 100644 --- a/arch/avr32/boards/atngw100/Makefile +++ b/arch/avr32/boards/atngw100/Makefile @@ -1 +1,2 @@ -obj-y += setup.o flash.o +obj-y += setup.o flash.o +obj-$(CONFIG_BOARD_ATNGW100_EVKLCD10X) += evklcd10x.o diff --git a/arch/avr32/boards/atngw100/evklcd10x.c b/arch/avr32/boards/atngw100/evklcd10x.c new file mode 100644 index 00000000000..8140b22b346 --- /dev/null +++ b/arch/avr32/boards/atngw100/evklcd10x.c @@ -0,0 +1,155 @@ +/* + * Board-specific setup code for the ATEVKLCD10X addon board to the ATNGW100 + * Network Gateway + * + * Copyright (C) 2008 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include +#include +#include +#include + +#include