From b0247a55f4cdd7a270e938aa39f9edb5b005a88c Mon Sep 17 00:00:00 2001 From: Hidetoshi Seto Date: Tue, 8 Apr 2008 13:31:47 +0900 Subject: [IA64] kdump: add kdump_on_fatal_mca While it is convenient that we can invoke kdump by asserting INIT via button on chassis etc., there are some situations that invoking kdump on fatal MCA is not welcomed rather than rebooting fast without dump. This patch adds a new flag 'kdump_on_fatal_mca' that is independent from 'kdump_on_init' currently available. Adding this flag enable us to turning on/off of kdump depend on the event, INIT and/or fatal MCA. Default for this flag is to take the dump. Signed-off-by: Hidetoshi Seto Signed-off-by: Tony Luck --- arch/ia64/kernel/crash.c | 31 +++++++++++++++++++++++-------- arch/ia64/kernel/mca.c | 6 +----- 2 files changed, 24 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c index fbe742ad2fd..2b01e5a1f3c 100644 --- a/arch/ia64/kernel/crash.c +++ b/arch/ia64/kernel/crash.c @@ -24,6 +24,7 @@ int kdump_status[NR_CPUS]; static atomic_t kdump_cpu_frozen; atomic_t kdump_in_progress; static int kdump_on_init = 1; +static int kdump_on_fatal_mca = 1; static inline Elf64_Word *append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data, @@ -148,7 +149,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) struct ia64_mca_notify_die *nd; struct die_args *args = data; - if (!kdump_on_init) + if (!kdump_on_init && !kdump_on_fatal_mca) return NOTIFY_DONE; if (!ia64_kimage) { @@ -174,11 +175,14 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) switch (val) { case DIE_INIT_MONARCH_PROCESS: - atomic_set(&kdump_in_progress, 1); - *(nd->monarch_cpu) = -1; + if (kdump_on_init) { + atomic_set(&kdump_in_progress, 1); + *(nd->monarch_cpu) = -1; + } break; case DIE_INIT_MONARCH_LEAVE: - machine_kdump_on_init(); + if (kdump_on_init) + machine_kdump_on_init(); break; case DIE_INIT_SLAVE_LEAVE: if (atomic_read(&kdump_in_progress)) @@ -189,16 +193,19 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) unw_init_running(kdump_cpu_freeze, NULL); break; case DIE_MCA_MONARCH_LEAVE: - /* die_register->signr indicate if MCA is recoverable */ - if (!args->signr) + /* die_register->signr indicate if MCA is recoverable */ + if (kdump_on_fatal_mca && !args->signr) { + atomic_set(&kdump_in_progress, 1); + *(nd->monarch_cpu) = -1; machine_kdump_on_init(); + } break; } return NOTIFY_DONE; } #ifdef CONFIG_SYSCTL -static ctl_table kdump_on_init_table[] = { +static ctl_table kdump_ctl_table[] = { { .ctl_name = CTL_UNNUMBERED, .procname = "kdump_on_init", @@ -207,6 +214,14 @@ static ctl_table kdump_on_init_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "kdump_on_fatal_mca", + .data = &kdump_on_fatal_mca, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, { .ctl_name = 0 } }; @@ -215,7 +230,7 @@ static ctl_table sys_table[] = { .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555, - .child = kdump_on_init_table, + .child = kdump_ctl_table, }, { .ctl_name = 0 } }; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 6c18221dba3..338dbb8c2cf 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -1266,16 +1266,12 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, } else { /* Dump buffered message to console */ ia64_mlogbuf_finish(1); -#ifdef CONFIG_KEXEC - 
atomic_set(&kdump_in_progress, 1); - monarch_cpu = -1; -#endif } + if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover) == NOTIFY_STOP) ia64_mca_spin(__func__); - if (atomic_dec_return(&mca_count) > 0) { int i; -- cgit v1.2.3 From 3975afffd32b84c0ad6797debe5abd179f44a698 Mon Sep 17 00:00:00 2001 From: Hidetoshi Seto Date: Tue, 8 Apr 2008 13:33:08 +0900 Subject: [IA64] kdump: crash.c coding style fix Fix indenting of switch statement to follow Documentation/CodingStyle. Signed-off-by: Hidetoshi Seto Signed-off-by: Tony Luck --- arch/ia64/kernel/crash.c | 52 ++++++++++++++++++++++++------------------------ 1 file changed, 26 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c index 2b01e5a1f3c..e74e15a0889 100644 --- a/arch/ia64/kernel/crash.c +++ b/arch/ia64/kernel/crash.c @@ -174,32 +174,32 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) return NOTIFY_DONE; switch (val) { - case DIE_INIT_MONARCH_PROCESS: - if (kdump_on_init) { - atomic_set(&kdump_in_progress, 1); - *(nd->monarch_cpu) = -1; - } - break; - case DIE_INIT_MONARCH_LEAVE: - if (kdump_on_init) - machine_kdump_on_init(); - break; - case DIE_INIT_SLAVE_LEAVE: - if (atomic_read(&kdump_in_progress)) - unw_init_running(kdump_cpu_freeze, NULL); - break; - case DIE_MCA_RENDZVOUS_LEAVE: - if (atomic_read(&kdump_in_progress)) - unw_init_running(kdump_cpu_freeze, NULL); - break; - case DIE_MCA_MONARCH_LEAVE: - /* die_register->signr indicate if MCA is recoverable */ - if (kdump_on_fatal_mca && !args->signr) { - atomic_set(&kdump_in_progress, 1); - *(nd->monarch_cpu) = -1; - machine_kdump_on_init(); - } - break; + case DIE_INIT_MONARCH_PROCESS: + if (kdump_on_init) { + atomic_set(&kdump_in_progress, 1); + *(nd->monarch_cpu) = -1; + } + break; + case DIE_INIT_MONARCH_LEAVE: + if (kdump_on_init) + machine_kdump_on_init(); + break; + case DIE_INIT_SLAVE_LEAVE: + if (atomic_read(&kdump_in_progress)) + unw_init_running(kdump_cpu_freeze, NULL); + break; + case DIE_MCA_RENDZVOUS_LEAVE: + if (atomic_read(&kdump_in_progress)) + unw_init_running(kdump_cpu_freeze, NULL); + break; + case DIE_MCA_MONARCH_LEAVE: + /* die_register->signr indicate if MCA is recoverable */ + if (kdump_on_fatal_mca && !args->signr) { + atomic_set(&kdump_in_progress, 1); + *(nd->monarch_cpu) = -1; + machine_kdump_on_init(); + } + break; } return NOTIFY_DONE; } -- cgit v1.2.3 From e91450161186a926d16d8fdc8669aa1998bce148 Mon Sep 17 00:00:00 2001 From: "Alan D. Brunelle" Date: Thu, 3 Apr 2008 14:30:36 -0400 Subject: [IA64] Fix unlock ordering in smp_callin One should normally unlock in the reverse order of the lock calls, and in this case there certainly is no reason not to. Signed-off-by: Alan D. 
Brunelle Signed-off-by: Tony Luck --- arch/ia64/kernel/smpboot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 32ee5979a04..16483be18c0 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -400,9 +400,9 @@ smp_callin (void) /* Setup the per cpu irq handling data structures */ __setup_vector_irq(cpuid); cpu_set(cpuid, cpu_online_map); - unlock_ipi_calllock(); per_cpu(cpu_state, cpuid) = CPU_ONLINE; spin_unlock(&vector_lock); + unlock_ipi_calllock(); smp_setup_percpu_timer(); -- cgit v1.2.3 From 6794c7526651160a75e90322cb750dcceb310d34 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 1 Apr 2008 12:29:34 +0800 Subject: [IA64] use goto to jump out do/while_each_thread do_each_thread/while_each_thread is a double loop, so should use 'goto' rather than 'break' to break out the loop. Signed-off-by: Li Zefan Signed-off-by: Tony Luck --- arch/ia64/kernel/perfmon.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index a2aabfdc80d..d1d24f4598d 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -4204,10 +4204,10 @@ pfm_check_task_exist(pfm_context_t *ctx) do_each_thread (g, t) { if (t->thread.pfm_context == ctx) { ret = 0; - break; + goto out; } } while_each_thread (g, t); - +out: read_unlock(&tasklist_lock); DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx)); -- cgit v1.2.3 From d167cb85150bd473a27df71e3116a9cc0008f5dd Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Sat, 29 Mar 2008 10:05:30 -0400 Subject: [IA64] Replace explicit jiffies tests with time_* macros. In arch/ia64/sn/kernel/xpc_{main,partition}.c Signed-off-by: Robert P. J. 
Day Signed-off-by: Tony Luck --- arch/ia64/sn/kernel/xpc_main.c | 8 ++++---- arch/ia64/sn/kernel/xpc_partition.c | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c index 81785b78bc1..9e0b164da9c 100644 --- a/arch/ia64/sn/kernel/xpc_main.c +++ b/arch/ia64/sn/kernel/xpc_main.c @@ -199,7 +199,7 @@ xpc_timeout_partition_disengage_request(unsigned long data) struct xpc_partition *part = (struct xpc_partition *) data; - DBUG_ON(jiffies < part->disengage_request_timeout); + DBUG_ON(time_before(jiffies, part->disengage_request_timeout)); (void) xpc_partition_disengaged(part); @@ -230,7 +230,7 @@ xpc_hb_beater(unsigned long dummy) { xpc_vars->heartbeat++; - if (jiffies >= xpc_hb_check_timeout) { + if (time_after_eq(jiffies, xpc_hb_check_timeout)) { wake_up_interruptible(&xpc_act_IRQ_wq); } @@ -270,7 +270,7 @@ xpc_hb_checker(void *ignore) /* checking of remote heartbeats is skewed by IRQ handling */ - if (jiffies >= xpc_hb_check_timeout) { + if (time_after_eq(jiffies, xpc_hb_check_timeout)) { dev_dbg(xpc_part, "checking remote heartbeats\n"); xpc_check_remote_hb(); @@ -305,7 +305,7 @@ xpc_hb_checker(void *ignore) /* wait for IRQ or timeout */ (void) wait_event_interruptible(xpc_act_IRQ_wq, (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) || - jiffies >= xpc_hb_check_timeout || + time_after_eq(jiffies, xpc_hb_check_timeout) || (volatile int) xpc_exiting)); } diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c index 7ba403232cb..9e97c268483 100644 --- a/arch/ia64/sn/kernel/xpc_partition.c +++ b/arch/ia64/sn/kernel/xpc_partition.c @@ -877,7 +877,7 @@ xpc_partition_disengaged(struct xpc_partition *part) disengaged = (xpc_partition_engaged(1UL << partid) == 0); if (part->disengage_request_timeout) { if (!disengaged) { - if (jiffies < part->disengage_request_timeout) { + if (time_before(jiffies, part->disengage_request_timeout)) { /* timelimit hasn't been reached yet */ return 0; } -- cgit v1.2.3 From 96ded9dadde397a9e372a650534a9ffbba97194a Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Fri, 28 Mar 2008 14:27:00 -0700 Subject: [IA64] fix getpid and set_tid_address fast system calls for pid namespaces The sys_getpid() and sys_set_tid_address() behavior changed from return current->tgid to struct pid *pid; pid = current->pids[PIDTYPE_PID].pid; return pid->numbers[pid->level].nr; But the fast system calls on ia64 still operate the old way. Patch them appropriately to let ia64 work with pid namespaces. Besides, this is one more step in deprecating of pid and tgid on task_struct. The fsys_getppid() is to be patched as well, but its logic is much more complex now, so I will make it later. One thing I'm not 100% sure is the trick with the IA64_UPID_SHIFT. On order to access the pid->level's element of an array I have to perform the following calculations pid + sizeof(struct upid) * pid->level The problem is that ia64 can only multiply float point registers, while all the offsets I have in code are in rXX ones. Fortunately, the sizeof(struct upid) is 32 bytes on ia64 (and is very unlikely to ever change), so the calculations get simpler: pid + pid->level << 5 So, I introduce the IA64_UPID_SHIFT and use the shl instruction. I also looked at how gcc compiles the similar place and found that it makes it with shift as well. Is this OK to do so? Tested with ski emulator with 2.6.24 kernel, but fits 2.6.25-rc4 and 2.6.25-rc4-mm1 as well. 
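In C terms, the fast path after this patch does roughly the following (an illustrative sketch against the 2.6.25-era structures, not code added by the patch; fsys_getpid_equivalent is a made-up name):

#include <linux/pid.h>
#include <linux/sched.h>

static pid_t fsys_getpid_equivalent(struct task_struct *cur)
{
	/* r17 = current->group_leader->pids[PIDTYPE_PID].pid */
	struct pid *pid = cur->group_leader->pids[PIDTYPE_PID].pid;

	/* The assembly reaches numbers[pid->level] by adding
	 * pid->level << IA64_UPID_SHIFT to &pid->numbers[0], because
	 * sizeof(struct upid) is 32 == 1 << 5 bytes on ia64. */
	return pid->numbers[pid->level].nr;
}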
Signed-off-by: Pavel Emelyanov Cc: David Mosberger-Tang Cc: Hidetoshi Seto Cc: Fenghua Yu Cc: Amy Griffis Signed-off-by: Andrew Morton Signed-off-by: Tony Luck --- arch/ia64/kernel/asm-offsets.c | 7 +++++++ arch/ia64/kernel/fsys.S | 34 ++++++++++++++++++++++++++++++---- 2 files changed, 37 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 0aebc6f79e9..f7bc40dee43 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c @@ -7,6 +7,7 @@ #define ASM_OFFSETS_C 1 #include +#include #include #include @@ -34,6 +35,9 @@ void foo(void) DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe)); DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info)); + BUILD_BUG_ON(sizeof(struct upid) != 32); + DEFINE(IA64_UPID_SHIFT, 5); + BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); @@ -45,6 +49,9 @@ void foo(void) DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked)); DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid)); DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader)); + DEFINE(IA64_TASK_TGIDLINK_OFFSET, offsetof (struct task_struct, pids[PIDTYPE_PID].pid)); + DEFINE(IA64_PID_LEVEL_OFFSET, offsetof (struct pid, level)); + DEFINE(IA64_PID_UPID_OFFSET, offsetof (struct pid, numbers[0])); DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending)); DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid)); DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent)); diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index 44841971f07..3f570e6fcd9 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S @@ -61,13 +61,29 @@ ENTRY(fsys_getpid) .prologue .altrp b6 .body + add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16 + ;; + ld8 r17=[r17] // r17 = current->group_leader add r9=TI_FLAGS+IA64_TASK_SIZE,r16 ;; ld4 r9=[r9] - add r8=IA64_TASK_TGID_OFFSET,r16 + add r17=IA64_TASK_TGIDLINK_OFFSET,r17 ;; and r9=TIF_ALLWORK_MASK,r9 - ld4 r8=[r8] // r8 = current->tgid + ld8 r17=[r17] // r17 = current->group_leader->pids[PIDTYPE_PID].pid + ;; + add r8=IA64_PID_LEVEL_OFFSET,r17 + ;; + ld4 r8=[r8] // r8 = pid->level + add r17=IA64_PID_UPID_OFFSET,r17 // r17 = &pid->numbers[0] + ;; + shl r8=r8,IA64_UPID_SHIFT + ;; + add r17=r17,r8 // r17 = &pid->numbers[pid->level] + ;; + ld4 r8=[r17] // r8 = pid->numbers[pid->level].nr + ;; + mov r17=0 ;; cmp.ne p8,p0=0,r9 (p8) br.spnt.many fsys_fallback_syscall @@ -126,15 +142,25 @@ ENTRY(fsys_set_tid_address) .altrp b6 .body add r9=TI_FLAGS+IA64_TASK_SIZE,r16 + add r17=IA64_TASK_TGIDLINK_OFFSET,r16 ;; ld4 r9=[r9] tnat.z p6,p7=r32 // check argument register for being NaT + ld8 r17=[r17] // r17 = current->pids[PIDTYPE_PID].pid ;; and r9=TIF_ALLWORK_MASK,r9 - add r8=IA64_TASK_PID_OFFSET,r16 + add r8=IA64_PID_LEVEL_OFFSET,r17 add r18=IA64_TASK_CLEAR_CHILD_TID_OFFSET,r16 ;; - ld4 r8=[r8] + ld4 r8=[r8] // r8 = pid->level + add r17=IA64_PID_UPID_OFFSET,r17 // r17 = &pid->numbers[0] + ;; + shl r8=r8,IA64_UPID_SHIFT + ;; + add r17=r17,r8 // r17 = &pid->numbers[pid->level] + ;; + ld4 r8=[r17] // r8 = pid->numbers[pid->level].nr + ;; cmp.ne p8,p0=0,r9 mov r17=-1 ;; -- cgit v1.2.3 From 34e1ceb1881ec895ad9b1b52d073f414f3aa87a9 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 28 Mar 2008 14:27:02 -0700 Subject: [IA64] kprobes: kprobe-booster for ia64 Add kprobe-booster support on ia64. 
Kprobe-booster improves the performance of kprobes by eliminating single-step, where possible. Currently, kprobe-booster is implemented on x86 and x86-64. This is an ia64 port. On ia64, kprobe-booster executes a copied bundle directly, instead of single stepping. Bundles which have B or X unit and which may cause an exception (including break) are not executed directly. And also, to prevent hitting break exceptions on the copied bundle, only the hindmost kprobe is executed directly if several kprobes share a bundle and are placed in different slots. Note: set_brl_inst() is used for preparing an instruction buffer(it does not modify any active code), so it does not need any atomic operation. Signed-off-by: Masami Hiramatsu Cc: Anil S Keshavamurthy Cc: Ananth N Mavinakayanahalli Cc: bibo,mao Cc: Rusty Lynch Cc: Prasanna S Panchamukhi Cc: Jim Keniston Cc: Shaohua Li Signed-off-by: Andrew Morton Signed-off-by: Tony Luck --- arch/ia64/kernel/kprobes.c | 133 ++++++++++++++++++++++++++++++++++++--------- 1 file changed, 107 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 8d9a446a0d1..233434f4f88 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -78,6 +78,20 @@ static enum instruction_type bundle_encoding[32][3] = { { u, u, u }, /* 1F */ }; +/* Insert a long branch code */ +static void __kprobes set_brl_inst(void *from, void *to) +{ + s64 rel = ((s64) to - (s64) from) >> 4; + bundle_t *brl; + brl = (bundle_t *) ((u64) from & ~0xf); + brl->quad0.template = 0x05; /* [MLX](stop) */ + brl->quad0.slot0 = NOP_M_INST; /* nop.m 0x0 */ + brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2; + brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46); + /* brl.cond.sptk.many.clr rel<<4 (qp=0) */ + brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff); +} + /* * In this function we check to see if the instruction * is IP relative instruction and update the kprobe @@ -496,6 +510,77 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip; } +/* Check the instruction in the slot is break */ +static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot) +{ + unsigned int major_opcode; + unsigned int template = bundle->quad0.template; + unsigned long kprobe_inst; + + /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */ + if (slot == 1 && bundle_encoding[template][1] == L) + slot++; + + /* Get Kprobe probe instruction at given slot*/ + get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode); + + /* For break instruction, + * Bits 37:40 Major opcode to be zero + * Bits 27:32 X6 to be zero + * Bits 32:35 X3 to be zero + */ + if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) { + /* Not a break instruction */ + return 0; + } + + /* Is a break instruction */ + return 1; +} + +/* + * In this function, we check whether the target bundle modifies IP or + * it triggers an exception. If so, it cannot be boostable. 
+ */ +static int __kprobes can_boost(bundle_t *bundle, uint slot, + unsigned long bundle_addr) +{ + unsigned int template = bundle->quad0.template; + + do { + if (search_exception_tables(bundle_addr + slot) || + __is_ia64_break_inst(bundle, slot)) + return 0; /* exception may occur in this bundle*/ + } while ((++slot) < 3); + template &= 0x1e; + if (template >= 0x10 /* including B unit */ || + template == 0x04 /* including X unit */ || + template == 0x06) /* undefined */ + return 0; + + return 1; +} + +/* Prepare long jump bundle and disables other boosters if need */ +static void __kprobes prepare_booster(struct kprobe *p) +{ + unsigned long addr = (unsigned long)p->addr & ~0xFULL; + unsigned int slot = (unsigned long)p->addr & 0xf; + struct kprobe *other_kp; + + if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) { + set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1); + p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE; + } + + /* disables boosters in previous slots */ + for (; addr < (unsigned long)p->addr; addr++) { + other_kp = get_kprobe((void *)addr); + if (other_kp) + other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE; + } +} + int __kprobes arch_prepare_kprobe(struct kprobe *p) { unsigned long addr = (unsigned long) p->addr; @@ -530,6 +615,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp); + prepare_booster(p); + return 0; } @@ -543,7 +630,9 @@ void __kprobes arch_arm_kprobe(struct kprobe *p) src = &p->opcode.bundle; flush_icache_range((unsigned long)p->ainsn.insn, - (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t)); + (unsigned long)p->ainsn.insn + + sizeof(kprobe_opcode_t) * MAX_INSN_SIZE); + switch (p->ainsn.slot) { case 0: dest->quad0.slot0 = src->quad0.slot0; @@ -584,13 +673,13 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { mutex_lock(&kprobe_mutex); - free_insn_slot(p->ainsn.insn, 0); + free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE); mutex_unlock(&kprobe_mutex); } /* * We are resuming execution after a single step fault, so the pt_regs * structure reflects the register state after we executed the instruction - * located in the kprobe (p->ainsn.insn.bundle). We still need to adjust + * located in the kprobe (p->ainsn.insn->bundle). We still need to adjust * the ip to point back to the original stack address. To set the IP address * to original stack address, handle the case where we need to fixup the * relative IP address and/or fixup branch register. 
@@ -607,7 +696,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) if (slot == 1 && bundle_encoding[template][1] == L) slot = 2; - if (p->ainsn.inst_flag) { + if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) { if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) { /* Fix relative IP address */ @@ -686,33 +775,12 @@ static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs) static int __kprobes is_ia64_break_inst(struct pt_regs *regs) { unsigned int slot = ia64_psr(regs)->ri; - unsigned int template, major_opcode; - unsigned long kprobe_inst; unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip; bundle_t bundle; memcpy(&bundle, kprobe_addr, sizeof(bundle_t)); - template = bundle.quad0.template; - - /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */ - if (slot == 1 && bundle_encoding[template][1] == L) - slot++; - /* Get Kprobe probe instruction at given slot*/ - get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode); - - /* For break instruction, - * Bits 37:40 Major opcode to be zero - * Bits 27:32 X6 to be zero - * Bits 32:35 X3 to be zero - */ - if (major_opcode || ((kprobe_inst >> 27) & 0x1FF) ) { - /* Not a break instruction */ - return 0; - } - - /* Is a break instruction */ - return 1; + return __is_ia64_break_inst(&bundle, slot); } static int __kprobes pre_kprobes_handler(struct die_args *args) @@ -802,6 +870,19 @@ static int __kprobes pre_kprobes_handler(struct die_args *args) return 1; ss_probe: +#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM) + if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { + /* Boost up -- we can execute copied instructions directly */ + ia64_psr(regs)->ri = p->ainsn.slot; + regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL; + /* turn single stepping off */ + ia64_psr(regs)->ss = 0; + + reset_current_kprobe(); + preempt_enable_no_resched(); + return 1; + } +#endif prepare_ss(p, regs); kcb->kprobe_status = KPROBE_HIT_SS; return 1; -- cgit v1.2.3 From b34eb53cdcb4f49fd31d78d0e385240820ed9063 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Fri, 28 Mar 2008 14:27:03 -0700 Subject: [IA64] make IOMMU respect the segment boundary limits IA64's IOMMU implementation allocates memory areas spanning LLD's segment boundary limit. It forces low level drivers to have a workaround to adjust scatter lists that the IOMMU builds. We are in the process of making all the IOMMUs respect the segment boundary limits to remove such work around in LLDs. This patch is for IA64's IOMMU. 
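The allocator below relies on iommu_is_span_boundary() from the generic IOMMU helper; its core logic is essentially the following (a sketch of the 2.6.25-era lib/iommu-helper.c, shown only for illustration and with a made-up function name):

#include <linux/kernel.h>
#include <linux/log2.h>

/* Non-zero if an allocation of 'nr' IOVA pages starting at bitmap
 * index 'index' would cross a segment boundary of 'boundary_size'
 * pages.  'shift' is the IOVA page frame of bitmap bit 0
 * (ioc->ibase >> iovp_shift in the sba_iommu code below). */
static int is_span_boundary_sketch(unsigned int index, unsigned int nr,
				   unsigned long shift,
				   unsigned long boundary_size)
{
	BUG_ON(!is_power_of_2(boundary_size));

	shift = (shift + index) & (boundary_size - 1);
	return shift + nr > boundary_size;
}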
Signed-off-by: FUJITA Tomonori Signed-off-by: Andrew Morton Signed-off-by: Tony Luck --- arch/ia64/Kconfig | 3 +++ arch/ia64/hp/common/sba_iommu.c | 56 ++++++++++++++++++++++++++++------------- 2 files changed, 42 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 8fa3faf5ef1..1b73ffe746d 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -611,6 +611,9 @@ config IRQ_PER_CPU bool default y +config IOMMU_HELPER + def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC) + source "arch/ia64/hp/sim/Kconfig" source "arch/ia64/Kconfig.debug" diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 523eae6d3e4..9409de5c944 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -35,6 +35,7 @@ #include #include /* hweight64() */ #include +#include #include /* ia64_get_itc() */ #include @@ -460,6 +461,13 @@ get_iovp_order (unsigned long size) return order; } +static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr, + unsigned int bitshiftcnt) +{ + return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3) + + bitshiftcnt; +} + /** * sba_search_bitmap - find free space in IO PDIR resource bitmap * @ioc: IO MMU structure which owns the pdir we are interested in. @@ -471,15 +479,25 @@ get_iovp_order (unsigned long size) * Cool perf optimization: search for log2(size) bits at a time. */ static SBA_INLINE unsigned long -sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint) +sba_search_bitmap(struct ioc *ioc, struct device *dev, + unsigned long bits_wanted, int use_hint) { unsigned long *res_ptr; unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); - unsigned long flags, pide = ~0UL; + unsigned long flags, pide = ~0UL, tpide; + unsigned long boundary_size; + unsigned long shift; + int ret; ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0); ASSERT(res_ptr < res_end); + boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1; + boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift; + + BUG_ON(ioc->ibase & ~iovp_mask); + shift = ioc->ibase >> iovp_shift; + spin_lock_irqsave(&ioc->res_lock, flags); /* Allow caller to force a search through the entire resource space */ @@ -504,9 +522,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint) if (likely(*res_ptr != ~0UL)) { bitshiftcnt = ffz(*res_ptr); *res_ptr |= (1UL << bitshiftcnt); - pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); - pide <<= 3; /* convert to bit address */ - pide += bitshiftcnt; + pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt); ioc->res_bitshift = bitshiftcnt + bits_wanted; goto found_it; } @@ -535,11 +551,13 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint) DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); ASSERT(0 != mask); for (; mask ; mask <<= o, bitshiftcnt += o) { - if(0 == ((*res_ptr) & mask)) { + tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt); + ret = iommu_is_span_boundary(tpide, bits_wanted, + shift, + boundary_size); + if ((0 == ((*res_ptr) & mask)) && !ret) { *res_ptr |= mask; /* mark resources busy! 
*/ - pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); - pide <<= 3; /* convert to bit address */ - pide += bitshiftcnt; + pide = tpide; ioc->res_bitshift = bitshiftcnt + bits_wanted; goto found_it; } @@ -560,6 +578,11 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint) end = res_end - qwords; for (; res_ptr < end; res_ptr++) { + tpide = ptr_to_pide(ioc, res_ptr, 0); + ret = iommu_is_span_boundary(tpide, bits_wanted, + shift, boundary_size); + if (ret) + goto next_ptr; for (i = 0 ; i < qwords ; i++) { if (res_ptr[i] != 0) goto next_ptr; @@ -572,8 +595,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint) res_ptr[i] = ~0UL; res_ptr[i] |= RESMAP_MASK(bits); - pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); - pide <<= 3; /* convert to bit address */ + pide = tpide; res_ptr += qwords; ioc->res_bitshift = bits; goto found_it; @@ -605,7 +627,7 @@ found_it: * resource bit map. */ static int -sba_alloc_range(struct ioc *ioc, size_t size) +sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size) { unsigned int pages_needed = size >> iovp_shift; #ifdef PDIR_SEARCH_TIMING @@ -622,9 +644,9 @@ sba_alloc_range(struct ioc *ioc, size_t size) /* ** "seek and ye shall find"...praying never hurts either... */ - pide = sba_search_bitmap(ioc, pages_needed, 1); + pide = sba_search_bitmap(ioc, dev, pages_needed, 1); if (unlikely(pide >= (ioc->res_size << 3))) { - pide = sba_search_bitmap(ioc, pages_needed, 0); + pide = sba_search_bitmap(ioc, dev, pages_needed, 0); if (unlikely(pide >= (ioc->res_size << 3))) { #if DELAYED_RESOURCE_CNT > 0 unsigned long flags; @@ -653,7 +675,7 @@ sba_alloc_range(struct ioc *ioc, size_t size) } spin_unlock_irqrestore(&ioc->saved_lock, flags); - pide = sba_search_bitmap(ioc, pages_needed, 0); + pide = sba_search_bitmap(ioc, dev, pages_needed, 0); if (unlikely(pide >= (ioc->res_size << 3))) panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", ioc->ioc_hpa); @@ -936,7 +958,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir) spin_unlock_irqrestore(&ioc->res_lock, flags); #endif - pide = sba_alloc_range(ioc, size); + pide = sba_alloc_range(ioc, dev, size); iovp = (dma_addr_t) pide << iovp_shift; @@ -1373,7 +1395,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev, dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask; ASSERT(dma_len <= DMA_CHUNK_SIZE); dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG - | (sba_alloc_range(ioc, dma_len) << iovp_shift) + | (sba_alloc_range(ioc, dev, dma_len) << iovp_shift) | dma_offset); n_mappings++; } -- cgit v1.2.3 From 734bc367b4830a4c80502a3f9ded7428b1c652e3 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 28 Mar 2008 14:27:04 -0700 Subject: [IA64] remove redundant display of free swap space in show_mem() show_mem() has no need to print the amount of free swap space manually because show_free_areas() does this already and is called by the former. 
The two outputs only differ in text formatting: printk("Free swap = %lukB\n", ...); printk("Free swap: %6ldkB\n", ...); Signed-off-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Tony Luck --- arch/ia64/mm/contig.c | 2 -- arch/ia64/mm/discontig.c | 2 -- 2 files changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 344f64eca7a..0479661fa41 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -45,8 +45,6 @@ void show_mem(void) printk(KERN_INFO "Mem-info:\n"); show_free_areas(); - printk(KERN_INFO "Free swap: %6ldkB\n", - nr_swap_pages<<(PAGE_SHIFT-10)); printk(KERN_INFO "Node memory in pages:\n"); for_each_online_pgdat(pgdat) { unsigned long present; diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index ee5e68b2af9..ffee1ea00bb 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -522,8 +522,6 @@ void show_mem(void) printk(KERN_INFO "Mem-info:\n"); show_free_areas(); - printk(KERN_INFO "Free swap: %6ldkB\n", - nr_swap_pages<<(PAGE_SHIFT-10)); printk(KERN_INFO "Node memory in pages:\n"); for_each_online_pgdat(pgdat) { unsigned long present; -- cgit v1.2.3 From 5cf1f7cef1c67b5c81736f00e81a2890e07041b9 Mon Sep 17 00:00:00 2001 From: "S.Caglar Onur" Date: Fri, 28 Mar 2008 14:27:05 -0700 Subject: [IA64] arch/ia64/kernel/: use time_* macros The functions time_before, time_before_eq, time_after, and time_after_eq are more robust for comparing jiffies against other values. So use the time_after() & time_before() macros, defined at linux/jiffies.h, which deal with wrapping correctly [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: S.Caglar Onur Reviewed-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Tony Luck --- arch/ia64/kernel/irq_ia64.c | 2 +- arch/ia64/kernel/mca.c | 4 +++- arch/ia64/kernel/unaligned.c | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index d8be23fbe6b..5538471e8d6 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -472,7 +472,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) static unsigned char count; static long last_time; - if (jiffies - last_time > 5*HZ) + if (time_after(jiffies, last_time + 5 * HZ)) count = 0; if (++count < 5) { last_time = jiffies; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 338dbb8c2cf..1ae51291087 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -69,6 +69,7 @@ * 2007-04-27 Russ Anderson * Support multiple cpus going through OS_MCA in the same event. */ +#include #include #include #include @@ -293,7 +294,8 @@ static void ia64_mlogbuf_dump_from_init(void) if (mlogbuf_finished) return; - if (mlogbuf_timestamp && (mlogbuf_timestamp + 30*HZ > jiffies)) { + if (mlogbuf_timestamp && + time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) { printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT " " and the system seems to be messed up.\n"); ia64_mlogbuf_finish(0); diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index 6903361d11a..ff0e7c10faa 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c @@ -13,6 +13,7 @@ * 2001/08/13 Correct size of extended floats (float_fsz) from 16 to 10 bytes. * 2001/01/17 Add support emulation of unaligned kernel accesses. 
*/ +#include #include #include #include @@ -1290,7 +1291,7 @@ within_logging_rate_limit (void) { static unsigned long count, last_time; - if (jiffies - last_time > 5*HZ) + if (time_after(jiffies, last_time + 5 * HZ)) count = 0; if (count < 5) { last_time = jiffies; -- cgit v1.2.3 From 273988fa4dffd1b1e6deb3de18b979a44e9d8732 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 9 Apr 2008 13:05:41 -0700 Subject: [IA64] Untangle sync_icache_dcache() page size determination Untangle the chaos of page size determination in this function by simply using PAGE_SIZE << compound_order(). Signed-off-by: Christoph Lameter Signed-off-by: Tony Luck --- arch/ia64/mm/init.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index a4ca657c72c..da05893294b 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -58,7 +58,6 @@ __ia64_sync_icache_dcache (pte_t pte) { unsigned long addr; struct page *page; - unsigned long order; page = pte_page(pte); addr = (unsigned long) page_address(page); @@ -66,12 +65,7 @@ __ia64_sync_icache_dcache (pte_t pte) if (test_bit(PG_arch_1, &page->flags)) return; /* i-cache is already coherent with d-cache */ - if (PageCompound(page)) { - order = compound_order(page); - flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT)); - } - else - flush_icache_range(addr, addr + PAGE_SIZE); + flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page))); set_bit(PG_arch_1, &page->flags); /* mark page as clean */ } -- cgit v1.2.3 From 98075d245a5bc4aeebc2e9f16fa8b089a5c200ac Mon Sep 17 00:00:00 2001 From: Zoltan Menyhart Date: Fri, 11 Apr 2008 15:21:35 -0700 Subject: [IA64] Fix NUMA configuration issue There is a NUMA memory configuration issue in 2.6.24: A 2-node machine of ours has got the following memory layout: Node 0: 0 - 2 Gbytes Node 0: 4 - 8 Gbytes Node 1: 8 - 16 Gbytes Node 0: 16 - 18 Gbytes "efi_memmap_init()" merges the three last ranges into one. "register_active_ranges()" is called as follows: efi_memmap_walk(register_active_ranges, NULL); i.e. once for the 4 - 18 Gbytes range. It picks up the node number from the start address, and registers all the memory for the node #0. "register_active_ranges()" should be called as follows to make sure there is no merged address range at its entry: efi_memmap_walk(filter_memory, register_active_ranges); "filter_memory()" is similar to "filter_rsvd_memory()", but the reserved memory ranges are not filtered out. Signed-off-by: Zoltan Menyhart Signed-off-by: Tony Luck --- arch/ia64/kernel/setup.c | 23 +++++++++++++++++++++++ arch/ia64/mm/contig.c | 2 +- arch/ia64/mm/discontig.c | 2 +- arch/ia64/mm/init.c | 6 ++---- 4 files changed, 27 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 4aa9eaea76c..c85b7dd6ef3 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -176,6 +176,29 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg) return 0; } +/* + * Similar to "filter_rsvd_memory()", but the reserved memory ranges + * are not filtered out. 
+ */ +int __init +filter_memory(unsigned long start, unsigned long end, void *arg) +{ + void (*func)(unsigned long, unsigned long, int); + +#if IGNORE_PFN0 + if (start == PAGE_OFFSET) { + printk(KERN_WARNING "warning: skipping physical page 0\n"); + start += PAGE_SIZE; + if (start >= end) + return 0; + } +#endif + func = arg; + if (start < end) + call_pernode_memory(__pa(start), end - start, func); + return 0; +} + static void __init sort_regions (struct rsvd_region *rsvd_region, int max) { diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 0479661fa41..798bf9835a5 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -253,7 +253,7 @@ paging_init (void) max_zone_pfns[ZONE_NORMAL] = max_low_pfn; #ifdef CONFIG_VIRTUAL_MEM_MAP - efi_memmap_walk(register_active_ranges, NULL); + efi_memmap_walk(filter_memory, register_active_ranges); efi_memmap_walk(find_largest_hole, (u64 *)&max_gap); if (max_gap < LARGE_GAP) { vmem_map = (struct page *) 0; diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index ffee1ea00bb..96d5fbfa44a 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -444,7 +444,7 @@ void __init find_memory(void) mem_data[node].min_pfn = ~0UL; } - efi_memmap_walk(register_active_ranges, NULL); + efi_memmap_walk(filter_memory, register_active_ranges); /* * Initialize the boot memory maps in reverse order since that's diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index da05893294b..5c1de53c8c1 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -547,12 +547,10 @@ find_largest_hole (u64 start, u64 end, void *arg) #endif /* CONFIG_VIRTUAL_MEM_MAP */ int __init -register_active_ranges(u64 start, u64 end, void *arg) +register_active_ranges(u64 start, u64 len, int nid) { - int nid = paddr_to_nid(__pa(start)); + u64 end = start + len; - if (nid < 0) - nid = 0; #ifdef CONFIG_KEXEC if (start > crashk_res.start && start < crashk_res.end) start = crashk_res.end; -- cgit v1.2.3 From 072f042df335d7e0da2027637bcf720d7ff1589b Mon Sep 17 00:00:00 2001 From: Takao Indoh Date: Tue, 15 Apr 2008 05:59:54 -0400 Subject: [IA64] kdump: Add crash_save_vmcoreinfo for INIT This patch fixes the problem that kdump by INIT does not work if we use makedumpfile. The problem is that after INIT is issued, 2nd kernel starts and makedumpfile fails with the following error message. /proc/vmcore doesn't contain vmcoreinfo. '-x' or '-i' must be specified. makedumpfile Failed. The cause of this problem is that kernel does not call crash_save_vmcoreinfo. When kdump starts by panic or sysrq-trigger, crash_save_vmcoreinfo is called by crash_kexec. But this function is not called when kdump starts by INIT. The Attached patch fixes this. This patch just adds crash_save_vmcoreinfo into machine_kdump_on_init so that crash_save_vmcoreinfo can be called when kdump starts by INIT. I tested this patch with linux-2.6.25-rc9 and I confirmed it worked. Signed-off-by: Takao Indoh Signed-off-by: Tony Luck --- arch/ia64/kernel/crash.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c index e74e15a0889..90ef338cf46 100644 --- a/arch/ia64/kernel/crash.c +++ b/arch/ia64/kernel/crash.c @@ -119,6 +119,7 @@ machine_crash_shutdown(struct pt_regs *pt) static void machine_kdump_on_init(void) { + crash_save_vmcoreinfo(); local_irq_disable(); kexec_disable_iosapic(); machine_kexec(ia64_kimage); -- cgit v1.2.3
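For context on the fix above, the two ways of entering kdump can be sketched as follows (illustrative pseudo-C, not kernel code; the two function names are made up for the comparison):

/* panic / sysrq-trigger path: kernel/kexec.c already saves the
 * vmcoreinfo note before jumping into the capture kernel */
void kdump_via_crash_kexec(struct pt_regs *regs)
{
	crash_save_vmcoreinfo();
	machine_crash_shutdown(regs);
	machine_kexec(kexec_crash_image);
}

/* INIT path on ia64: machine_kexec() was reached without going through
 * crash_kexec(), so the vmcoreinfo note has to be saved explicitly --
 * the one-line change in machine_kdump_on_init() above */
void kdump_via_init(void)
{
	crash_save_vmcoreinfo();
	local_irq_disable();
	kexec_disable_iosapic();
	machine_kexec(ia64_kimage);
}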