Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/.gitignore              |    1
-rw-r--r--  arch/ia64/kernel/acpi.c                  |    8
-rw-r--r--  arch/ia64/kernel/efi.c                   |   21
-rw-r--r--  arch/ia64/kernel/gate.lds.S              |    3
-rw-r--r--  arch/ia64/kernel/iosapic.c               |   18
-rw-r--r--  arch/ia64/kernel/irq.c                   |    6
-rw-r--r--  arch/ia64/kernel/mca.c                   |    2
-rw-r--r--  arch/ia64/kernel/msi_ia64.c              |    2
-rw-r--r--  arch/ia64/kernel/palinfo.c               |   91
-rw-r--r--  arch/ia64/kernel/patch.c                 |    3
-rw-r--r--  arch/ia64/kernel/perfmon.c               |  165
-rw-r--r--  arch/ia64/kernel/perfmon_default_smpl.c  |    8
-rw-r--r--  arch/ia64/kernel/process.c               |    3
-rw-r--r--  arch/ia64/kernel/setup.c                 |  151
-rw-r--r--  arch/ia64/kernel/signal.c                |    4
-rw-r--r--  arch/ia64/kernel/smpboot.c               |   21
-rw-r--r--  arch/ia64/kernel/traps.c                 |    6
-rw-r--r--  arch/ia64/kernel/unaligned.c             |    5
18 files changed, 298 insertions, 220 deletions
diff --git a/arch/ia64/kernel/.gitignore b/arch/ia64/kernel/.gitignore
new file mode 100644
index 00000000000..98307759a3b
--- /dev/null
+++ b/arch/ia64/kernel/.gitignore
@@ -0,0 +1 @@
+gate.lds
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 3d45d24a9d6..897e2083a3b 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -678,9 +678,11 @@ int __init acpi_boot_init(void)
 	/* I/O APIC */
 
 	if (acpi_table_parse_madt
-	    (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
-		printk(KERN_ERR PREFIX
-		       "Error parsing MADT - no IOSAPIC entries\n");
+	    (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) {
+		if (!ia64_platform_is("sn2"))
+			printk(KERN_ERR PREFIX
+			       "Error parsing MADT - no IOSAPIC entries\n");
+	}
 
 	/* System-Level Interrupt Routing */
 
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 73ca86d0381..8e8f8b6193e 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -218,9 +218,10 @@ efi_gettimeofday (struct timespec *ts)
 {
 	efi_time_t tm;
 
-	memset(ts, 0, sizeof(ts));
-	if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS)
+	if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS) {
+		memset(ts, 0, sizeof(*ts));
 		return;
+	}
 
 	ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
 	ts->tv_nsec = tm.nanosecond;
@@ -967,7 +968,7 @@ find_memmap_space (void)
 * to use.  We can allocate partial granules only if the unavailable
 * parts exist, and are WB.
 */
-void
+unsigned long
 efi_memmap_init(unsigned long *s, unsigned long *e)
 {
 	struct kern_memdesc *k, *prev = NULL;
@@ -1084,11 +1085,14 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
 	/* reserve the memory we are using for kern_memmap */
 	*s = (u64)kern_memmap;
 	*e = (u64)++k;
+
+	return total_mem;
 }
 
 void
 efi_initialize_iomem_resources(struct resource *code_resource,
-			       struct resource *data_resource)
+			       struct resource *data_resource,
+			       struct resource *bss_resource)
 {
 	struct resource *res;
 	void *efi_map_start, *efi_map_end, *p;
@@ -1109,7 +1113,7 @@ efi_initialize_iomem_resources(struct resource *code_resource,
 		if (md->num_pages == 0) /* should not happen */
 			continue;
 
-		flags = IORESOURCE_MEM;
+		flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 		switch (md->type) {
 
 			case EFI_MEMORY_MAPPED_IO:
@@ -1131,12 +1135,11 @@ efi_initialize_iomem_resources(struct resource *code_resource,
 
 			case EFI_ACPI_MEMORY_NVS:
 				name = "ACPI Non-volatile Storage";
-				flags |= IORESOURCE_BUSY;
 				break;
 
 			case EFI_UNUSABLE_MEMORY:
 				name = "reserved";
-				flags |= IORESOURCE_BUSY | IORESOURCE_DISABLED;
+				flags |= IORESOURCE_DISABLED;
 				break;
 
 			case EFI_RESERVED_TYPE:
@@ -1145,7 +1148,6 @@ efi_initialize_iomem_resources(struct resource *code_resource,
 			case EFI_ACPI_RECLAIM_MEMORY:
 			default:
 				name = "reserved";
-				flags |= IORESOURCE_BUSY;
 				break;
 			}
 
@@ -1169,6 +1171,7 @@ efi_initialize_iomem_resources(struct resource *code_resource,
 			 */
 			insert_resource(res, code_resource);
 			insert_resource(res, data_resource);
+			insert_resource(res, bss_resource);
 #ifdef CONFIG_KEXEC
 			insert_resource(res, &efi_memmap_res);
 			insert_resource(res, &boot_param_res);
@@ -1227,7 +1230,7 @@ kdump_find_rsvd_region (unsigned long size,
 
 #ifdef CONFIG_PROC_VMCORE
 /* locate the size find a the descriptor at a certain address */
-unsigned long
+unsigned long __init
 vmcore_find_descriptor_size (unsigned long address)
 {
 	void *efi_map_start, *efi_map_end, *p;
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S
index 44817d97ab4..454d7a7dfa9 100644
--- a/arch/ia64/kernel/gate.lds.S
+++ b/arch/ia64/kernel/gate.lds.S
@@ -20,6 +20,8 @@ SECTIONS
   .gnu.version_d : { *(.gnu.version_d) }
   .gnu.version_r : { *(.gnu.version_r) }
 
+  .note : { *(.note*) }		:readable :note
+
   .dynamic : { *(.dynamic) }		:readable :dynamic
 
   /*
@@ -83,6 +85,7 @@ PHDRS
   epc		PT_LOAD	FILEHDR PHDRS FLAGS(1);	/* PF_X */
 #endif
   dynamic	PT_DYNAMIC	FLAGS(4);	/* PF_R */
+  note		PT_NOTE		FLAGS(4);	/* PF_R */
   unwind	PT_IA_64_UNWIND;
 }
 
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index cfe4654838f..274a5938304 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -748,6 +748,15 @@ skip_numa_setup:
 #endif
 }
 
+static inline unsigned char choose_dmode(void)
+{
+#ifdef CONFIG_SMP
+	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+		return IOSAPIC_LOWEST_PRIORITY;
+#endif
+	return IOSAPIC_FIXED;
+}
+
 /*
  * ACPI can describe IOSAPIC interrupts via static tables and namespace
  * methods.  This provides an interface to register those interrupts and
@@ -762,6 +771,7 @@ iosapic_register_intr (unsigned int gsi,
 	unsigned long flags;
 	struct iosapic_rte_info *rte;
 	u32 low32;
+	unsigned char dmode;
 
 	/*
 	 * If this GSI has already been registered (i.e., it's a
@@ -791,8 +801,8 @@ iosapic_register_intr (unsigned int gsi,
 
 	spin_lock(&irq_desc[irq].lock);
 	dest = get_target_cpu(gsi, irq);
-	err = register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY,
-			    polarity, trigger);
+	dmode = choose_dmode();
+	err = register_intr(gsi, irq, dmode, polarity, trigger);
 
 	if (err < 0) {
 		spin_unlock(&irq_desc[irq].lock);
 		irq = err;
@@ -961,10 +971,12 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
 {
 	int vector, irq;
 	unsigned int dest = cpu_physical_id(smp_processor_id());
+	unsigned char dmode;
 
 	irq = vector = isa_irq_to_vector(isa_irq);
 	BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
-	register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY, polarity, trigger);
+	dmode = choose_dmode();
+	register_intr(gsi, irq, dmode, polarity, trigger);
 
 	DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
 	    isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level",
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 44be1c952b7..6dee579f205 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -61,9 +61,11 @@ int show_interrupts(struct seq_file *p, void *v)
 	unsigned long flags;
 
 	if (i == 0) {
-		seq_printf(p, "           ");
+		char cpuname[16];
+		seq_printf(p, "           ");
 		for_each_online_cpu(j) {
-			seq_printf(p, "CPU%d       ",j);
+			snprintf(cpuname, 10, "CPU%d", j);
+			seq_printf(p, "%10s ", cpuname);
 		}
 		seq_putc(p, '\n');
 	}
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index cc87025e8f5..10b48cd15a8 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -571,7 +571,7 @@ out:
 *  Outputs
 *      None
 */
-static void __init
+void
 ia64_mca_register_cpev (int cpev)
 {
 	/* Register the CPE interrupt vector with SAL */
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 2fdbd5c3f21..e86d0295979 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -109,7 +109,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 	write_msi_msg(irq, &msg);
 	set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
 
-	return irq;
+	return 0;
 }
 
 void ia64_teardown_msi_irq(unsigned int irq)
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 6ef6ffb943a..396004e8cd1 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -470,7 +470,7 @@ register_info(char *page)
 	return p - page;
 }
 
-static const char *proc_features[]={
+static char *proc_features_0[]={	/* Feature set 0 */
 	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
 	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
 	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
@@ -502,25 +502,92 @@ static const char *proc_features[]={
 	"Enable BERR promotion"
 };
 
+static char *proc_features_16[]={	/* Feature set 16 */
+	"Disable ETM",
+	"Enable ETM",
+	"Enable MCA on half-way timer",
+	"Enable snoop WC",
+	NULL,
+	"Enable Fast Deferral",
+	"Disable MCA on memory aliasing",
+	"Enable RSB",
+	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	"DP system processor",
+	"Low Voltage",
+	"HT supported",
+	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	NULL, NULL, NULL, NULL, NULL
+};
+
+static char **proc_features[]={
+	proc_features_0,
+	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	NULL, NULL, NULL, NULL,
+	proc_features_16,
+	NULL, NULL, NULL, NULL,
+};
+
+static char *
+feature_set_info(char *page, u64 avail, u64 status, u64 control, u64 set)
+{
+	char *p = page;
+	char **vf, **v;
+	int i;
+
+	vf = v = proc_features[set];
+	for(i=0; i < 64; i++, avail >>=1, status >>=1, control >>=1) {
+
+		if (!(control))		/* No remaining bits set */
+			break;
+		if (!(avail & 0x1))	/* Print only bits that are available */
+			continue;
+		if (vf)
+			v = vf + i;
+		if ( v && *v ) {
+			p += sprintf(p, "%-40s : %s %s\n", *v,
+				avail & 0x1 ? (status & 0x1 ?
+						"On " : "Off"): "",
+				avail & 0x1 ? (control & 0x1 ?
+						"Ctrl" : "NoCtrl"): "");
+		} else {
+			p += sprintf(p, "Feature set %2ld bit %2d\t\t\t"
+					" : %s %s\n",
+				set, i,
+				avail & 0x1 ? (status & 0x1 ?
+						"On " : "Off"): "",
+				avail & 0x1 ? (control & 0x1 ?
+						"Ctrl" : "NoCtrl"): "");
+		}
+	}
+	return p;
+}
+
 static int
 processor_info(char *page)
 {
 	char *p = page;
-	const char **v = proc_features;
-	u64 avail=1, status=1, control=1;
-	int i;
+	u64 avail=1, status=1, control=1, feature_set=0;
 	s64 ret;
 
-	if ((ret=ia64_pal_proc_get_features(&avail, &status, &control)) != 0) return 0;
+	do {
+		ret = ia64_pal_proc_get_features(&avail, &status, &control,
+						feature_set);
+		if (ret < 0) {
+			return p - page;
+		}
+
+		if (ret == 1) {
+			feature_set++;
+			continue;
+		}
+
+		p = feature_set_info(p, avail, status, control, feature_set);
+
+		feature_set++;
+	} while(1);
 
-	for(i=0; i < 64; i++, v++,avail >>=1, status >>=1, control >>=1) {
-		if ( ! *v ) continue;
-		p += sprintf(p, "%-40s : %s%s %s\n", *v,
-			avail & 0x1 ? "" : "NotImpl",
-			avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "",
-			avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
-	}
 	return p - page;
 }
 
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index e796e29f8e1..2cb9425e042 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -129,9 +129,6 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
 		first_time = 0;
 		if (need_workaround)
 			printk(KERN_INFO "Leaving McKinley Errata 9 workaround enabled\n");
-		else
-			printk(KERN_INFO "McKinley Errata 9 workaround not needed; "
-			       "disabling it\n");
 	}
 
 	if (need_workaround)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f55fa07849c..73e7c2e40b5 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -158,14 +158,14 @@
 */
 #define PROTECT_CTX(c, f) \
 	do {  \
-		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \
+		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
 		spin_lock_irqsave(&(c)->ctx_lock, f); \
-		DPRINT(("spinlocked ctx %p by [%d]\n", c, current->pid)); \
+		DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
 	} while(0)
 
 #define UNPROTECT_CTX(c, f) \
 	do { \
-		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \
+		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
 		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
 	} while(0)
 
@@ -227,12 +227,12 @@
 #ifdef PFM_DEBUGGING
 #define DPRINT(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
+		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
 	} while (0)
 
 #define DPRINT_ovfl(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
+		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
 	} while (0)
 #endif
@@ -558,7 +558,7 @@ static ctl_table pfm_sysctl_dir[] = {
 	{
 		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "perfmon",
-		.mode		= 0755,
+		.mode		= 0555,
 		.child		= pfm_ctl_table,
 	},
 	{}
@@ -567,7 +567,7 @@ static ctl_table pfm_sysctl_root[] = {
 	{
 		.ctl_name	= CTL_KERN,
 		.procname	= "kernel",
-		.mode		= 0755,
+		.mode		= 0555,
 		.child		= pfm_sysctl_dir,
 	},
 	{}
@@ -913,7 +913,7 @@ pfm_mask_monitoring(struct task_struct *task)
 	unsigned long mask, val, ovfl_mask;
 	int i;
 
-	DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));
+	DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
 
 	ovfl_mask = pmu_conf->ovfl_val;
 	/*
@@ -992,12 +992,12 @@ pfm_restore_monitoring(struct task_struct *task)
 	ovfl_mask = pmu_conf->ovfl_val;
 
 	if (task != current) {
-		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
+		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
 		return;
 	}
 	if (ctx->ctx_state != PFM_CTX_MASKED) {
 		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
-			task->pid, current->pid, ctx->ctx_state);
+			task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
 		return;
 	}
 	psr = pfm_get_psr();
@@ -1051,7 +1051,8 @@ pfm_restore_monitoring(struct task_struct *task)
 		if ((mask & 0x1) == 0UL) continue;
 		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
 		ia64_set_pmc(i, ctx->th_pmcs[i]);
-		DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, ctx->th_pmcs[i]));
+		DPRINT(("[%d] pmc[%d]=0x%lx\n",
+			task_pid_nr(task), i, ctx->th_pmcs[i]));
 	}
 	ia64_srlz_d();
 
@@ -1370,7 +1371,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 
 error_conflict:
 	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
-  		pfm_sessions.pfs_sys_session[cpu]->pid,
+  		task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
 		cpu));
 abort:
 	UNLOCK_PFS(flags);
@@ -1442,7 +1443,7 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz
 
 	/* sanity checks */
 	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
-		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm);
+		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
 		return -EINVAL;
 	}
 
@@ -1459,7 +1460,7 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz
 
 	up_write(&task->mm->mmap_sem);
 	if (r !=0) {
-		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size);
+		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
 	}
 
 	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
@@ -1501,7 +1502,7 @@ pfm_free_smpl_buffer(pfm_context_t *ctx)
 	return 0;
 
 invalid_free:
-	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid);
+	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
 	return -EINVAL;
 }
 #endif
@@ -1547,13 +1548,13 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 	unsigned long flags;
 	DECLARE_WAITQUEUE(wait, current);
 	if (PFM_IS_FILE(filp) == 0) {
-		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
 		return -EINVAL;
 	}
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EINVAL;
 	}
 
@@ -1607,7 +1608,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 
 		PROTECT_CTX(ctx, flags);
 	}
-	DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret));
+	DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
 	set_current_state(TASK_RUNNING);
 
 	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
@@ -1616,7 +1617,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 	ret = -EINVAL;
 	msg = pfm_get_next_msg(ctx);
 	if (msg == NULL) {
-		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid);
+		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
 		goto abort_locked;
 	}
 
@@ -1647,13 +1648,13 @@ pfm_poll(struct file *filp, poll_table * wait)
 	unsigned int mask = 0;
 
 	if (PFM_IS_FILE(filp) == 0) {
-		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
 		return 0;
 	}
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
 		return 0;
 	}
 
@@ -1692,7 +1693,7 @@ pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
 	ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
 
 	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
-		current->pid,
+		task_pid_nr(current),
 		fd,
 		on,
 		ctx->ctx_async_queue, ret));
@@ -1707,13 +1708,13 @@ pfm_fasync(int fd, struct file *filp, int on)
 	int ret;
 
 	if (PFM_IS_FILE(filp) == 0) {
-		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
 		return -EBADF;
 	}
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
 	}
 	/*
@@ -1759,7 +1760,7 @@ pfm_syswide_force_stop(void *info)
 	if (owner != ctx->ctx_task) {
 		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
 			smp_processor_id(),
-			owner->pid, ctx->ctx_task->pid);
+			task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
 		return;
 	}
 	if (GET_PMU_CTX() != ctx) {
@@ -1769,7 +1770,7 @@ pfm_syswide_force_stop(void *info)
 		return;
 	}
 
-	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));
+	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
 	/*
 	 * the context is already protected in pfm_close(), we simply
 	 * need to mask interrupts to avoid a PMU interrupt race on
@@ -1821,7 +1822,7 @@ pfm_flush(struct file *filp, fl_owner_t id)
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
 	}
 
@@ -1969,7 +1970,7 @@ pfm_close(struct inode *inode, struct file *filp)
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
 	}
 
@@ -2066,7 +2067,7 @@ pfm_close(struct inode *inode, struct file *filp)
 	 */
 	ctx->ctx_state = PFM_CTX_ZOMBIE;
 
-	DPRINT(("zombie ctx for [%d]\n", task->pid));
+	DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
 	/*
 	 * cannot free the context on the spot. deferred until
 	 * the task notices the ZOMBIE state
@@ -2472,7 +2473,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t
 	/* invoke and lock buffer format, if found */
 	fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
 	if (fmt == NULL) {
-		DPRINT(("[%d] cannot find buffer format\n", task->pid));
+		DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
 		return -EINVAL;
 	}
 
@@ -2483,7 +2484,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t
 
 	ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
 
-	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret));
+	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
 
 	if (ret) goto error;
 
@@ -2605,23 +2606,23 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	 * no kernel task or task not owner by caller
 	 */
 	if (task->mm == NULL) {
-		DPRINT(("task [%d] has not memory context (kernel thread)\n", task->pid));
+		DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task)));
 		return -EPERM;
 	}
 	if (pfm_bad_permissions(task)) {
-		DPRINT(("no permission to attach to  [%d]\n", task->pid));
+		DPRINT(("no permission to attach to  [%d]\n", task_pid_nr(task)));
 		return -EPERM;
 	}
 	/*
 	 * cannot block in self-monitoring mode
 	 */
 	if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
-		DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid));
+		DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
 		return -EINVAL;
 	}
 
 	if (task->exit_state == EXIT_ZOMBIE) {
-		DPRINT(("cannot attach to zombie task [%d]\n", task->pid));
+		DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
 		return -EBUSY;
 	}
 
@@ -2631,7 +2632,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	if (task == current) return 0;
 
 	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
-		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
+		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
 		return -EBUSY;
 	}
 	/*
@@ -3512,7 +3513,7 @@ pfm_use_debug_registers(struct task_struct *task)
 
 	if (pmu_conf->use_rr_dbregs == 0) return 0;
 
-	DPRINT(("called for [%d]\n", task->pid));
+	DPRINT(("called for [%d]\n", task_pid_nr(task)));
 
 	/*
 	 * do it only once
@@ -3543,7 +3544,7 @@ pfm_use_debug_registers(struct task_struct *task)
 	DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
 		  pfm_sessions.pfs_ptrace_use_dbregs,
 		  pfm_sessions.pfs_sys_use_dbregs,
-		  task->pid, ret));
+		  task_pid_nr(task), ret));
 
 	UNLOCK_PFS(flags);
 
@@ -3568,7 +3569,7 @@ pfm_release_debug_registers(struct task_struct *task)
 
 	LOCK_PFS(flags);
 	if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
-		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
+		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
 		ret = -1;
 	}  else {
 		pfm_sessions.pfs_ptrace_use_dbregs--;
@@ -3620,7 +3621,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 	/* sanity check */
 	if (unlikely(task == NULL)) {
-		printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid);
+		printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
 		return -EINVAL;
 	}
 
@@ -3629,7 +3630,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		fmt = ctx->ctx_buf_fmt;
 
 		DPRINT(("restarting self %d ovfl=0x%lx\n",
-			task->pid,
+			task_pid_nr(task),
 			ctx->ctx_ovfl_regs[0]));
 
 		if (CTX_HAS_SMPL(ctx)) {
@@ -3653,11 +3654,11 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 			pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
 
 		if (rst_ctrl.bits.mask_monitoring == 0) {
-			DPRINT(("resuming monitoring for [%d]\n", task->pid));
+			DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
 
 			if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
 		} else {
-			DPRINT(("keeping monitoring stopped for [%d]\n", task->pid));
+			DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
 
 			// cannot use pfm_stop_monitoring(task, regs);
 		}
@@ -3714,10 +3715,10 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 * "self-monitoring".
 	 */
 	if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
-		DPRINT(("unblocking [%d] \n", task->pid));
+		DPRINT(("unblocking [%d] \n", task_pid_nr(task)));
 		complete(&ctx->ctx_restart_done);
 	} else {
-		DPRINT(("[%d] armed exit trap\n", task->pid));
+		DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
 
 		ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
 
@@ -3805,7 +3806,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
 	 * don't bother if we are loaded and task is being debugged
 	 */
 	if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
-		DPRINT(("debug registers already in use for [%d]\n", task->pid));
+		DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
 		return -EBUSY;
 	}
 
@@ -3846,7 +3847,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
 	 * is shared by all processes running on it
 	 */
 	if (first_time && can_access_pmu) {
-		DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid));
+		DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
 		for (i=0; i < pmu_conf->num_ibrs; i++) {
 			ia64_set_ibr(i, 0UL);
 			ia64_dv_serialize_instruction();
@@ -4035,7 +4036,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		return -EBUSY;
 	}
 	DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
-		PFM_CTX_TASK(ctx)->pid,
+		task_pid_nr(PFM_CTX_TASK(ctx)),
 		state,
 		is_system));
 
@@ -4093,7 +4094,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 * monitoring disabled in kernel at next reschedule
 		 */
 		ctx->ctx_saved_psr_up = 0;
-		DPRINT(("task=[%d]\n", task->pid));
+		DPRINT(("task=[%d]\n", task_pid_nr(task)));
 	}
 	return 0;
 }
@@ -4298,11 +4299,12 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 	if (is_system) {
 		if (pfm_sessions.pfs_ptrace_use_dbregs) {
-			DPRINT(("cannot load [%d] dbregs in use\n", task->pid));
+			DPRINT(("cannot load [%d] dbregs in use\n",
+							task_pid_nr(task)));
 			ret = -EBUSY;
 		} else {
 			pfm_sessions.pfs_sys_use_dbregs++;
-			DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs));
+			DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
 			set_dbregs = 1;
 		}
 	}
@@ -4394,7 +4396,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 		/* allow user level control */
 		ia64_psr(regs)->sp = 0;
-		DPRINT(("clearing psr.sp for [%d]\n", task->pid));
+		DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
 
 		SET_LAST_CPU(ctx, smp_processor_id());
 		INC_ACTIVATION();
@@ -4429,7 +4431,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 */
 		SET_PMU_OWNER(task, ctx);
 
-		DPRINT(("context loaded on PMU for [%d]\n", task->pid));
+		DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
 	} else {
 		/*
 		 * when not current, task MUST be stopped, so this is safe
@@ -4493,7 +4495,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	int prev_state, is_system;
 	int ret;
 
-	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
+	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
 
 	prev_state = ctx->ctx_state;
 	is_system  = ctx->ctx_fl_system;
@@ -4568,7 +4570,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 		 */
 		ia64_psr(regs)->sp = 1;
 
-		DPRINT(("setting psr.sp for [%d]\n", task->pid));
+		DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
 	}
 	/*
 	 * save PMDs to context
@@ -4608,7 +4610,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	ctx->ctx_fl_can_restart  = 0;
 	ctx->ctx_fl_going_zombie = 0;
 
-	DPRINT(("disconnected [%d] from context\n", task->pid));
+	DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
 
 	return 0;
 }
@@ -4631,7 +4633,7 @@ pfm_exit_thread(struct task_struct *task)
 
 	PROTECT_CTX(ctx, flags);
 
-	DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid));
+	DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
 
 	state = ctx->ctx_state;
 	switch(state) {
@@ -4640,13 +4642,13 @@ pfm_exit_thread(struct task_struct *task)
 			 * only comes to this function if pfm_context is not NULL, i.e., cannot
 			 * be in unloaded state
 			 */
-			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
+			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
 			break;
 		case PFM_CTX_LOADED:
 		case PFM_CTX_MASKED:
 			ret = pfm_context_unload(ctx, NULL, 0, regs);
 			if (ret) {
-				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
+				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
 			}
 			DPRINT(("ctx unloaded for current state was %d\n", state));
 
@@ -4655,12 +4657,12 @@ pfm_exit_thread(struct task_struct *task)
 		case PFM_CTX_ZOMBIE:
 			ret = pfm_context_unload(ctx, NULL, 0, regs);
 			if (ret) {
-				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
+				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
 			}
 			free_ok = 1;
 			break;
 		default:
-			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state);
+			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
 			break;
 	}
 	UNPROTECT_CTX(ctx, flags);
@@ -4744,7 +4746,7 @@ recheck:
 	DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
 		ctx->ctx_fd,
 		state,
-		task->pid,
+		task_pid_nr(task),
 		task->state, PFM_CMD_STOPPED(cmd)));
 
 	/*
@@ -4791,7 +4793,7 @@ recheck:
 	 */
 	if (PFM_CMD_STOPPED(cmd)) {
 		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
-			DPRINT(("[%d] task not in stopped state\n", task->pid));
+			DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
 			return -EBUSY;
 		}
 		/*
@@ -4884,7 +4886,7 @@ restart_args:
 	 * limit abuse to min page size
 	 */
 	if (unlikely(sz > PFM_MAX_ARGSIZE)) {
-		printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", current->pid, sz);
+		printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
 		return -E2BIG;
 	}
 
@@ -5031,11 +5033,11 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
 {
 	int ret;
 
-	DPRINT(("entering for [%d]\n", current->pid));
+	DPRINT(("entering for [%d]\n", task_pid_nr(current)));
 
 	ret = pfm_context_unload(ctx, NULL, 0, regs);
 	if (ret) {
-		printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", current->pid, ret);
+		printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret);
 	}
 
 	/*
@@ -5072,7 +5074,7 @@ pfm_handle_work(void)
 
 	ctx = PFM_GET_CTX(current);
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid);
+		printk(KERN_ERR "perfmon: [%d] has no PFM context\n", task_pid_nr(current));
 		return;
 	}
 
@@ -5269,7 +5271,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 	DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
 		     "used_pmds=0x%lx\n",
 		     pmc0,
-		     task ? task->pid: -1,
+		     task ? task_pid_nr(task): -1,
 		     (regs ? regs->cr_iip : 0),
 		     CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
 		     ctx->ctx_used_pmds[0]));
@@ -5458,7 +5460,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 	}
 
	DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
-			GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
+			GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
 			PFM_GET_WORK_PENDING(task),
 			ctx->ctx_fl_trap_reason,
 			ovfl_pmds,
@@ -5483,7 +5485,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 sanity_check:
 	printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
 		smp_processor_id(),
-		task ? task->pid : -1,
+		task ? task_pid_nr(task) : -1,
 		pmc0);
 	return;
 
@@ -5516,7 +5518,7 @@ stop_monitoring:
 	 *
 	 * Overall pretty hairy stuff....
 	 */
-	DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task->pid: -1));
+	DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
 	pfm_clear_psr_up();
 	ia64_psr(regs)->up = 0;
 	ia64_psr(regs)->sp = 1;
@@ -5577,13 +5579,13 @@ pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
 
 report_spurious1:
 	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
-		this_cpu, task->pid);
+		this_cpu, task_pid_nr(task));
 	pfm_unfreeze_pmu();
 	return -1;
 report_spurious2:
 	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
 			this_cpu,
-			task->pid);
+			task_pid_nr(task));
 	pfm_unfreeze_pmu();
 	return -1;
 }
@@ -5870,7 +5872,8 @@ pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
 	ia64_psr(regs)->sp = 1;
 
 	if (GET_PMU_OWNER() == task) {
-		DPRINT(("cleared ownership for [%d]\n", ctx->ctx_task->pid));
+		DPRINT(("cleared ownership for [%d]\n",
+					task_pid_nr(ctx->ctx_task)));
 		SET_PMU_OWNER(NULL, NULL);
 	}
 
@@ -5882,7 +5885,7 @@ pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
 	task->thread.pfm_context  = NULL;
 	task->thread.flags       &= ~IA64_THREAD_PM_VALID;
 
-	DPRINT(("force cleanup for [%d]\n", task->pid));
+	DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
 }
 
@@ -6426,7 +6429,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
 
 		if (PMD_IS_COUNTING(i)) {
 			DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
-				task->pid,
+				task_pid_nr(task),
 				i,
 				ctx->ctx_pmds[i].val,
 				val & ovfl_val));
@@ -6448,11 +6451,11 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
 			 */
 			if (pmc0 & (1UL << i)) {
 				val += 1 + ovfl_val;
-				DPRINT(("[%d] pmd[%d] overflowed\n", task->pid, i));
+				DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
 			}
 		}
 
-		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val));
+		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
 
 		if (is_self) ctx->th_pmds[i] = pmd_val;
@@ -6793,14 +6796,14 @@ dump_pmu_state(const char *from)
 	printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
 		this_cpu,
 		from,
-		current->pid,
+		task_pid_nr(current),
 		regs->cr_iip,
 		current->comm);
 
 	task = GET_PMU_OWNER();
 	ctx  = GET_PMU_CTX();
 
-	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx);
+	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
 
 	psr = pfm_get_psr();
 
@@ -6848,7 +6851,7 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs)
 {
 	struct thread_struct *thread;
 
-	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid));
+	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
 
 	thread = &task->thread;
 
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
index ff80eab83b3..a7af1cb419f 100644
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ b/arch/ia64/kernel/perfmon_default_smpl.c
@@ -44,11 +44,11 @@ default_validate(struct task_struct *task, unsigned int flags, int cpu, void *da
 	int ret = 0;
 
 	if (data == NULL) {
-		DPRINT(("[%d] no argument passed\n", task->pid));
+		DPRINT(("[%d] no argument passed\n", task_pid_nr(task)));
 		return -EINVAL;
 	}
 
-	DPRINT(("[%d] validate flags=0x%x CPU%d\n", task->pid, flags, cpu));
+	DPRINT(("[%d] validate flags=0x%x CPU%d\n", task_pid_nr(task), flags, cpu));
 
 	/*
 	 * must hold at least the buffer header + one minimally sized entry
@@ -88,7 +88,7 @@ default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, v
 	hdr->hdr_count        = 0UL;
 
 	DPRINT(("[%d] buffer=%p buf_size=%lu hdr_size=%lu hdr_version=%u cur_offs=%lu\n",
-		task->pid,
+		task_pid_nr(task),
 		buf,
 		hdr->hdr_buf_size,
 		sizeof(*hdr),
@@ -245,7 +245,7 @@ default_restart(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, stru
 static int
 default_exit(struct task_struct *task, void *buf, struct pt_regs *regs)
 {
-	DPRINT(("[%d] exit(%p)\n", task->pid, buf));
+	DPRINT(("[%d] exit(%p)\n", task_pid_nr(task), buf));
 	return 0;
 }
 
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index c613fc0e91c..2418289ee5c 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -105,7 +105,8 @@ show_regs (struct pt_regs *regs)
 	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
 
 	print_modules();
-	printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm);
+	printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current),
+			smp_processor_id(), current->comm);
 	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]    %s\n",
 	       regs->cr_ipsr, regs->cr_ifs, ip, print_tainted());
 	print_symbol("ip is at %s\n", ip);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index c5cfcfa4c87..2b3751eef5c 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -90,7 +90,12 @@ static struct resource code_resource = {
 	.name	= "Kernel code",
 	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
 };
-extern char _text[], _end[], _etext[];
+
+static struct resource bss_resource = {
+	.name	= "Kernel bss",
+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
+};
+extern char _text[], _end[], _etext[], _edata[], _bss[];
 
 unsigned long ia64_max_cacheline_size;
 
@@ -200,14 +205,59 @@ static int __init register_memory(void)
 	code_resource.start = ia64_tpa(_text);
 	code_resource.end   = ia64_tpa(_etext) - 1;
 	data_resource.start = ia64_tpa(_etext);
-	data_resource.end   = ia64_tpa(_end) - 1;
-	efi_initialize_iomem_resources(&code_resource, &data_resource);
+	data_resource.end   = ia64_tpa(_edata) - 1;
+	bss_resource.start  = ia64_tpa(_bss);
+	bss_resource.end    = ia64_tpa(_end) - 1;
+	efi_initialize_iomem_resources(&code_resource, &data_resource,
+			&bss_resource);
 
 	return 0;
 }
 
 __initcall(register_memory);
 
+
+#ifdef CONFIG_KEXEC
+static void __init setup_crashkernel(unsigned long total, int *n)
+{
+	unsigned long long base = 0, size = 0;
+	int ret;
+
+	ret = parse_crashkernel(boot_command_line, total,
+			&size, &base);
+	if (ret == 0 && size > 0) {
+		if (!base) {
+			sort_regions(rsvd_region, *n);
+			base = kdump_find_rsvd_region(size,
+					rsvd_region, *n);
+		}
+		if (base != ~0UL) {
+			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
+					"for crashkernel (System RAM: %ldMB)\n",
+					(unsigned long)(size >> 20),
+					(unsigned long)(base >> 20),
+					(unsigned long)(total >> 20));
+			rsvd_region[*n].start =
+				(unsigned long)__va(base);
+			rsvd_region[*n].end =
+				(unsigned long)__va(base + size);
+			(*n)++;
+			crashk_res.start = base;
+			crashk_res.end = base + size - 1;
+		}
+	}
+	efi_memmap_res.start = ia64_boot_param->efi_memmap;
+	efi_memmap_res.end = efi_memmap_res.start +
+		ia64_boot_param->efi_memmap_size;
+	boot_param_res.start = __pa(ia64_boot_param);
+	boot_param_res.end = boot_param_res.start +
+		sizeof(*ia64_boot_param);
+}
+#else
+static inline void __init setup_crashkernel(unsigned long total, int *n)
+{}
+#endif
+
 /**
  * reserve_memory - setup reserved memory areas
  *
@@ -219,6 +269,7 @@ void __init
 reserve_memory (void)
 {
 	int n = 0;
+	unsigned long total_memory;
 
 	/*
 	 * none of the entries in this table overlap
@@ -254,50 +305,11 @@ reserve_memory (void)
 	n++;
 #endif
 
-	efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
+	total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
 	n++;
 
-#ifdef CONFIG_KEXEC
-	/* crashkernel=size@offset specifies the size to reserve for a crash
-	 * kernel. If offset is 0, then it is determined automatically.
-	 * By reserving this memory we guarantee that linux never set's it
-	 * up as a DMA target.Useful for holding code to do something
-	 * appropriate after a kernel panic.
-	 */
-	{
-		char *from = strstr(boot_command_line, "crashkernel=");
-		unsigned long base, size;
-		if (from) {
-			size = memparse(from + 12, &from);
-			if (*from == '@')
-				base = memparse(from+1, &from);
-			else
-				base = 0;
-			if (size) {
-				if (!base) {
-					sort_regions(rsvd_region, n);
-					base = kdump_find_rsvd_region(size,
-							rsvd_region, n);
-				}
-				if (base != ~0UL) {
-					rsvd_region[n].start =
-						(unsigned long)__va(base);
-					rsvd_region[n].end =
-						(unsigned long)__va(base + size);
-					n++;
-					crashk_res.start = base;
-					crashk_res.end = base + size - 1;
-				}
-			}
-		}
-		efi_memmap_res.start = ia64_boot_param->efi_memmap;
-		efi_memmap_res.end = efi_memmap_res.start +
-			ia64_boot_param->efi_memmap_size;
-		boot_param_res.start = __pa(ia64_boot_param);
-		boot_param_res.end = boot_param_res.start +
-			sizeof(*ia64_boot_param);
-	}
-#endif
+	setup_crashkernel(total_memory, &n);
+
 	/* end of memory marker */
 	rsvd_region[n].start = ~0UL;
 	rsvd_region[n].end   = ~0UL;
@@ -405,34 +417,6 @@ mark_bsp_online (void)
 #endif
 }
 
-#ifdef CONFIG_SMP
-static void __init
-check_for_logical_procs (void)
-{
-	pal_logical_to_physical_t info;
-	s64 status;
-
-	status = ia64_pal_logical_to_phys(0, &info);
-	if (status == -1) {
-		printk(KERN_INFO "No logical to physical processor mapping "
-		       "available\n");
-		return;
-	}
-	if (status) {
-		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
-		       status);
-		return;
-	}
-	/*
-	 * Total number of siblings that BSP has.  Though not all of them
-	 * may have booted successfully. The correct number of siblings
-	 * booted is in info.overview_num_log.
-	 */
-	smp_num_siblings = info.overview_tpc;
-	smp_num_cpucores = info.overview_cpp;
-}
-#endif
-
 static __initdata int nomca;
 static __init int setup_nomca(char *s)
 {
@@ -528,15 +512,6 @@ setup_arch (char **cmdline_p)
 
 #ifdef CONFIG_SMP
 	cpu_physical_id(0) = hard_smp_processor_id();
-	check_for_logical_procs();
-	if (smp_num_cpucores > 1)
-		printk(KERN_INFO
-		       "cpu package is Multi-Core capable: number of cores=%d\n",
-		       smp_num_cpucores);
-	if (smp_num_siblings > 1)
-		printk(KERN_INFO
-		       "cpu package is Multi-Threading capable: number of siblings=%d\n",
-		       smp_num_siblings);
 #endif
 
 	cpu_init();	/* initialize the bootstrap CPU */
@@ -649,12 +624,13 @@ show_cpuinfo (struct seq_file *m, void *v)
 		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
 #ifdef CONFIG_SMP
 	seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
+	if (c->socket_id != -1)
+		seq_printf(m, "physical id: %u\n", c->socket_id);
 	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
 		seq_printf(m,
-			   "physical id: %u\n"
-			   "core id    : %u\n"
-			   "thread id  : %u\n",
-			   c->socket_id, c->core_id, c->thread_id);
+			   "core id    : %u\n"
+			   "thread id  : %u\n",
+			   c->core_id, c->thread_id);
 #endif
 	seq_printf(m,"\n");
 
@@ -766,6 +742,9 @@ identify_cpu (struct cpuinfo_ia64 *c)
 	c->socket_id = -1;
 
 	identify_siblings(c);
+
+	if (c->threads_per_core > smp_num_siblings)
+		smp_num_siblings = c->threads_per_core;
 #endif
 	c->ppn = cpuid.field.ppn;
 	c->number = cpuid.field.number;
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index aeec8184e86..cdb64cc4d9c 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -227,7 +227,7 @@ ia64_rt_sigreturn (struct sigscratch *scr)
 	si.si_signo = SIGSEGV;
 	si.si_errno = 0;
 	si.si_code = SI_KERNEL;
-	si.si_pid = current->pid;
+	si.si_pid = task_pid_vnr(current);
 	si.si_uid = current->uid;
 	si.si_addr = sc;
 	force_sig_info(SIGSEGV, &si, current);
@@ -332,7 +332,7 @@ force_sigsegv_info (int sig, void __user *addr)
 	si.si_signo = SIGSEGV;
 	si.si_errno = 0;
 	si.si_code = SI_KERNEL;
-	si.si_pid = current->pid;
+	si.si_pid = task_pid_vnr(current);
 	si.si_uid = current->uid;
 	si.si_addr = addr;
 	force_sig_info(SIGSEGV, &si, current);
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index c57dbce25c1..f0fc4d8465a 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -142,7 +142,6 @@ DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 int smp_num_siblings = 1;
-int smp_num_cpucores = 1;
 
 /* which logical CPU number maps to which CPU (physical APIC ID) */
 volatile int ia64_cpu_to_sapicid[NR_CPUS];
@@ -886,13 +885,17 @@ identify_siblings(struct cpuinfo_ia64 *c)
 	u16 pltid;
 	pal_logical_to_physical_t info;
 
-	if (smp_num_cpucores == 1 && smp_num_siblings == 1)
-		return;
-
 	if ((status = ia64_pal_logical_to_phys(-1, &info)) != PAL_STATUS_SUCCESS) {
-		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
-		       status);
-		return;
+		if (status != PAL_STATUS_UNIMPLEMENTED) {
+			printk(KERN_ERR
+				"ia64_pal_logical_to_phys failed with %ld\n",
+				status);
+			return;
+		}
+
+		info.overview_ppid = 0;
+		info.overview_cpp  = 1;
+		info.overview_tpc  = 1;
 	}
 	if ((status = ia64_sal_physical_id_info(&pltid)) != PAL_STATUS_SUCCESS) {
 		printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status);
@@ -900,6 +903,10 @@ identify_siblings(struct cpuinfo_ia64 *c)
 	}
 
 	c->socket_id = (pltid << 8) | info.overview_ppid;
+
+	if (info.overview_cpp == 1 && info.overview_tpc == 1)
+		return;
+
 	c->cores_per_socket = info.overview_cpp;
 	c->threads_per_core = info.overview_tpc;
 	c->num_log = info.overview_num_log;
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 3aeaf15e468..78d65cb947d 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -61,7 +61,7 @@ die (const char *str, struct pt_regs *regs, long err)
 
 	if (++die.lock_owner_depth < 3) {
 		printk("%s[%d]: %s %ld [%d]\n",
-			current->comm, current->pid, str, err, ++die_counter);
+			current->comm, task_pid_nr(current), str, err, ++die_counter);
 		(void) notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
 		show_regs(regs);
 	} else
@@ -315,7 +315,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
 			last.time = current_jiffies + 5 * HZ;
 			printk(KERN_WARNING
 				"%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
-				current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr);
+				current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr);
 		}
 	}
 }
@@ -453,7 +453,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
 		if (code == 8) {
 # ifdef CONFIG_IA64_PRINT_HAZARDS
 			printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
-			       current->comm, current->pid,
+			       current->comm, task_pid_nr(current),
 			       regs.cr_iip + ia64_psr(&regs)->ri, regs.pr);
 # endif
 			return;
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index fe6aa5a9f8f..2173de9fe91 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -1340,7 +1340,8 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 			size_t len;
 
 			len = sprintf(buf, "%s(%d): unaligned access to 0x%016lx, "
-				      "ip=0x%016lx\n\r", current->comm, current->pid,
+				      "ip=0x%016lx\n\r", current->comm,
+				      task_pid_nr(current),
 				      ifa, regs->cr_iip + ipsr->ri);
 			/*
 			 * Don't call tty_write_message() if we're in the kernel; we might
@@ -1363,7 +1364,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 				      "administrator\n"
 				      "echo 0 > /proc/sys/kernel/ignore-"
 				      "unaligned-usertrap to re-enable\n",
-				      current->comm, current->pid);
+				      current->comm, task_pid_nr(current));
 			}
 		}
 	} else {
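
A note for readers of the palinfo.c hunk above: the rewritten processor_info() no longer makes a single PAL call; it probes numbered feature sets until the firmware reports there are none left (ret < 0 stops the loop, ret == 1 means the set number is valid but empty, ret == 0 yields the avail/status/control bit masks). The freestanding C sketch below mimics that enumeration pattern outside the kernel; pal_proc_get_features() is a made-up stub standing in for the real ia64_pal_proc_get_features() PAL call, and the mask values are invented for illustration only.

#include <stdio.h>
#include <stdint.h>

/* Stub for the PAL call; the return convention mirrors the hunk above:
 * <0: no such feature set (stop), 1: set exists but is empty (skip),
 * 0: the three masks are valid for this set.
 */
static long pal_proc_get_features(uint64_t *avail, uint64_t *status,
				  uint64_t *control, uint64_t set)
{
	if (set > 16)
		return -1;		/* past the last implemented set */
	if (set != 0 && set != 16)
		return 1;		/* valid set number, nothing in it */
	*avail   = 0x7;			/* bits 0-2 implemented (invented) */
	*status  = 0x5;			/* bits 0 and 2 currently on */
	*control = 0x3;			/* bits 0 and 1 controllable */
	return 0;
}

int main(void)
{
	uint64_t avail, status, control, set = 0;

	for (;;) {
		long ret = pal_proc_get_features(&avail, &status,
						 &control, set);
		if (ret < 0)		/* no more sets: done */
			break;
		if (ret == 1) {		/* empty set: keep probing */
			set++;
			continue;
		}
		/* walk the 64 bits, as feature_set_info() does */
		for (int i = 0; i < 64;
		     i++, avail >>= 1, status >>= 1, control >>= 1) {
			if (!(avail & 1))
				continue;
			printf("set %2llu bit %2d : %s %s\n",
			       (unsigned long long)set, i,
			       (status & 1) ? "On " : "Off",
			       (control & 1) ? "Ctrl" : "NoCtrl");
		}
		set++;
	}
	return 0;
}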