Diffstat (limited to 'arch/s390/kernel')
-rw-r--r-- | arch/s390/kernel/Makefile | 4
-rw-r--r-- | arch/s390/kernel/bitmap.S | 56
-rw-r--r-- | arch/s390/kernel/bitmap.c | 54
-rw-r--r-- | arch/s390/kernel/compat_ptrace.h | 3
-rw-r--r-- | arch/s390/kernel/compat_wrapper.S | 2
-rw-r--r-- | arch/s390/kernel/debug.c | 9
-rw-r--r-- | arch/s390/kernel/early.c | 23
-rw-r--r-- | arch/s390/kernel/head.S | 2
-rw-r--r-- | arch/s390/kernel/head31.S | 1
-rw-r--r-- | arch/s390/kernel/head64.S | 1
-rw-r--r-- | arch/s390/kernel/ipl.c | 74
-rw-r--r-- | arch/s390/kernel/mcount.S | 6
-rw-r--r-- | arch/s390/kernel/module.c | 19
-rw-r--r-- | arch/s390/kernel/nmi.c | 376
-rw-r--r-- | arch/s390/kernel/process.c | 73
-rw-r--r-- | arch/s390/kernel/processor.c | 73
-rw-r--r-- | arch/s390/kernel/reipl64.S | 11
-rw-r--r-- | arch/s390/kernel/s390_ksyms.c | 44
-rw-r--r-- | arch/s390/kernel/setup.c | 52
-rw-r--r-- | arch/s390/kernel/smp.c | 68
-rw-r--r-- | arch/s390/kernel/sysinfo.c | 428
-rw-r--r-- | arch/s390/kernel/time.c | 71
-rw-r--r-- | arch/s390/kernel/topology.c | 2
-rw-r--r-- | arch/s390/kernel/traps.c | 4
-rw-r--r-- | arch/s390/kernel/vdso.c | 2
-rw-r--r-- | arch/s390/kernel/vmlinux.lds.S | 2
26 files changed, 1150 insertions, 310 deletions
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 3edc6c6f258..228e3105ded 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -17,10 +17,12 @@ CFLAGS_smp.o := -Wno-nonnull # CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' +CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w + obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ - vdso.o vtime.o + vdso.o vtime.o sysinfo.o nmi.o obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) diff --git a/arch/s390/kernel/bitmap.S b/arch/s390/kernel/bitmap.S deleted file mode 100644 index dfb41f946e2..00000000000 --- a/arch/s390/kernel/bitmap.S +++ /dev/null @@ -1,56 +0,0 @@ -/* - * arch/s390/kernel/bitmap.S - * Bitmaps for set_bit, clear_bit, test_and_set_bit, ... - * See include/asm-s390/{bitops.h|posix_types.h} for details - * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), - */ - - .globl _oi_bitmap -_oi_bitmap: - .byte 0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80 - - .globl _ni_bitmap -_ni_bitmap: - .byte 0xFE,0xFD,0xFB,0xF7,0xEF,0xDF,0xBF,0x7F - - .globl _zb_findmap -_zb_findmap: - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 - .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 - - .globl _sb_findmap -_sb_findmap: - .byte 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 - diff --git a/arch/s390/kernel/bitmap.c b/arch/s390/kernel/bitmap.c new file mode 100644 index 00000000000..3ae4757b006 --- /dev/null +++ b/arch/s390/kernel/bitmap.c @@ -0,0 +1,54 @@ +/* + * Bitmaps for set_bit, clear_bit, test_and_set_bit, ... + * See include/asm/{bitops.h|posix_types.h} for details + * + * Copyright IBM Corp. 
1999,2009 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, + */ + +#include <linux/bitops.h> +#include <linux/module.h> + +const char _oi_bitmap[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 }; +EXPORT_SYMBOL(_oi_bitmap); + +const char _ni_bitmap[] = { 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f }; +EXPORT_SYMBOL(_ni_bitmap); + +const char _zb_findmap[] = { + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4, + 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 }; +EXPORT_SYMBOL(_zb_findmap); + +const char _sb_findmap[] = { + 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0, + 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 }; +EXPORT_SYMBOL(_sb_findmap); diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h index a2be3a978d5..123dd660d7f 100644 --- a/arch/s390/kernel/compat_ptrace.h +++ b/arch/s390/kernel/compat_ptrace.h @@ -1,10 +1,11 @@ #ifndef _PTRACE32_H #define _PTRACE32_H +#include <asm/ptrace.h> /* needed for NUM_CR_WORDS */ #include "compat_linux.h" /* needed for psw_compat_t */ typedef struct { - __u32 cr[3]; + __u32 cr[NUM_CR_WORDS]; } per_cr_words32; typedef struct { diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 62c706eb0de..87cf5a79a35 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -252,7 +252,7 @@ sys32_chroot_wrapper: sys32_ustat_wrapper: llgfr %r2,%r2 # dev_t llgtr %r3,%r3 # struct ustat * - jg sys_ustat + jg compat_sys_ustat .globl sys32_dup2_wrapper sys32_dup2_wrapper: diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index ba03fc0a3a5..be8bceaf37d 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -603,7 +603,7 @@ debug_input(struct file *file, const char __user *user_buf, size_t length, static int debug_open(struct inode *inode, struct file *file) { - int i = 0, rc = 0; + int i, rc = 0; file_private_info_t *p_info; debug_info_t *debug_info, *debug_info_snapshot; @@ -642,8 +642,7 @@ found: p_info = kmalloc(sizeof(file_private_info_t), GFP_KERNEL); if(!p_info){ - if(debug_info_snapshot) - debug_info_free(debug_info_snapshot); + debug_info_free(debug_info_snapshot); rc = -ENOMEM; goto out; } @@ -698,8 +697,7 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area, if ((uid != 0) || (gid != 0)) pr_warning("Root becomes the owner of all s390dbf files " "in sysfs\n"); - if (!initialized) - BUG(); + BUG_ON(!initialized); mutex_lock(&debug_mutex); /* create new debug_info */ @@ -1156,7 +1154,6 @@ debug_unregister_view(debug_info_t * id, struct debug_view *view) else { 
debugfs_remove(id->debugfs_entries[i]); id->views[i] = NULL; - rc = 0; } spin_unlock_irqrestore(&id->lock, flags); out: diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 2a2ca268b1d..4d221c81c84 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -6,6 +6,7 @@ * Heiko Carstens <heiko.carstens@de.ibm.com> */ +#include <linux/compiler.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/string.h> @@ -20,6 +21,7 @@ #include <asm/processor.h> #include <asm/sections.h> #include <asm/setup.h> +#include <asm/sysinfo.h> #include <asm/cpcmd.h> #include <asm/sclp.h> #include "entry.h" @@ -173,19 +175,21 @@ static noinline __init void init_kernel_storage_key(void) page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY); } +static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE); + static noinline __init void detect_machine_type(void) { - struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data; - - get_cpu_id(&S390_lowcore.cpu_data.cpu_id); - - /* Running under z/VM ? */ - if (cpuinfo->cpu_id.version == 0xff) - machine_flags |= MACHINE_FLAG_VM; + /* No VM information? Looks like LPAR */ + if (stsi(&vmms, 3, 2, 2) == -ENOSYS) + return; + if (!vmms.count) + return; - /* Running under KVM ? */ - if (cpuinfo->cpu_id.version == 0xfe) + /* Running under KVM? If not we assume z/VM */ + if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3)) machine_flags |= MACHINE_FLAG_KVM; + else + machine_flags |= MACHINE_FLAG_VM; } static __init void early_pgm_check_handler(void) @@ -348,7 +352,6 @@ static void __init setup_boot_command_line(void) /* copy arch command line */ strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); - boot_command_line[ARCH_COMMAND_LINE_SIZE - 1] = 0; /* append IPL PARM data to the boot command line */ if (MACHINE_IS_VM) { diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index ec7e35f6055..1046c2c9f8d 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -469,6 +469,8 @@ start: .org 0x10000 startup:basr %r13,0 # get base .LPG0: + xc 0x200(256),0x200 # partially clear lowcore + xc 0x300(256),0x300 #ifndef CONFIG_MARCH_G5 # check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10} diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index db476d114ca..2ced846065b 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S @@ -20,7 +20,6 @@ startup_continue: lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area # move IPL device to lowcore - mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12) # # Setup stack # diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index f9f70aa1524..65667b2e65c 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -86,7 +86,6 @@ startup_continue: lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area # move IPL device to lowcore - mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12) lghi %r0,__LC_PASTE stg %r0,__LC_VDSO_PER_CPU # diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 2dcf590faba..6f3711a0eaa 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -23,7 +23,7 @@ #include <asm/ebcdic.h> #include <asm/reset.h> #include <asm/sclp.h> -#include <asm/setup.h> +#include <asm/checksum.h> #define IPL_PARM_BLOCK_VERSION 0 @@ -56,13 +56,14 @@ struct shutdown_trigger { }; /* - * Five shutdown action types are supported: + * The following shutdown action types are 
supported: */ #define SHUTDOWN_ACTION_IPL_STR "ipl" #define SHUTDOWN_ACTION_REIPL_STR "reipl" #define SHUTDOWN_ACTION_DUMP_STR "dump" #define SHUTDOWN_ACTION_VMCMD_STR "vmcmd" #define SHUTDOWN_ACTION_STOP_STR "stop" +#define SHUTDOWN_ACTION_DUMP_REIPL_STR "dump_reipl" struct shutdown_action { char *name; @@ -146,6 +147,7 @@ static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT; static struct ipl_parameter_block *reipl_block_fcp; static struct ipl_parameter_block *reipl_block_ccw; static struct ipl_parameter_block *reipl_block_nss; +static struct ipl_parameter_block *reipl_block_actual; static int dump_capabilities = DUMP_TYPE_NONE; static enum dump_type dump_type = DUMP_TYPE_NONE; @@ -835,6 +837,7 @@ static int reipl_set_type(enum ipl_type type) reipl_method = REIPL_METHOD_CCW_VM; else reipl_method = REIPL_METHOD_CCW_CIO; + reipl_block_actual = reipl_block_ccw; break; case IPL_TYPE_FCP: if (diag308_set_works) @@ -843,6 +846,7 @@ static int reipl_set_type(enum ipl_type type) reipl_method = REIPL_METHOD_FCP_RO_VM; else reipl_method = REIPL_METHOD_FCP_RO_DIAG; + reipl_block_actual = reipl_block_fcp; break; case IPL_TYPE_FCP_DUMP: reipl_method = REIPL_METHOD_FCP_DUMP; @@ -852,6 +856,7 @@ static int reipl_set_type(enum ipl_type type) reipl_method = REIPL_METHOD_NSS_DIAG; else reipl_method = REIPL_METHOD_NSS; + reipl_block_actual = reipl_block_nss; break; case IPL_TYPE_UNKNOWN: reipl_method = REIPL_METHOD_DEFAULT; @@ -960,7 +965,6 @@ static void reipl_run(struct shutdown_trigger *trigger) diag308(DIAG308_IPL, NULL); break; case REIPL_METHOD_FCP_DUMP: - default: break; } disabled_wait((unsigned long) __builtin_return_address(0)); @@ -1069,10 +1073,12 @@ static int __init reipl_fcp_init(void) { int rc; - if ((!diag308_set_works) && (ipl_info.type != IPL_TYPE_FCP)) - return 0; - if ((!diag308_set_works) && (ipl_info.type == IPL_TYPE_FCP)) - make_attrs_ro(reipl_fcp_attrs); + if (!diag308_set_works) { + if (ipl_info.type == IPL_TYPE_FCP) + make_attrs_ro(reipl_fcp_attrs); + else + return 0; + } reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); if (!reipl_block_fcp) @@ -1253,7 +1259,6 @@ static void dump_run(struct shutdown_trigger *trigger) diag308(DIAG308_DUMP, NULL); break; case DUMP_METHOD_NONE: - default: return; } printk(KERN_EMERG "Dump failed!\n"); @@ -1332,6 +1337,49 @@ static struct shutdown_action __refdata dump_action = { .init = dump_init, }; +static void dump_reipl_run(struct shutdown_trigger *trigger) +{ + preempt_disable(); + /* + * Bypass dynamic address translation (DAT) when storing IPL parameter + * information block address and checksum into the prefix area + * (corresponding to absolute addresses 0-8191). + * When enhanced DAT applies and the STE format control in one, + * the absolute address is formed without prefixing. In this case a + * normal store (stg/st) into the prefix area would no more match to + * absolute addresses 0-8191. 
+ */ +#ifdef CONFIG_64BIT + asm volatile("sturg %0,%1" + :: "a" ((unsigned long) reipl_block_actual), + "a" (&lowcore_ptr[smp_processor_id()]->ipib)); +#else + asm volatile("stura %0,%1" + :: "a" ((unsigned long) reipl_block_actual), + "a" (&lowcore_ptr[smp_processor_id()]->ipib)); +#endif + asm volatile("stura %0,%1" + :: "a" (csum_partial(reipl_block_actual, + reipl_block_actual->hdr.len, 0)), + "a" (&lowcore_ptr[smp_processor_id()]->ipib_checksum)); + preempt_enable(); + dump_run(trigger); +} + +static int __init dump_reipl_init(void) +{ + if (!diag308_set_works) + return -EOPNOTSUPP; + else + return 0; +} + +static struct shutdown_action __refdata dump_reipl_action = { + .name = SHUTDOWN_ACTION_DUMP_REIPL_STR, + .fn = dump_reipl_run, + .init = dump_reipl_init, +}; + /* * vmcmd shutdown action: Trigger vm command on shutdown. */ @@ -1421,7 +1469,8 @@ static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR, /* action list */ static struct shutdown_action *shutdown_actions_list[] = { - &ipl_action, &reipl_action, &dump_action, &vmcmd_action, &stop_action}; + &ipl_action, &reipl_action, &dump_reipl_action, &dump_action, + &vmcmd_action, &stop_action}; #define SHUTDOWN_ACTIONS_COUNT (sizeof(shutdown_actions_list) / sizeof(void *)) /* @@ -1434,11 +1483,11 @@ static int set_trigger(const char *buf, struct shutdown_trigger *trigger, size_t len) { int i; + for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) { if (!shutdown_actions_list[i]) continue; - if (strncmp(buf, shutdown_actions_list[i]->name, - strlen(shutdown_actions_list[i]->name)) == 0) { + if (sysfs_streq(buf, shutdown_actions_list[i]->name)) { trigger->action = shutdown_actions_list[i]; return len; } @@ -1672,7 +1721,7 @@ static int on_panic_notify(struct notifier_block *self, static struct notifier_block on_panic_nb = { .notifier_call = on_panic_notify, - .priority = 0, + .priority = INT_MIN, }; void __init setup_ipl(void) @@ -1696,7 +1745,6 @@ void __init setup_ipl(void) sizeof(ipl_info.data.nss.name)); break; case IPL_TYPE_UNKNOWN: - default: /* We have no info to copy */ break; } diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 397d131a345..80641224a09 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -5,6 +5,8 @@ * */ +#include <asm/asm-offsets.h> + #ifndef CONFIG_64BIT .globl _mcount _mcount: @@ -14,7 +16,7 @@ _mcount: ahi %r15,-96 l %r3,100(%r15) la %r2,0(%r14) - st %r1,0(%r15) + st %r1,__SF_BACKCHAIN(%r15) la %r3,0(%r3) bras %r14,0f .long ftrace_trace_function @@ -38,7 +40,7 @@ _mcount: stg %r14,112(%r15) lgr %r1,%r15 aghi %r15,-160 - stg %r1,0(%r15) + stg %r1,__SF_BACKCHAIN(%r15) lgr %r2,%r14 lg %r3,168(%r15) larl %r14,ftrace_trace_function diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 59b4e796680..eed4a00cb67 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -310,15 +310,20 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, info->plt_initialized = 1; } if (r_type == R_390_PLTOFF16 || - r_type == R_390_PLTOFF32 - || r_type == R_390_PLTOFF64 - ) + r_type == R_390_PLTOFF32 || + r_type == R_390_PLTOFF64) val = me->arch.plt_offset - me->arch.got_offset + info->plt_offset + rela->r_addend; - else - val = (Elf_Addr) me->module_core + - me->arch.plt_offset + info->plt_offset + - rela->r_addend - loc; + else { + if (!((r_type == R_390_PLT16DBL && + val - loc + 0xffffUL < 0x1ffffeUL) || + (r_type == R_390_PLT32DBL && + val - loc + 0xffffffffULL < 0x1fffffffeULL))) + val = (Elf_Addr) me->module_core + + 
me->arch.plt_offset + + info->plt_offset; + val += rela->r_addend - loc; + } if (r_type == R_390_PLT16DBL) *(unsigned short *) loc = val >> 1; else if (r_type == R_390_PLTOFF16) diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c new file mode 100644 index 00000000000..4bfdc421d7e --- /dev/null +++ b/arch/s390/kernel/nmi.c @@ -0,0 +1,376 @@ +/* + * Machine check handler + * + * Copyright IBM Corp. 2000,2009 + * Author(s): Ingo Adlung <adlung@de.ibm.com>, + * Martin Schwidefsky <schwidefsky@de.ibm.com>, + * Cornelia Huck <cornelia.huck@de.ibm.com>, + * Heiko Carstens <heiko.carstens@de.ibm.com>, + */ + +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/time.h> +#include <linux/module.h> +#include <asm/lowcore.h> +#include <asm/smp.h> +#include <asm/etr.h> +#include <asm/cpu.h> +#include <asm/nmi.h> +#include <asm/crw.h> + +struct mcck_struct { + int kill_task; + int channel_report; + int warning; + unsigned long long mcck_code; +}; + +static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck); + +static NORET_TYPE void s390_handle_damage(char *msg) +{ + smp_send_stop(); + disabled_wait((unsigned long) __builtin_return_address(0)); + while (1); +} + +/* + * Main machine check handler function. Will be called with interrupts enabled + * or disabled and machine checks enabled or disabled. + */ +void s390_handle_mcck(void) +{ + unsigned long flags; + struct mcck_struct mcck; + + /* + * Disable machine checks and get the current state of accumulated + * machine checks. Afterwards delete the old state and enable machine + * checks again. + */ + local_irq_save(flags); + local_mcck_disable(); + mcck = __get_cpu_var(cpu_mcck); + memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct)); + clear_thread_flag(TIF_MCCK_PENDING); + local_mcck_enable(); + local_irq_restore(flags); + + if (mcck.channel_report) + crw_handle_channel_report(); + /* + * A warning may remain for a prolonged period on the bare iron. + * (actually until the machine is powered off, or the problem is gone) + * So we just stop listening for the WARNING MCH and avoid continuously + * being interrupted. One caveat is however, that we must do this per + * processor and cannot use the smp version of ctl_clear_bit(). + * On VM we only get one interrupt per virtally presented machinecheck. + * Though one suffices, we may get one interrupt per (virtual) cpu. + */ + if (mcck.warning) { /* WARNING pending ? */ + static int mchchk_wng_posted = 0; + + /* Use single cpu clear, as we cannot handle smp here. */ + __ctl_clear_bit(14, 24); /* Disable WARNING MCH */ + if (xchg(&mchchk_wng_posted, 1) == 0) + kill_cad_pid(SIGPWR, 1); + } + if (mcck.kill_task) { + local_irq_enable(); + printk(KERN_EMERG "mcck: Terminating task because of machine " + "malfunction (code 0x%016llx).\n", mcck.mcck_code); + printk(KERN_EMERG "mcck: task: %s, pid: %d.\n", + current->comm, current->pid); + do_exit(SIGSEGV); + } +} +EXPORT_SYMBOL_GPL(s390_handle_mcck); + +/* + * returns 0 if all registers could be validated + * returns 1 otherwise + */ +static int notrace s390_revalidate_registers(struct mci *mci) +{ + int kill_task; + u64 tmpclock; + u64 zero; + void *fpt_save_area, *fpt_creg_save_area; + + kill_task = 0; + zero = 0; + + if (!mci->gr) { + /* + * General purpose registers couldn't be restored and have + * unknown contents. Process needs to be terminated. + */ + kill_task = 1; + } + if (!mci->fp) { + /* + * Floating point registers can't be restored and + * therefore the process needs to be terminated. 
+ */ + kill_task = 1; + } +#ifndef CONFIG_64BIT + asm volatile( + " ld 0,0(%0)\n" + " ld 2,8(%0)\n" + " ld 4,16(%0)\n" + " ld 6,24(%0)" + : : "a" (&S390_lowcore.floating_pt_save_area)); +#endif + + if (MACHINE_HAS_IEEE) { +#ifdef CONFIG_64BIT + fpt_save_area = &S390_lowcore.floating_pt_save_area; + fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; +#else + fpt_save_area = (void *) S390_lowcore.extended_save_area_addr; + fpt_creg_save_area = fpt_save_area + 128; +#endif + if (!mci->fc) { + /* + * Floating point control register can't be restored. + * Task will be terminated. + */ + asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); + kill_task = 1; + + } else + asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); + + asm volatile( + " ld 0,0(%0)\n" + " ld 1,8(%0)\n" + " ld 2,16(%0)\n" + " ld 3,24(%0)\n" + " ld 4,32(%0)\n" + " ld 5,40(%0)\n" + " ld 6,48(%0)\n" + " ld 7,56(%0)\n" + " ld 8,64(%0)\n" + " ld 9,72(%0)\n" + " ld 10,80(%0)\n" + " ld 11,88(%0)\n" + " ld 12,96(%0)\n" + " ld 13,104(%0)\n" + " ld 14,112(%0)\n" + " ld 15,120(%0)\n" + : : "a" (fpt_save_area)); + } + /* Revalidate access registers */ + asm volatile( + " lam 0,15,0(%0)" + : : "a" (&S390_lowcore.access_regs_save_area)); + if (!mci->ar) { + /* + * Access registers have unknown contents. + * Terminating task. + */ + kill_task = 1; + } + /* Revalidate control registers */ + if (!mci->cr) { + /* + * Control registers have unknown contents. + * Can't recover and therefore stopping machine. + */ + s390_handle_damage("invalid control registers."); + } else { +#ifdef CONFIG_64BIT + asm volatile( + " lctlg 0,15,0(%0)" + : : "a" (&S390_lowcore.cregs_save_area)); +#else + asm volatile( + " lctl 0,15,0(%0)" + : : "a" (&S390_lowcore.cregs_save_area)); +#endif + } + /* + * We don't even try to revalidate the TOD register, since we simply + * can't write something sensible into that register. + */ +#ifdef CONFIG_64BIT + /* + * See if we can revalidate the TOD programmable register with its + * old contents (should be zero) otherwise set it to zero. + */ + if (!mci->pr) + asm volatile( + " sr 0,0\n" + " sckpf" + : : : "0", "cc"); + else + asm volatile( + " l 0,0(%0)\n" + " sckpf" + : : "a" (&S390_lowcore.tod_progreg_save_area) + : "0", "cc"); +#endif + /* Revalidate clock comparator register */ + asm volatile( + " stck 0(%1)\n" + " sckc 0(%1)" + : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory"); + + /* Check if old PSW is valid */ + if (!mci->wp) + /* + * Can't tell if we come from user or kernel mode + * -> stopping machine. + */ + s390_handle_damage("old psw invalid."); + + if (!mci->ms || !mci->pm || !mci->ia) + kill_task = 1; + + return kill_task; +} + +#define MAX_IPD_COUNT 29 +#define MAX_IPD_TIME (5 * 60 * USEC_PER_SEC) /* 5 minutes */ + +#define ED_STP_ISLAND 6 /* External damage STP island check */ +#define ED_STP_SYNC 7 /* External damage STP sync check */ +#define ED_ETR_SYNC 12 /* External damage ETR sync check */ +#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ + +/* + * machine check handler. 
+ */ +void notrace s390_do_machine_check(struct pt_regs *regs) +{ + static int ipd_count; + static DEFINE_SPINLOCK(ipd_lock); + static unsigned long long last_ipd; + struct mcck_struct *mcck; + unsigned long long tmp; + struct mci *mci; + int umode; + + lockdep_off(); + s390_idle_check(); + + mci = (struct mci *) &S390_lowcore.mcck_interruption_code; + mcck = &__get_cpu_var(cpu_mcck); + umode = user_mode(regs); + + if (mci->sd) { + /* System damage -> stopping machine */ + s390_handle_damage("received system damage machine check."); + } + if (mci->pd) { + if (mci->b) { + /* Processing backup -> verify if we can survive this */ + u64 z_mcic, o_mcic, t_mcic; +#ifdef CONFIG_64BIT + z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); + o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | + 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | + 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 | + 1ULL<<16); +#else + z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 | + 1ULL<<29); + o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | + 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | + 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16); +#endif + t_mcic = *(u64 *)mci; + + if (((t_mcic & z_mcic) != 0) || + ((t_mcic & o_mcic) != o_mcic)) { + s390_handle_damage("processing backup machine " + "check with damage."); + } + + /* + * Nullifying exigent condition, therefore we might + * retry this instruction. + */ + spin_lock(&ipd_lock); + tmp = get_clock(); + if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME) + ipd_count++; + else + ipd_count = 1; + last_ipd = tmp; + if (ipd_count == MAX_IPD_COUNT) + s390_handle_damage("too many ipd retries."); + spin_unlock(&ipd_lock); + } else { + /* Processing damage -> stopping machine */ + s390_handle_damage("received instruction processing " + "damage machine check."); + } + } + if (s390_revalidate_registers(mci)) { + if (umode) { + /* + * Couldn't restore all register contents while in + * user mode -> mark task for termination. + */ + mcck->kill_task = 1; + mcck->mcck_code = *(unsigned long long *) mci; + set_thread_flag(TIF_MCCK_PENDING); + } else { + /* + * Couldn't restore all register contents while in + * kernel mode -> stopping machine. 
+ */ + s390_handle_damage("unable to revalidate registers."); + } + } + if (mci->cd) { + /* Timing facility damage */ + s390_handle_damage("TOD clock damaged"); + } + if (mci->ed && mci->ec) { + /* External damage */ + if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC)) + etr_sync_check(); + if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) + etr_switch_to_local(); + if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC)) + stp_sync_check(); + if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND)) + stp_island_check(); + } + if (mci->se) + /* Storage error uncorrected */ + s390_handle_damage("received storage error uncorrected " + "machine check."); + if (mci->ke) + /* Storage key-error uncorrected */ + s390_handle_damage("received storage key-error uncorrected " + "machine check."); + if (mci->ds && mci->fa) + /* Storage degradation */ + s390_handle_damage("received storage degradation machine " + "check."); + if (mci->cp) { + /* Channel report word pending */ + mcck->channel_report = 1; + set_thread_flag(TIF_MCCK_PENDING); + } + if (mci->w) { + /* Warning pending */ + mcck->warning = 1; + set_thread_flag(TIF_MCCK_PENDING); + } + lockdep_on(); +} + +static int __init machine_check_init(void) +{ + ctl_set_bit(14, 25); /* enable external damage MCH */ + ctl_set_bit(14, 27); /* enable system recovery MCH */ + ctl_set_bit(14, 24); /* enable warning MCH */ + return 0; +} +arch_initcall(machine_check_init); diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 5cd38a90e64..b48e961a38f 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -1,18 +1,10 @@ /* - * arch/s390/kernel/process.c + * This file handles the architecture dependent parts of process handling. * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), - * Hartmut Penner (hp@de.ibm.com), - * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), - * - * Derived from "arch/i386/kernel/process.c" - * Copyright (C) 1995, Linus Torvalds - */ - -/* - * This file handles the architecture-dependent parts of process handling.. + * Copyright IBM Corp. 1999,2009 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, + * Hartmut Penner <hp@de.ibm.com>, + * Denis Joseph Barrow, */ #include <linux/compiler.h> @@ -47,6 +39,7 @@ #include <asm/processor.h> #include <asm/irq.h> #include <asm/timer.h> +#include <asm/nmi.h> #include "entry.h" asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); @@ -76,7 +69,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk) return sf->gprs[8]; } -extern void s390_handle_mcck(void); /* * The idle loop on a S390... */ @@ -149,6 +141,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); } +EXPORT_SYMBOL(kernel_thread); /* * Free current thread data structures etc.. 
@@ -168,34 +161,35 @@ void release_thread(struct task_struct *dead_task) } int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, - unsigned long unused, - struct task_struct * p, struct pt_regs * regs) + unsigned long unused, + struct task_struct *p, struct pt_regs *regs) { - struct fake_frame - { - struct stack_frame sf; - struct pt_regs childregs; - } *frame; - - frame = container_of(task_pt_regs(p), struct fake_frame, childregs); - p->thread.ksp = (unsigned long) frame; + struct thread_info *ti; + struct fake_frame + { + struct stack_frame sf; + struct pt_regs childregs; + } *frame; + + frame = container_of(task_pt_regs(p), struct fake_frame, childregs); + p->thread.ksp = (unsigned long) frame; /* Store access registers to kernel stack of new process. */ - frame->childregs = *regs; + frame->childregs = *regs; frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */ - frame->childregs.gprs[15] = new_stackp; - frame->sf.back_chain = 0; + frame->childregs.gprs[15] = new_stackp; + frame->sf.back_chain = 0; - /* new return point is ret_from_fork */ - frame->sf.gprs[8] = (unsigned long) ret_from_fork; + /* new return point is ret_from_fork */ + frame->sf.gprs[8] = (unsigned long) ret_from_fork; - /* fake return stack for resume(), don't go back to schedule */ - frame->sf.gprs[9] = (unsigned long) frame; + /* fake return stack for resume(), don't go back to schedule */ + frame->sf.gprs[9] = (unsigned long) frame; /* Save access registers to new thread structure. */ save_access_regs(&p->thread.acrs[0]); #ifndef CONFIG_64BIT - /* + /* * save fprs to current->thread.fp_regs to merge them with * the emulated registers and then copy the result to the child. */ @@ -220,10 +214,13 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, #endif /* CONFIG_64BIT */ /* start new process with ar4 pointing to the correct address space */ p->thread.mm_segment = get_fs(); - /* Don't copy debug registers */ - memset(&p->thread.per_info,0,sizeof(p->thread.per_info)); - - return 0; + /* Don't copy debug registers */ + memset(&p->thread.per_info, 0, sizeof(p->thread.per_info)); + /* Initialize per thread user and system timer values */ + ti = task_thread_info(p); + ti->user_timer = 0; + ti->system_timer = 0; + return 0; } SYSCALL_DEFINE0(fork) @@ -311,7 +308,7 @@ out: int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) { #ifndef CONFIG_64BIT - /* + /* * save fprs to current->thread.fp_regs to merge them with * the emulated registers and then copy the result to the dump. 
*/ @@ -322,6 +319,7 @@ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) #endif /* CONFIG_64BIT */ return 1; } +EXPORT_SYMBOL(dump_fpu); unsigned long get_wchan(struct task_struct *p) { @@ -346,4 +344,3 @@ unsigned long get_wchan(struct task_struct *p) } return 0; } - diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 82c1872cfe8..802c8ab247f 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -18,10 +18,11 @@ #include <asm/lowcore.h> #include <asm/param.h> -void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo) +void __cpuinit print_cpu_info(void) { pr_info("Processor %d started, address %d, identification %06X\n", - cpuinfo->cpu_nr, cpuinfo->cpu_addr, cpuinfo->cpu_id.ident); + S390_lowcore.cpu_nr, S390_lowcore.cpu_addr, + S390_lowcore.cpu_id.ident); } /* @@ -30,48 +31,46 @@ void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo) static int show_cpuinfo(struct seq_file *m, void *v) { - static const char *hwcap_str[8] = { + static const char *hwcap_str[9] = { "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", - "edat" + "edat", "etf3eh" }; - struct cpuinfo_S390 *cpuinfo; - unsigned long n = (unsigned long) v - 1; - int i; + struct _lowcore *lc; + unsigned long n = (unsigned long) v - 1; + int i; - s390_adjust_jiffies(); - preempt_disable(); - if (!n) { - seq_printf(m, "vendor_id : IBM/S390\n" - "# processors : %i\n" - "bogomips per cpu: %lu.%02lu\n", - num_online_cpus(), loops_per_jiffy/(500000/HZ), - (loops_per_jiffy/(5000/HZ))%100); - seq_puts(m, "features\t: "); - for (i = 0; i < 8; i++) - if (hwcap_str[i] && (elf_hwcap & (1UL << i))) - seq_printf(m, "%s ", hwcap_str[i]); - seq_puts(m, "\n"); - } + s390_adjust_jiffies(); + preempt_disable(); + if (!n) { + seq_printf(m, "vendor_id : IBM/S390\n" + "# processors : %i\n" + "bogomips per cpu: %lu.%02lu\n", + num_online_cpus(), loops_per_jiffy/(500000/HZ), + (loops_per_jiffy/(5000/HZ))%100); + seq_puts(m, "features\t: "); + for (i = 0; i < 9; i++) + if (hwcap_str[i] && (elf_hwcap & (1UL << i))) + seq_printf(m, "%s ", hwcap_str[i]); + seq_puts(m, "\n"); + } - if (cpu_online(n)) { + if (cpu_online(n)) { #ifdef CONFIG_SMP - if (smp_processor_id() == n) - cpuinfo = &S390_lowcore.cpu_data; - else - cpuinfo = &lowcore_ptr[n]->cpu_data; + lc = (smp_processor_id() == n) ? 
+ &S390_lowcore : lowcore_ptr[n]; #else - cpuinfo = &S390_lowcore.cpu_data; + lc = &S390_lowcore; #endif - seq_printf(m, "processor %li: " - "version = %02X, " - "identification = %06X, " - "machine = %04X\n", - n, cpuinfo->cpu_id.version, - cpuinfo->cpu_id.ident, - cpuinfo->cpu_id.machine); - } - preempt_enable(); - return 0; + seq_printf(m, "processor %li: " + "version = %02X, " + "identification = %06X, " + "machine = %04X\n", + n, lc->cpu_id.version, + lc->cpu_id.ident, + lc->cpu_id.machine); + } + preempt_enable(); + return 0; } static void *c_start(struct seq_file *m, loff_t *pos) diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S index c41930499a5..774147824c3 100644 --- a/arch/s390/kernel/reipl64.S +++ b/arch/s390/kernel/reipl64.S @@ -1,10 +1,7 @@ /* - * arch/s390/kernel/reipl.S - * - * S390 version - * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) - Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) + * Copyright IBM Corp 2000,2009 + * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, + * Denis Joseph Barrow, */ #include <asm/lowcore.h> @@ -30,7 +27,7 @@ do_reipl_asm: basr %r13,0 mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10) stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1) stckc .Lclkcmp-.Lpg0(%r13) - mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(8,%r1),.Lclkcmp-.Lpg0(%r13) + mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(7,%r1),.Lclkcmp-.Lpg0(%r13) stpt __LC_CPU_TIMER_SAVE_AREA-0x1000(%r1) stg %r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1) diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 46b90cb0370..656fcbb9bd8 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c @@ -1,49 +1,5 @@ -/* - * arch/s390/kernel/s390_ksyms.c - * - * S390 version - */ -#include <linux/highuid.h> #include <linux/module.h> -#include <linux/mm.h> -#include <linux/smp.h> -#include <linux/syscalls.h> -#include <linux/interrupt.h> -#include <asm/checksum.h> -#include <asm/cpcmd.h> -#include <asm/delay.h> -#include <asm/pgalloc.h> -#include <asm/setup.h> #include <asm/ftrace.h> -#ifdef CONFIG_IP_MULTICAST -#include <net/arp.h> -#endif - -/* - * memory management - */ -EXPORT_SYMBOL(_oi_bitmap); -EXPORT_SYMBOL(_ni_bitmap); -EXPORT_SYMBOL(_zb_findmap); -EXPORT_SYMBOL(_sb_findmap); - -/* - * binfmt_elf loader - */ -extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs); -EXPORT_SYMBOL(dump_fpu); -EXPORT_SYMBOL(empty_zero_page); - -/* - * misc. - */ -EXPORT_SYMBOL(machine_flags); -EXPORT_SYMBOL(__udelay); -EXPORT_SYMBOL(kernel_thread); -EXPORT_SYMBOL(csum_fold); -EXPORT_SYMBOL(console_mode); -EXPORT_SYMBOL(console_devno); -EXPORT_SYMBOL(console_irq); #ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(_mcount); diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index c5cfb6185ea..06201b93cbb 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -74,9 +74,17 @@ EXPORT_SYMBOL(uaccess); * Machine setup.. */ unsigned int console_mode = 0; +EXPORT_SYMBOL(console_mode); + unsigned int console_devno = -1; +EXPORT_SYMBOL(console_devno); + unsigned int console_irq = -1; +EXPORT_SYMBOL(console_irq); + unsigned long machine_flags; +EXPORT_SYMBOL(machine_flags); + unsigned long elf_hwcap = 0; char elf_platform[ELF_PLATFORM_SIZE]; @@ -86,6 +94,10 @@ volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ int __initdata memory_end_set; unsigned long __initdata memory_end; +/* An array with a pointer to the lowcore of every CPU. 
*/ +struct _lowcore *lowcore_ptr[NR_CPUS]; +EXPORT_SYMBOL(lowcore_ptr); + /* * This is set up by the setup-routine at boot-time * for S390 need to find out, what we have to setup @@ -109,13 +121,10 @@ static struct resource data_resource = { */ void __cpuinit cpu_init(void) { - int addr = hard_smp_processor_id(); - /* * Store processor id in lowcore (used e.g. in timer_interrupt) */ - get_cpu_id(&S390_lowcore.cpu_data.cpu_id); - S390_lowcore.cpu_data.cpu_addr = addr; + get_cpu_id(&S390_lowcore.cpu_id); /* * Force FPU initialization: @@ -125,8 +134,7 @@ void __cpuinit cpu_init(void) atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; - if (current->mm) - BUG(); + BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); } @@ -217,7 +225,7 @@ static void __init conmode_default(void) } } -#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) +#ifdef CONFIG_ZFCPDUMP static void __init setup_zfcpdump(unsigned int console_devno) { static char str[41]; @@ -289,11 +297,7 @@ static int __init early_parse_mem(char *p) early_param("mem", early_parse_mem); #ifdef CONFIG_S390_SWITCH_AMODE -#ifdef CONFIG_PGSTE -unsigned int switch_amode = 1; -#else unsigned int switch_amode = 0; -#endif EXPORT_SYMBOL_GPL(switch_amode); static int set_amode_and_uaccess(unsigned long user_amode, @@ -414,7 +418,6 @@ setup_lowcore(void) PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; lc->io_new_psw.mask = psw_kernel_bits; lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; - lc->ipl_device = S390_lowcore.ipl_device; lc->clock_comparator = -1ULL; lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; lc->async_stack = (unsigned long) @@ -434,6 +437,7 @@ setup_lowcore(void) lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; #endif set_prefix((u32)(unsigned long) lc); + lowcore_ptr[0] = lc; } static void __init @@ -510,7 +514,7 @@ static void __init setup_memory_end(void) unsigned long max_mem; int i; -#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) +#ifdef CONFIG_ZFCPDUMP if (ipl_info.type == IPL_TYPE_FCP_DUMP) { memory_end = ZFCPDUMP_HSA_SIZE; memory_end_set = 1; @@ -677,7 +681,6 @@ setup_memory(void) static void __init setup_hwcaps(void) { static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; - struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data; unsigned long long facility_list_extended; unsigned int facility_list; int i; @@ -693,15 +696,22 @@ static void __init setup_hwcaps(void) * Bit 17: the message-security assist is installed * Bit 19: the long-displacement facility is installed * Bit 21: the extended-immediate facility is installed + * Bit 22: extended-translation facility 3 is installed + * Bit 30: extended-translation facility 3 enhancement facility * These get translated to: * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1, * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3, - * HWCAP_S390_LDISP bit 4, and HWCAP_S390_EIMM bit 5. + * HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and + * HWCAP_S390_ETF3EH bit 8 (22 && 30). */ for (i = 0; i < 6; i++) if (facility_list & (1UL << (31 - stfl_bits[i]))) elf_hwcap |= 1UL << i; + if ((facility_list & (1UL << (31 - 22))) + && (facility_list & (1UL << (31 - 30)))) + elf_hwcap |= 1UL << 8; + /* * Check for additional facilities with store-facility-list-extended. * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0 @@ -710,20 +720,22 @@ static void __init setup_hwcaps(void) * How many facility words are stored depends on the number of * doublewords passed to the instruction. 
The additional facilites * are: - * Bit 43: decimal floating point facility is installed + * Bit 42: decimal floating point facility is installed + * Bit 44: perform floating point operation facility is installed * translated to: - * HWCAP_S390_DFP bit 6. + * HWCAP_S390_DFP bit 6 (42 && 44). */ if ((elf_hwcap & (1UL << 2)) && __stfle(&facility_list_extended, 1) > 0) { - if (facility_list_extended & (1ULL << (64 - 43))) + if ((facility_list_extended & (1ULL << (63 - 42))) + && (facility_list_extended & (1ULL << (63 - 44)))) elf_hwcap |= 1UL << 6; } if (MACHINE_HAS_HPAGE) elf_hwcap |= 1UL << 7; - switch (cpuinfo->cpu_id.machine) { + switch (S390_lowcore.cpu_id.machine) { case 0x9672: #if !defined(CONFIG_64BIT) default: /* Use "g5" as default for 31 bit kernels. */ @@ -816,7 +828,7 @@ setup_arch(char **cmdline_p) setup_lowcore(); cpu_init(); - __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; + __cpu_logical_map[0] = stap(); s390_init_cpu_topology(); /* diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 2d337cbb932..006ed5016eb 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -32,6 +32,7 @@ #include <linux/delay.h> #include <linux/cache.h> #include <linux/interrupt.h> +#include <linux/irqflags.h> #include <linux/cpu.h> #include <linux/timex.h> #include <linux/bootmem.h> @@ -50,12 +51,6 @@ #include <asm/vdso.h> #include "entry.h" -/* - * An array with a pointer the lowcore of every CPU. - */ -struct _lowcore *lowcore_ptr[NR_CPUS]; -EXPORT_SYMBOL(lowcore_ptr); - static struct task_struct *current_set[NR_CPUS]; static u8 smp_cpu_type; @@ -81,9 +76,7 @@ void smp_send_stop(void) /* Disable all interrupts/machine checks */ __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); - - /* write magic number to zero page (absolute 0) */ - lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; + trace_hardirqs_off(); /* stop all processors */ for_each_online_cpu(cpu) { @@ -233,7 +226,7 @@ EXPORT_SYMBOL(smp_ctl_clear_bit); */ #define CPU_INIT_NO 1 -#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) +#ifdef CONFIG_ZFCPDUMP /* * zfcpdump_prefix_array holds prefix registers for the following scenario: @@ -274,7 +267,7 @@ EXPORT_SYMBOL_GPL(zfcpdump_save_areas); static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } -#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */ +#endif /* CONFIG_ZFCPDUMP */ static int cpu_stopped(int cpu) { @@ -304,8 +297,8 @@ static int smp_rescan_cpus_sigp(cpumask_t avail) { int cpu_id, logical_cpu; - logical_cpu = first_cpu(avail); - if (logical_cpu == NR_CPUS) + logical_cpu = cpumask_first(&avail); + if (logical_cpu >= nr_cpu_ids) return 0; for (cpu_id = 0; cpu_id <= 65535; cpu_id++) { if (cpu_known(cpu_id)) @@ -316,8 +309,8 @@ static int smp_rescan_cpus_sigp(cpumask_t avail) continue; cpu_set(logical_cpu, cpu_present_map); smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; - logical_cpu = next_cpu(logical_cpu, avail); - if (logical_cpu == NR_CPUS) + logical_cpu = cpumask_next(logical_cpu, &avail); + if (logical_cpu >= nr_cpu_ids) break; } return 0; @@ -329,8 +322,8 @@ static int smp_rescan_cpus_sclp(cpumask_t avail) int cpu_id, logical_cpu, cpu; int rc; - logical_cpu = first_cpu(avail); - if (logical_cpu == NR_CPUS) + logical_cpu = cpumask_first(&avail); + if (logical_cpu >= nr_cpu_ids) return 0; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) @@ -351,8 +344,8 @@ static int smp_rescan_cpus_sclp(cpumask_t avail) smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; else smp_cpu_state[logical_cpu] = 
CPU_STATE_CONFIGURED; - logical_cpu = next_cpu(logical_cpu, avail); - if (logical_cpu == NR_CPUS) + logical_cpu = cpumask_next(logical_cpu, &avail); + if (logical_cpu >= nr_cpu_ids) break; } out: @@ -379,7 +372,7 @@ static void __init smp_detect_cpus(void) c_cpus = 1; s_cpus = 0; - boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; + boot_cpu_addr = __cpu_logical_map[0]; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) panic("smp_detect_cpus failed to allocate memory\n"); @@ -453,7 +446,7 @@ int __cpuinit start_secondary(void *cpuvoid) /* Switch on interrupts */ local_irq_enable(); /* Print info about this processor */ - print_cpu_info(&S390_lowcore.cpu_data); + print_cpu_info(); /* cpu_idle will call schedule for us */ cpu_idle(); return 0; @@ -515,7 +508,6 @@ out: return -ENOMEM; } -#ifdef CONFIG_HOTPLUG_CPU static void smp_free_lowcore(int cpu) { struct _lowcore *lowcore; @@ -534,7 +526,6 @@ static void smp_free_lowcore(int cpu) free_pages((unsigned long) lowcore, lc_order); lowcore_ptr[cpu] = NULL; } -#endif /* CONFIG_HOTPLUG_CPU */ /* Upping and downing of CPUs */ int __cpuinit __cpu_up(unsigned int cpu) @@ -543,16 +534,23 @@ int __cpuinit __cpu_up(unsigned int cpu) struct _lowcore *cpu_lowcore; struct stack_frame *sf; sigp_ccode ccode; + u32 lowcore; if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) return -EIO; if (smp_alloc_lowcore(cpu)) return -ENOMEM; - - ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), - cpu, sigp_set_prefix); - if (ccode) - return -EIO; + do { + ccode = signal_processor(cpu, sigp_initial_cpu_reset); + if (ccode == sigp_busy) + udelay(10); + if (ccode == sigp_not_operational) + goto err_out; + } while (ccode == sigp_busy); + + lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; + while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) + udelay(10); idle = current_set[cpu]; cpu_lowcore = lowcore_ptr[cpu]; @@ -571,9 +569,8 @@ int __cpuinit __cpu_up(unsigned int cpu) : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; cpu_lowcore->current_task = (unsigned long) idle; - cpu_lowcore->cpu_data.cpu_nr = cpu; + cpu_lowcore->cpu_nr = cpu; cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; - cpu_lowcore->ipl_device = S390_lowcore.ipl_device; eieio(); while (signal_processor(cpu, sigp_restart) == sigp_busy) @@ -582,6 +579,10 @@ int __cpuinit __cpu_up(unsigned int cpu) while (!cpu_online(cpu)) cpu_relax(); return 0; + +err_out: + smp_free_lowcore(cpu); + return -EIO; } static int __init setup_possible_cpus(char *s) @@ -589,9 +590,8 @@ static int __init setup_possible_cpus(char *s) int pcpus, cpu; pcpus = simple_strtoul(s, NULL, 0); - cpu_possible_map = cpumask_of_cpu(0); - for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++) - cpu_set(cpu, cpu_possible_map); + for (cpu = 0; cpu < pcpus && cpu < nr_cpu_ids; cpu++) + set_cpu_possible(cpu, true); return 0; } early_param("possible_cpus", setup_possible_cpus); @@ -663,7 +663,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) /* request the 0x1201 emergency signal external interrupt */ if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) panic("Couldn't request external interrupt 0x1201"); - print_cpu_info(&S390_lowcore.cpu_data); + print_cpu_info(); /* Reallocate current lowcore, but keep its contents. */ lc_order = sizeof(long) == 8 ? 
1 : 0; diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c new file mode 100644 index 00000000000..b5e75e1061c --- /dev/null +++ b/arch/s390/kernel/sysinfo.c @@ -0,0 +1,428 @@ +/* + * Copyright IBM Corp. 2001, 2009 + * Author(s): Ulrich Weigand <Ulrich.Weigand@de.ibm.com>, + * Martin Schwidefsky <schwidefsky@de.ibm.com>, + */ + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <asm/ebcdic.h> +#include <asm/sysinfo.h> +#include <asm/cpcmd.h> + +/* Sigh, math-emu. Don't ask. */ +#include <asm/sfp-util.h> +#include <math-emu/soft-fp.h> +#include <math-emu/single.h> + +static inline int stsi_0(void) +{ + int rc = stsi(NULL, 0, 0, 0); + return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28); +} + +static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len) +{ + if (stsi(info, 1, 1, 1) == -ENOSYS) + return len; + + EBCASC(info->manufacturer, sizeof(info->manufacturer)); + EBCASC(info->type, sizeof(info->type)); + EBCASC(info->model, sizeof(info->model)); + EBCASC(info->sequence, sizeof(info->sequence)); + EBCASC(info->plant, sizeof(info->plant)); + EBCASC(info->model_capacity, sizeof(info->model_capacity)); + EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap)); + EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap)); + len += sprintf(page + len, "Manufacturer: %-16.16s\n", + info->manufacturer); + len += sprintf(page + len, "Type: %-4.4s\n", + info->type); + if (info->model[0] != '\0') + /* + * Sigh: the model field has been renamed with System z9 + * to model_capacity and a new model field has been added + * after the plant field. To avoid confusing older programs + * the "Model:" prints "model_capacity model" or just + * "model_capacity" if the model string is empty . + */ + len += sprintf(page + len, + "Model: %-16.16s %-16.16s\n", + info->model_capacity, info->model); + else + len += sprintf(page + len, "Model: %-16.16s\n", + info->model_capacity); + len += sprintf(page + len, "Sequence Code: %-16.16s\n", + info->sequence); + len += sprintf(page + len, "Plant: %-4.4s\n", + info->plant); + len += sprintf(page + len, "Model Capacity: %-16.16s %08u\n", + info->model_capacity, *(u32 *) info->model_cap_rating); + if (info->model_perm_cap[0] != '\0') + len += sprintf(page + len, + "Model Perm. Capacity: %-16.16s %08u\n", + info->model_perm_cap, + *(u32 *) info->model_perm_cap_rating); + if (info->model_temp_cap[0] != '\0') + len += sprintf(page + len, + "Model Temp. Capacity: %-16.16s %08u\n", + info->model_temp_cap, + *(u32 *) info->model_temp_cap_rating); + return len; +} + +static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len) +{ + struct sysinfo_1_2_2_extension *ext; + int i; + + if (stsi(info, 1, 2, 2) == -ENOSYS) + return len; + ext = (struct sysinfo_1_2_2_extension *) + ((unsigned long) info + info->acc_offset); + + len += sprintf(page + len, "\n"); + len += sprintf(page + len, "CPUs Total: %d\n", + info->cpus_total); + len += sprintf(page + len, "CPUs Configured: %d\n", + info->cpus_configured); + len += sprintf(page + len, "CPUs Standby: %d\n", + info->cpus_standby); + len += sprintf(page + len, "CPUs Reserved: %d\n", + info->cpus_reserved); + + if (info->format == 1) { + /* + * Sigh 2. According to the specification the alternate + * capability field is a 32 bit floating point number + * if the higher order 8 bits are not zero. 
Printing + * a floating point number in the kernel is a no-no, + * always print the number as 32 bit unsigned integer. + * The user-space needs to know about the strange + * encoding of the alternate cpu capability. + */ + len += sprintf(page + len, "Capability: %u %u\n", + info->capability, ext->alt_capability); + for (i = 2; i <= info->cpus_total; i++) + len += sprintf(page + len, + "Adjustment %02d-way: %u %u\n", + i, info->adjustment[i-2], + ext->alt_adjustment[i-2]); + + } else { + len += sprintf(page + len, "Capability: %u\n", + info->capability); + for (i = 2; i <= info->cpus_total; i++) + len += sprintf(page + len, + "Adjustment %02d-way: %u\n", + i, info->adjustment[i-2]); + } + + if (info->secondary_capability != 0) + len += sprintf(page + len, "Secondary Capability: %d\n", + info->secondary_capability); + return len; +} + +static int stsi_2_2_2(struct sysinfo_2_2_2 *info, char *page, int len) +{ + if (stsi(info, 2, 2, 2) == -ENOSYS) + return len; + + EBCASC(info->name, sizeof(info->name)); + + len += sprintf(page + len, "\n"); + len += sprintf(page + len, "LPAR Number: %d\n", + info->lpar_number); + + len += sprintf(page + len, "LPAR Characteristics: "); + if (info->characteristics & LPAR_CHAR_DEDICATED) + len += sprintf(page + len, "Dedicated "); + if (info->characteristics & LPAR_CHAR_SHARED) + len += sprintf(page + len, "Shared "); + if (info->characteristics & LPAR_CHAR_LIMITED) + len += sprintf(page + len, "Limited "); + len += sprintf(page + len, "\n"); + + len += sprintf(page + len, "LPAR Name: %-8.8s\n", + info->name); + + len += sprintf(page + len, "LPAR Adjustment: %d\n", + info->caf); + + len += sprintf(page + len, "LPAR CPUs Total: %d\n", + info->cpus_total); + len += sprintf(page + len, "LPAR CPUs Configured: %d\n", + info->cpus_configured); + len += sprintf(page + len, "LPAR CPUs Standby: %d\n", + info->cpus_standby); + len += sprintf(page + len, "LPAR CPUs Reserved: %d\n", + info->cpus_reserved); + len += sprintf(page + len, "LPAR CPUs Dedicated: %d\n", + info->cpus_dedicated); + len += sprintf(page + len, "LPAR CPUs Shared: %d\n", + info->cpus_shared); + return len; +} + +static int stsi_3_2_2(struct sysinfo_3_2_2 *info, char *page, int len) +{ + int i; + + if (stsi(info, 3, 2, 2) == -ENOSYS) + return len; + for (i = 0; i < info->count; i++) { + EBCASC(info->vm[i].name, sizeof(info->vm[i].name)); + EBCASC(info->vm[i].cpi, sizeof(info->vm[i].cpi)); + len += sprintf(page + len, "\n"); + len += sprintf(page + len, "VM%02d Name: %-8.8s\n", + i, info->vm[i].name); + len += sprintf(page + len, "VM%02d Control Program: %-16.16s\n", + i, info->vm[i].cpi); + + len += sprintf(page + len, "VM%02d Adjustment: %d\n", + i, info->vm[i].caf); + + len += sprintf(page + len, "VM%02d CPUs Total: %d\n", + i, info->vm[i].cpus_total); + len += sprintf(page + len, "VM%02d CPUs Configured: %d\n", + i, info->vm[i].cpus_configured); + len += sprintf(page + len, "VM%02d CPUs Standby: %d\n", + i, info->vm[i].cpus_standby); + len += sprintf(page + len, "VM%02d CPUs Reserved: %d\n", + i, info->vm[i].cpus_reserved); + } + return len; +} + +static int proc_read_sysinfo(char *page, char **start, + off_t off, int count, + int *eof, void *data) +{ + unsigned long info = get_zeroed_page(GFP_KERNEL); + int level, len; + + if (!info) + return 0; + + len = 0; + level = stsi_0(); + if (level >= 1) + len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len); + + if (level >= 1) + len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len); + + if (level >= 2) + len = stsi_2_2_2((struct sysinfo_2_2_2 *) 
info, page, len); + + if (level >= 3) + len = stsi_3_2_2((struct sysinfo_3_2_2 *) info, page, len); + + free_page(info); + return len; +} + +static __init int create_proc_sysinfo(void) +{ + create_proc_read_entry("sysinfo", 0444, NULL, + proc_read_sysinfo, NULL); + return 0; +} +device_initcall(create_proc_sysinfo); + +/* + * Service levels interface. + */ + +static DECLARE_RWSEM(service_level_sem); +static LIST_HEAD(service_level_list); + +int register_service_level(struct service_level *slr) +{ + struct service_level *ptr; + + down_write(&service_level_sem); + list_for_each_entry(ptr, &service_level_list, list) + if (ptr == slr) { + up_write(&service_level_sem); + return -EEXIST; + } + list_add_tail(&slr->list, &service_level_list); + up_write(&service_level_sem); + return 0; +} +EXPORT_SYMBOL(register_service_level); + +int unregister_service_level(struct service_level *slr) +{ + struct service_level *ptr, *next; + int rc = -ENOENT; + + down_write(&service_level_sem); + list_for_each_entry_safe(ptr, next, &service_level_list, list) { + if (ptr != slr) + continue; + list_del(&ptr->list); + rc = 0; + break; + } + up_write(&service_level_sem); + return rc; +} +EXPORT_SYMBOL(unregister_service_level); + +static void *service_level_start(struct seq_file *m, loff_t *pos) +{ + down_read(&service_level_sem); + return seq_list_start(&service_level_list, *pos); +} + +static void *service_level_next(struct seq_file *m, void *p, loff_t *pos) +{ + return seq_list_next(p, &service_level_list, pos); +} + +static void service_level_stop(struct seq_file *m, void *p) +{ + up_read(&service_level_sem); +} + +static int service_level_show(struct seq_file *m, void *p) +{ + struct service_level *slr; + + slr = list_entry(p, struct service_level, list); + slr->seq_print(m, slr); + return 0; +} + +static const struct seq_operations service_level_seq_ops = { + .start = service_level_start, + .next = service_level_next, + .stop = service_level_stop, + .show = service_level_show +}; + +static int service_level_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &service_level_seq_ops); +} + +static const struct file_operations service_level_ops = { + .open = service_level_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +static void service_level_vm_print(struct seq_file *m, + struct service_level *slr) +{ + char *query_buffer, *str; + + query_buffer = kmalloc(1024, GFP_KERNEL | GFP_DMA); + if (!query_buffer) + return; + cpcmd("QUERY CPLEVEL", query_buffer, 1024, NULL); + str = strchr(query_buffer, '\n'); + if (str) + *str = 0; + seq_printf(m, "VM: %s\n", query_buffer); + kfree(query_buffer); +} + +static struct service_level service_level_vm = { + .seq_print = service_level_vm_print +}; + +static __init int create_proc_service_level(void) +{ + proc_create("service_levels", 0, NULL, &service_level_ops); + if (MACHINE_IS_VM) + register_service_level(&service_level_vm); + return 0; +} +subsys_initcall(create_proc_service_level); + +/* + * Bogomips calculation based on cpu capability. + */ +int get_cpu_capability(unsigned int *capability) +{ + struct sysinfo_1_2_2 *info; + int rc; + + info = (void *) get_zeroed_page(GFP_KERNEL); + if (!info) + return -ENOMEM; + rc = stsi(info, 1, 2, 2); + if (rc == -ENOSYS) + goto out; + rc = 0; + *capability = info->capability; +out: + free_page((unsigned long) info); + return rc; +} + +/* + * CPU capability might have changed. Therefore recalculate loops_per_jiffy. 
+ */ +void s390_adjust_jiffies(void) +{ + struct sysinfo_1_2_2 *info; + const unsigned int fmil = 0x4b189680; /* 1e7 as 32-bit float. */ + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); + FP_DECL_EX; + unsigned int capability; + + info = (void *) get_zeroed_page(GFP_KERNEL); + if (!info) + return; + + if (stsi(info, 1, 2, 2) != -ENOSYS) { + /* + * Major sigh. The cpu capability encoding is "special". + * If the first 9 bits of info->capability are 0 then it + * is a 32 bit unsigned integer in the range 0 .. 2^23. + * If the first 9 bits are != 0 then it is a 32 bit float. + * In addition a lower value indicates a proportionally + * higher cpu capacity. Bogomips are the other way round. + * To get to a halfway suitable number we divide 1e7 + * by the cpu capability number. Yes, that means a floating + * point division .. math-emu here we come :-) + */ + FP_UNPACK_SP(SA, &fmil); + if ((info->capability >> 23) == 0) + FP_FROM_INT_S(SB, info->capability, 32, int); + else + FP_UNPACK_SP(SB, &info->capability); + FP_DIV_S(SR, SA, SB); + FP_TO_INT_S(capability, SR, 32, 0); + } else + /* + * Really old machine without stsi block for basic + * cpu information. Report 42.0 bogomips. + */ + capability = 42; + loops_per_jiffy = capability * (500000/HZ); + free_page((unsigned long) info); +} + +/* + * calibrate the delay loop + */ +void __cpuinit calibrate_delay(void) +{ + s390_adjust_jiffies(); + /* Print the good old Bogomips line .. */ + printk(KERN_DEBUG "Calibrating delay loop (skipped)... " + "%lu.%02lu BogoMIPS preset\n", loops_per_jiffy/(500000/HZ), + (loops_per_jiffy/(5000/HZ)) % 100); +} diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index fc468cae446..f72d41068dc 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -331,6 +331,7 @@ static unsigned long long adjust_time(unsigned long long old, } static DEFINE_PER_CPU(atomic_t, clock_sync_word); +static DEFINE_MUTEX(clock_sync_mutex); static unsigned long clock_sync_flags; #define CLOCK_SYNC_HAS_ETR 0 @@ -394,6 +395,20 @@ static void enable_sync_clock(void) atomic_set_mask(0x80000000, sw_ptr); } +/* + * Function to check if the clock is in sync. 
+ */ +static inline int check_sync_clock(void) +{ + atomic_t *sw_ptr; + int rc; + + sw_ptr = &get_cpu_var(clock_sync_word); + rc = (atomic_read(sw_ptr) & 0x80000000U) != 0; + put_cpu_var(clock_sync_sync); + return rc; +} + /* Single threaded workqueue used for etr and stp sync events */ static struct workqueue_struct *time_sync_wq; @@ -485,6 +500,8 @@ static void etr_reset(void) if (etr_setr(&etr_eacr) == 0) { etr_tolec = get_clock(); set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); + if (etr_port0_online && etr_port1_online) + set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); } else if (etr_port0_online || etr_port1_online) { pr_warning("The real or virtual hardware system does " "not provide an ETR interface\n"); @@ -533,8 +550,7 @@ void etr_switch_to_local(void) { if (!etr_eacr.sl) return; - if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) - disable_sync_clock(NULL); + disable_sync_clock(NULL); set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); queue_work(time_sync_wq, &etr_work); } @@ -549,8 +565,7 @@ void etr_sync_check(void) { if (!etr_eacr.es) return; - if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) - disable_sync_clock(NULL); + disable_sync_clock(NULL); set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); queue_work(time_sync_wq, &etr_work); } @@ -914,7 +929,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib, * Do not try to get the alternate port aib if the clock * is not in sync yet. */ - if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags) && !eacr.es) + if (!check_sync_clock()) return eacr; /* @@ -997,7 +1012,6 @@ static void etr_work_fn(struct work_struct *work) on_each_cpu(disable_sync_clock, NULL, 1); del_timer_sync(&etr_timer); etr_update_eacr(eacr); - clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); goto out_unlock; } @@ -1071,18 +1085,13 @@ static void etr_work_fn(struct work_struct *work) /* Both ports not usable. */ eacr.es = eacr.sl = 0; sync_port = -1; - clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); } - if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) - eacr.es = 0; - /* * If the clock is in sync just update the eacr and return. * If there is no valid sync port wait for a port update. */ - if (test_bit(CLOCK_SYNC_STP, &clock_sync_flags) || - eacr.es || sync_port < 0) { + if (check_sync_clock() || sync_port < 0) { etr_update_eacr(eacr); etr_set_tolec_timeout(now); goto out_unlock; @@ -1103,13 +1112,11 @@ static void etr_work_fn(struct work_struct *work) * and set up a timer to try again after 0.5 seconds */ etr_update_eacr(eacr); - set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); if (now < etr_tolec + (1600000 << 12) || etr_sync_clock_stop(&aib, sync_port) != 0) { /* Sync failed. Try again in 1/2 second. */ eacr.es = 0; etr_update_eacr(eacr); - clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); etr_set_sync_timeout(); } else etr_set_tolec_timeout(now); @@ -1191,19 +1198,30 @@ static ssize_t etr_online_store(struct sys_device *dev, return -EINVAL; if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags)) return -EOPNOTSUPP; + mutex_lock(&clock_sync_mutex); if (dev == &etr_port0_dev) { if (etr_port0_online == value) - return count; /* Nothing to do. */ + goto out; /* Nothing to do. */ etr_port0_online = value; + if (etr_port0_online && etr_port1_online) + set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); + else + clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); queue_work(time_sync_wq, &etr_work); } else { if (etr_port1_online == value) - return count; /* Nothing to do. */ + goto out; /* Nothing to do. 
*/ etr_port1_online = value; + if (etr_port0_online && etr_port1_online) + set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); + else + clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); queue_work(time_sync_wq, &etr_work); } +out: + mutex_unlock(&clock_sync_mutex); return count; } @@ -1471,8 +1489,6 @@ static void stp_timing_alert(struct stp_irq_parm *intparm) */ void stp_sync_check(void) { - if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) - return; disable_sync_clock(NULL); queue_work(time_sync_wq, &stp_work); } @@ -1485,8 +1501,6 @@ void stp_sync_check(void) */ void stp_island_check(void) { - if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) - return; disable_sync_clock(NULL); queue_work(time_sync_wq, &stp_work); } @@ -1513,10 +1527,6 @@ static int stp_sync_clock(void *data) enable_sync_clock(); - set_bit(CLOCK_SYNC_STP, &clock_sync_flags); - if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) - queue_work(time_sync_wq, &etr_work); - rc = 0; if (stp_info.todoff[0] || stp_info.todoff[1] || stp_info.todoff[2] || stp_info.todoff[3] || @@ -1535,9 +1545,6 @@ static int stp_sync_clock(void *data) if (rc) { disable_sync_clock(NULL); stp_sync->in_sync = -EAGAIN; - clear_bit(CLOCK_SYNC_STP, &clock_sync_flags); - if (etr_port0_online || etr_port1_online) - queue_work(time_sync_wq, &etr_work); } else stp_sync->in_sync = 1; xchg(&first, 0); @@ -1569,6 +1576,10 @@ static void stp_work_fn(struct work_struct *work) if (rc || stp_info.c == 0) goto out_unlock; + /* Skip synchronization if the clock is already in sync. */ + if (check_sync_clock()) + goto out_unlock; + memset(&stp_sync, 0, sizeof(stp_sync)); get_online_cpus(); atomic_set(&stp_sync.cpus, num_online_cpus() - 1); @@ -1684,8 +1695,14 @@ static ssize_t stp_online_store(struct sysdev_class *class, return -EINVAL; if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) return -EOPNOTSUPP; + mutex_lock(&clock_sync_mutex); stp_online = value; + if (stp_online) + set_bit(CLOCK_SYNC_STP, &clock_sync_flags); + else + clear_bit(CLOCK_SYNC_STP, &clock_sync_flags); queue_work(time_sync_wq, &stp_work); + mutex_unlock(&clock_sync_mutex); return count; } diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index cc362c9ea8f..3c72c9cf22b 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(topology_lock); cpumask_t cpu_core_map[NR_CPUS]; -cpumask_t cpu_coregroup_map(unsigned int cpu) +static cpumask_t cpu_coregroup_map(unsigned int cpu) { struct core_info *core = &core_info; unsigned long flags; diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 4584d81984c..c2e42cc65ce 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -61,9 +61,11 @@ extern pgm_check_handler_t do_asce_exception; #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) #ifndef CONFIG_64BIT +#define LONG "%08lx " #define FOURLONG "%08lx %08lx %08lx %08lx\n" static int kstack_depth_to_print = 12; #else /* CONFIG_64BIT */ +#define LONG "%016lx " #define FOURLONG "%016lx %016lx %016lx %016lx\n" static int kstack_depth_to_print = 20; #endif /* CONFIG_64BIT */ @@ -155,7 +157,7 @@ void show_stack(struct task_struct *task, unsigned long *sp) break; if (i && ((i * sizeof (long) % 32) == 0)) printk("\n "); - printk("%p ", (void *)*stack++); + printk(LONG, *stack++); } printk("\n"); show_trace(task, sp); diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 690e1781968..89b2e7f1b7a 100644 --- 
a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -144,7 +144,6 @@ out: return -ENOMEM; } -#ifdef CONFIG_HOTPLUG_CPU void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore) { unsigned long segment_table, page_table, page_frame; @@ -163,7 +162,6 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore) free_page(page_table); free_pages(segment_table, SEGMENT_ORDER); } -#endif /* CONFIG_HOTPLUG_CPU */ static void __vdso_init_cr5(void *dummy) { diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index d796d05c9c0..7a2063eb88f 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -108,6 +108,8 @@ SECTIONS EXIT_TEXT } + /* early.c uses stsi, which requires page aligned data. */ + . = ALIGN(PAGE_SIZE); .init.data : { INIT_DATA } |
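
The service level interface introduced in sysinfo.c above is intended to be used by other code that wants a line in /proc/service_levels. The following is only an illustrative sketch of such a caller, not part of the patch: it assumes struct service_level and the register/unregister prototypes are exported through <asm/sysinfo.h> (the header is outside this diff), and the "example" names and the microcode string are invented.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <asm/sysinfo.h>

/* Callback invoked for every read of /proc/service_levels. */
static void example_service_level_print(struct seq_file *m,
					struct service_level *slr)
{
	seq_printf(m, "Example adapter: microcode level 1.2.3\n");
}

static struct service_level example_service_level = {
	.seq_print = example_service_level_print,
};

static int __init example_init(void)
{
	/* Fails with -EEXIST if the same entry is registered twice. */
	return register_service_level(&example_service_level);
}
module_init(example_init);

static void __exit example_exit(void)
{
	unregister_service_level(&example_service_level);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");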
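
In s390_adjust_jiffies() above, the math-emu macros make a simple calculation hard to read. The sketch below restates the capability decoding with ordinary float arithmetic; it is not part of the patch, capability_to_bogomips() is an invented name, and it assumes IEEE single-precision floats on the host (the encoding the comment in the patch describes).

#include <stdint.h>
#include <string.h>

/* Decode an STSI 1.2.2 capability value and return the BogoMIPS preset. */
static unsigned int capability_to_bogomips(uint32_t capability)
{
	float cap;

	if ((capability >> 23) == 0) {
		/* Upper nine bits zero: value is a plain integer (0 .. 2^23). */
		cap = (float) capability;
	} else {
		/* Otherwise the 32 bits are an IEEE single-precision float. */
		memcpy(&cap, &capability, sizeof(cap));
	}
	/*
	 * A smaller capability value means a faster machine, so the
	 * BogoMIPS preset is 1e7 divided by the capability.
	 */
	return (unsigned int) (10000000.0f / cap);
}

With that helper, the assignment in the patch amounts to loops_per_jiffy = capability_to_bogomips(info->capability) * (500000/HZ), so the value printed by calibrate_delay() is exactly the helper's return value.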