author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-04-27 09:15:31 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-04-27 09:15:31 -0700
commit     da8ac5e0fab11d0e84be4e49aaaa828c52d17097 (patch)
tree       eade52afcbb5eb31d2d8869fc66e8223a7681a6f
parent     32f15dc5e6252f03aa2e04a2b140827a8297f21f (diff)
parent     cb629a01bb5bca951287e761c590a5686c6ca416 (diff)
Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6: (38 commits)
  [S390] SPIN_LOCK_UNLOCKED cleanup in drivers/s390
  [S390] Clean up smp code in preparation for some larger changes.
  [S390] Remove debugging junk.
  [S390] Switch etr from tasklet to workqueue.
  [S390] split page_test_and_clear_dirty.
  [S390] Processor degradation notification.
  [S390] vtime: cleanup per_cpu usage.
  [S390] crypto: cleanup.
  [S390] sclp: fix coding style.
  [S390] vmlogrdr: stop IUCV connection in vmlogrdr_release.
  [S390] sclp: initialize early.
  [S390] ctc: kmalloc->kzalloc/casting cleanups.
  [S390] zfcpdump support.
  [S390] dasd: Add ipldev parameter.
  [S390] dasd: Add sysfs attribute status and generate uevents.
  [S390] Improved kernel stack overflow checking.
  [S390] Get rid of console setup functions.
  [S390] No execute support cleanup.
  [S390] Minor fault path optimization.
  [S390] Use generic bug.
  ...
-rw-r--r-- Documentation/s390/crypto/crypto-API.txt | 83
-rw-r--r-- Documentation/s390/zfcpdump.txt | 87
-rw-r--r-- arch/s390/Kconfig | 13
-rw-r--r-- arch/s390/Makefile | 5
-rw-r--r-- arch/s390/appldata/appldata_base.c | 38
-rw-r--r-- arch/s390/crypto/sha1_s390.c | 129
-rw-r--r-- arch/s390/crypto/sha256_s390.c | 38
-rw-r--r-- arch/s390/defconfig | 3
-rw-r--r-- arch/s390/kernel/Makefile | 2
-rw-r--r-- arch/s390/kernel/compat_linux.c | 60
-rw-r--r-- arch/s390/kernel/compat_signal.c | 14
-rw-r--r-- arch/s390/kernel/dis.c | 1278
-rw-r--r-- arch/s390/kernel/early.c | 7
-rw-r--r-- arch/s390/kernel/entry.S | 87
-rw-r--r-- arch/s390/kernel/entry64.S | 100
-rw-r--r-- arch/s390/kernel/head64.S | 72
-rw-r--r-- arch/s390/kernel/ipl.c | 253
-rw-r--r-- arch/s390/kernel/module.c | 4
-rw-r--r-- arch/s390/kernel/process.c | 82
-rw-r--r-- arch/s390/kernel/setup.c | 38
-rw-r--r-- arch/s390/kernel/signal.c | 10
-rw-r--r-- arch/s390/kernel/smp.c | 369
-rw-r--r-- arch/s390/kernel/sys_s390.c | 20
-rw-r--r-- arch/s390/kernel/syscalls.S | 14
-rw-r--r-- arch/s390/kernel/time.c | 34
-rw-r--r-- arch/s390/kernel/traps.c | 72
-rw-r--r-- arch/s390/kernel/vmlinux.lds.S | 10
-rw-r--r-- arch/s390/kernel/vtime.c | 16
-rw-r--r-- arch/s390/mm/fault.c | 331
-rw-r--r-- drivers/s390/block/dasd.c | 3
-rw-r--r-- drivers/s390/block/dasd_devmap.c | 58
-rw-r--r-- drivers/s390/char/Makefile | 5
-rw-r--r-- drivers/s390/char/con3215.c | 7
-rw-r--r-- drivers/s390/char/con3270.c | 7
-rw-r--r-- drivers/s390/char/sclp.c | 10
-rw-r--r-- drivers/s390/char/sclp.h | 72
-rw-r--r-- drivers/s390/char/sclp_chp.c | 196
-rw-r--r-- drivers/s390/char/sclp_config.c | 75
-rw-r--r-- drivers/s390/char/sclp_cpi.c | 4
-rw-r--r-- drivers/s390/char/sclp_quiesce.c | 2
-rw-r--r-- drivers/s390/char/sclp_rw.c | 16
-rw-r--r-- drivers/s390/char/sclp_sdias.c | 255
-rw-r--r-- drivers/s390/char/sclp_tty.c | 6
-rw-r--r-- drivers/s390/char/sclp_vt220.c | 8
-rw-r--r-- drivers/s390/char/vmlogrdr.c | 9
-rw-r--r-- drivers/s390/char/zcore.c | 651
-rw-r--r-- drivers/s390/cio/Makefile | 2
-rw-r--r-- drivers/s390/cio/ccwgroup.c | 33
-rw-r--r-- drivers/s390/cio/chp.c | 683
-rw-r--r-- drivers/s390/cio/chp.h | 53
-rw-r--r-- drivers/s390/cio/chsc.c | 1024
-rw-r--r-- drivers/s390/cio/chsc.h | 42
-rw-r--r-- drivers/s390/cio/cio.c | 52
-rw-r--r-- drivers/s390/cio/cio.h | 17
-rw-r--r-- drivers/s390/cio/cmf.c | 2
-rw-r--r-- drivers/s390/cio/css.c | 201
-rw-r--r-- drivers/s390/cio/css.h | 16
-rw-r--r-- drivers/s390/cio/device.c | 246
-rw-r--r-- drivers/s390/cio/device_fsm.c | 8
-rw-r--r-- drivers/s390/cio/device_ops.c | 7
-rw-r--r-- drivers/s390/cio/idset.c | 112
-rw-r--r-- drivers/s390/cio/idset.h | 25
-rw-r--r-- drivers/s390/cio/ioasm.h | 5
-rw-r--r-- drivers/s390/net/ctcmain.c | 23
-rw-r--r-- drivers/s390/s390mach.c | 25
-rw-r--r-- drivers/s390/sysinfo.c | 18
-rw-r--r-- include/asm-generic/pgtable.h | 11
-rw-r--r-- include/asm-s390/bug.h | 69
-rw-r--r-- include/asm-s390/ccwgroup.h | 1
-rw-r--r-- include/asm-s390/chpid.h | 53
-rw-r--r-- include/asm-s390/cio.h | 8
-rw-r--r-- include/asm-s390/ipl.h | 35
-rw-r--r-- include/asm-s390/lowcore.h | 46
-rw-r--r-- include/asm-s390/pgtable.h | 15
-rw-r--r-- include/asm-s390/processor.h | 2
-rw-r--r-- include/asm-s390/sclp.h | 14
-rw-r--r-- include/asm-s390/setup.h | 2
-rw-r--r-- include/asm-s390/smp.h | 6
-rw-r--r-- include/linux/page-flags.h | 2
-rw-r--r-- mm/rmap.c | 8
80 files changed, 5364 insertions, 2155 deletions
diff --git a/Documentation/s390/crypto/crypto-API.txt b/Documentation/s390/crypto/crypto-API.txt
deleted file mode 100644
index 71ae6ca9f2c..00000000000
--- a/Documentation/s390/crypto/crypto-API.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-crypto-API support for z990 Message Security Assist (MSA) instructions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-AUTHOR: Thomas Spatzier (tspat@de.ibm.com)
-
-
-1. Introduction crypto-API
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-See Documentation/crypto/api-intro.txt for an introduction/description of the
-kernel crypto API.
-According to api-intro.txt support for z990 crypto instructions has been added
-in the algorithm api layer of the crypto API. Several files containing z990
-optimized implementations of crypto algorithms are placed in the
-arch/s390/crypto directory.
-
-
-2. Probing for availability of MSA
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-It should be possible to use Kernels with the z990 crypto implementations both
-on machines with MSA available and on those without MSA (pre z990 or z990
-without MSA). Therefore a simple probing mechanism has been implemented:
-In the init function of each crypto module the availability of MSA and of the
-respective crypto algorithm in particular will be tested. If the algorithm is
-available the module will load and register its algorithm with the crypto API.
-
-If the respective crypto algorithm is not available, the init function will
-return -ENOSYS. In that case a fallback to the standard software implementation
-of the crypto algorithm must be taken ( -> the standard crypto modules are
-also built when compiling the kernel).
-
-
-3. Ensuring z990 crypto module preference
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-If z990 crypto instructions are available the optimized modules should be
-preferred instead of standard modules.
-
-3.1. compiled-in modules
-~~~~~~~~~~~~~~~~~~~~~~~~
-For compiled-in modules it has to be ensured that the z990 modules are linked
-before the standard crypto modules. Then, on system startup the init functions
-of z990 crypto modules will be called first and query for availability of z990
-crypto instructions. If instruction is available, the z990 module will register
-its crypto algorithm implementation -> the load of the standard module will fail
-since the algorithm is already registered.
-If z990 crypto instruction is not available the load of the z990 module will
-fail -> the standard module will load and register its algorithm.
-
-3.2. dynamic modules
-~~~~~~~~~~~~~~~~~~~~
-A system administrator has to take care of giving preference to z990 crypto
-modules. If MSA is available appropriate lines have to be added to
-/etc/modprobe.conf.
-
-Example: z990 crypto instruction for SHA1 algorithm is available
-
- add the following line to /etc/modprobe.conf (assuming the
- z990 crypto modules for SHA1 is called sha1_z990):
-
- alias sha1 sha1_z990
-
- -> when the sha1 algorithm is requested through the crypto API
- (which has a module autoloader) the z990 module will be loaded.
-
-TBD: a userspace module probing mechanism
- something like 'probe sha1 sha1_z990 sha1' in modprobe.conf
- -> try module sha1_z990, if it fails to load standard module sha1
- the 'probe' statement is currently not supported in modprobe.conf
-
-
-4. Currently implemented z990 crypto algorithms
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The following crypto algorithms with z990 MSA support are currently implemented.
-The name of each algorithm under which it is registered in crypto API and the
-name of the respective module is given in square brackets.
-
-- SHA1 Digest Algorithm [sha1 -> sha1_z990]
-- DES Encrypt/Decrypt Algorithm (64bit key) [des -> des_z990]
-- Triple DES Encrypt/Decrypt Algorithm (128bit key) [des3_ede128 -> des_z990]
-- Triple DES Encrypt/Decrypt Algorithm (192bit key) [des3_ede -> des_z990]
-
-In order to load, for example, the sha1_z990 module when the sha1 algorithm is
-requested (see 3.2.) add 'alias sha1 sha1_z990' to /etc/modprobe.conf.
-
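The probing scheme described in the document removed above (each module's init function tests for the MSA facility and only registers its algorithm when the instruction is there, otherwise it fails so the generic software module is used) can be sketched roughly as follows. The helper crypt_s390_func_available() and KIMD_SHA_1 come from arch/s390/crypto/crypt_s390.h, "alg" is the algorithm descriptor shown further down in sha1_s390.c, and the function name is illustrative only, not the exact init code of any of the s390 crypto modules:

/* Register the hardware-backed sha1 only if the MSA function is available. */
static int __init sha1_s390_probe_init(void)
{
	if (!crypt_s390_func_available(KIMD_SHA_1))
		return -ENOSYS;	/* fall back to the generic sha1 module */
	return crypto_register_alg(&alg);
}
module_init(sha1_s390_probe_init);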
diff --git a/Documentation/s390/zfcpdump.txt b/Documentation/s390/zfcpdump.txt
new file mode 100644
index 00000000000..cf45d27c460
--- /dev/null
+++ b/Documentation/s390/zfcpdump.txt
@@ -0,0 +1,87 @@
+s390 SCSI dump tool (zfcpdump)
+
+System z machines (z900 or higher) provide hardware support for creating system
+dumps on SCSI disks. The dump process is initiated by booting a dump tool, which
+has to create a dump of the current (probably crashed) Linux image. In order to
+not overwrite memory of the crashed Linux with data of the dump tool, the
+hardware saves some memory plus the register sets of the boot cpu before the
+dump tool is loaded. There exists an SCLP hardware interface to obtain the saved
+memory afterwards. Currently 32 MB are saved.
+
+This zfcpdump implementation consists of a Linux dump kernel together with
+a userspace dump tool, which are loaded together into the saved memory region
+below 32 MB. zfcpdump is installed on a SCSI disk using zipl (as contained in
+the s390-tools package) to make the device bootable. The operator of a Linux
+system can then trigger a SCSI dump by booting the SCSI disk, where zfcpdump
+resides on.
+
+The kernel part of zfcpdump is implemented as a debugfs file under "zcore/mem",
+which exports memory and registers of the crashed Linux in an s390
+standalone dump format. It can be used in the same way as e.g. /dev/mem. The
+dump format defines a 4K header followed by plain uncompressed memory. The
+register sets are stored in the prefix pages of the respective cpus. To build a
+dump enabled kernel with the zcore driver, the kernel config option
+CONFIG_ZFCPDUMP has to be set. When reading from "zcore/mem", the part of
+memory, which has been saved by hardware is read by the driver via the SCLP
+hardware interface. The second part is just copied from the non overwritten real
+memory.
+
+The userspace application of zfcpdump can reside e.g. in an initramfs or an
+initrd. It reads from zcore/mem and writes the system dump to a file on a
+SCSI disk.
+
+To build a zfcpdump kernel use the following settings in your kernel
+configuration:
+ * CONFIG_ZFCPDUMP=y
+ * Enable ZFCP driver
+ * Enable SCSI driver
+ * Enable ext2 and ext3 filesystems
+ * Disable as many features as possible to keep the kernel small.
+ E.g. network support is not needed at all.
+
+To use the zfcpdump userspace application in an initramfs you have to do the
+following:
+
+ * Copy the zfcpdump executable somewhere into your Linux tree.
+ E.g. to "arch/s390/boot/zfcpdump". If you do not want to include
+ shared libraries, compile the tool with the "-static" gcc option.
+ * If you want to include e2fsck, add it to your source tree, too. The zfcpdump
+ application attempts to start /sbin/e2fsck from the ramdisk.
+ * Use an initramfs config file like the following:
+
+ dir /dev 755 0 0
+ nod /dev/console 644 0 0 c 5 1
+ nod /dev/null 644 0 0 c 1 3
+ nod /dev/sda1 644 0 0 b 8 1
+ nod /dev/sda2 644 0 0 b 8 2
+ nod /dev/sda3 644 0 0 b 8 3
+ nod /dev/sda4 644 0 0 b 8 4
+ nod /dev/sda5 644 0 0 b 8 5
+ nod /dev/sda6 644 0 0 b 8 6
+ nod /dev/sda7 644 0 0 b 8 7
+ nod /dev/sda8 644 0 0 b 8 8
+ nod /dev/sda9 644 0 0 b 8 9
+ nod /dev/sda10 644 0 0 b 8 10
+ nod /dev/sda11 644 0 0 b 8 11
+ nod /dev/sda12 644 0 0 b 8 12
+ nod /dev/sda13 644 0 0 b 8 13
+ nod /dev/sda14 644 0 0 b 8 14
+ nod /dev/sda15 644 0 0 b 8 15
+ file /init arch/s390/boot/zfcpdump 755 0 0
+ file /sbin/e2fsck arch/s390/boot/e2fsck 755 0 0
+ dir /proc 755 0 0
+ dir /sys 755 0 0
+ dir /mnt 755 0 0
+ dir /sbin 755 0 0
+
+ * Issue "make image" to build the zfcpdump image with initramfs.
+
+In a Linux distribution the zfcpdump enabled kernel image must be copied to
+/usr/share/zfcpdump/zfcpdump.image, where the s390 zipl tool is looking for the
+dump kernel when preparing a SCSI dump disk.
+
+If you use a ramdisk copy it to "/usr/share/zfcpdump/zfcpdump.rd".
+
+For more information on how to use zfcpdump refer to the s390 'Using the Dump
+Tools book', which is available from
+http://www.ibm.com/developerworks/linux/linux390.
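As a rough illustration of the dump format added above (a 4K header followed by the plain, uncompressed memory image, exported through the zcore debugfs file), the userspace sketch below simply copies the dump to a regular file. It assumes debugfs is mounted at /sys/kernel/debug and uses an arbitrary 64K copy buffer; it is not the zfcpdump tool shipped with s390-tools, which additionally checks and mounts the target SCSI disk.

/* Copy the s390 standalone dump exported by the zcore driver to a file. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define ZCORE_MEM "/sys/kernel/debug/zcore/mem"	/* assumed debugfs mount point */
#define DUMP_HDR_SIZE 4096			/* 4K dump header, then raw memory */

int main(int argc, char **argv)
{
	char hdr[DUMP_HDR_SIZE], buf[64 * 1024];
	ssize_t n;
	int in, out;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <dump-file>\n", argv[0]);
		return 1;
	}
	in = open(ZCORE_MEM, O_RDONLY);
	out = open(argv[1], O_WRONLY | O_CREAT | O_TRUNC, 0600);
	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}
	/* The header comes first; saved and real memory follow back to back. */
	if (read(in, hdr, sizeof(hdr)) != sizeof(hdr) ||
	    write(out, hdr, sizeof(hdr)) != sizeof(hdr)) {
		perror("dump header");
		return 1;
	}
	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(out, buf, n) != n) {
			perror("write");
			return 1;
		}
	close(in);
	close(out);
	return 0;
}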
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 0f293aa7b0f..e6ec418093e 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -41,6 +41,11 @@ config GENERIC_HWEIGHT
config GENERIC_TIME
def_bool y
+config GENERIC_BUG
+ bool
+ depends on BUG
+ default y
+
config NO_IOMEM
def_bool y
@@ -514,6 +519,14 @@ config KEXEC
current kernel, and to start another kernel. It is like a reboot
but is independent of hardware/microcode support.
+config ZFCPDUMP
+ tristate "zfcpdump support"
+ select SMP
+ default n
+ help
+ Select this option if you want to build a zfcpdump enabled kernel.
+ Refer to "Documentation/s390/zfcpdump.txt" for more details on this.
+
endmenu
source "net/Kconfig"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index b1e55849646..68441e0e74b 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -67,8 +67,10 @@ endif
ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y)
cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE)
+ifneq ($(call cc-option-yn,-mstack-size=8192),y)
cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
endif
+endif
ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
cflags-$(CONFIG_WARN_STACK) += -mwarn-dynamicstack
@@ -103,6 +105,9 @@ install: vmlinux
image: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+zfcpdump:
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 0c3cf4b16ae..ee89b33145d 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -668,45 +668,7 @@ EXPORT_SYMBOL_GPL(appldata_register_ops);
EXPORT_SYMBOL_GPL(appldata_unregister_ops);
EXPORT_SYMBOL_GPL(appldata_diag);
-#ifdef MODULE
-/*
- * Kernel symbols needed by appldata_mem and appldata_os modules.
- * However, if this file is compiled as a module (for testing only), these
- * symbols are not exported. In this case, we define them locally and export
- * those.
- */
-void si_swapinfo(struct sysinfo *val)
-{
- val->freeswap = -1ul;
- val->totalswap = -1ul;
-}
-
-unsigned long avenrun[3] = {-1 - FIXED_1/200, -1 - FIXED_1/200,
- -1 - FIXED_1/200};
-int nr_threads = -1;
-
-void get_full_page_state(struct page_state *ps)
-{
- memset(ps, -1, sizeof(struct page_state));
-}
-
-unsigned long nr_running(void)
-{
- return -1;
-}
-
-unsigned long nr_iowait(void)
-{
- return -1;
-}
-
-/*unsigned long nr_context_switches(void)
-{
- return -1;
-}*/
-#endif /* MODULE */
EXPORT_SYMBOL_GPL(si_swapinfo);
EXPORT_SYMBOL_GPL(nr_threads);
EXPORT_SYMBOL_GPL(nr_running);
EXPORT_SYMBOL_GPL(nr_iowait);
-//EXPORT_SYMBOL_GPL(nr_context_switches);
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 969639f3197..af4460ec381 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -25,99 +25,100 @@
*/
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/mm.h>
#include <linux/crypto.h>
-#include <asm/scatterlist.h>
-#include <asm/byteorder.h>
+
#include "crypt_s390.h"
#define SHA1_DIGEST_SIZE 20
#define SHA1_BLOCK_SIZE 64
-struct crypt_s390_sha1_ctx {
- u64 count;
+struct s390_sha1_ctx {
+ u64 count; /* message length */
u32 state[5];
- u32 buf_len;
- u8 buffer[2 * SHA1_BLOCK_SIZE];
+ u8 buf[2 * SHA1_BLOCK_SIZE];
};
static void sha1_init(struct crypto_tfm *tfm)
{
- struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->state[0] = 0x67452301;
- ctx->state[1] = 0xEFCDAB89;
- ctx->state[2] = 0x98BADCFE;
- ctx->state[3] = 0x10325476;
- ctx->state[4] = 0xC3D2E1F0;
-
- ctx->count = 0;
- ctx->buf_len = 0;
+ struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
+
+ sctx->state[0] = 0x67452301;
+ sctx->state[1] = 0xEFCDAB89;
+ sctx->state[2] = 0x98BADCFE;
+ sctx->state[3] = 0x10325476;
+ sctx->state[4] = 0xC3D2E1F0;
+ sctx->count = 0;
}
static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
unsigned int len)
{
- struct crypt_s390_sha1_ctx *sctx;
- long imd_len;
-
- sctx = crypto_tfm_ctx(tfm);
- sctx->count += len * 8; /* message bit length */
-
- /* anything in buffer yet? -> must be completed */
- if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) {
- /* complete full block and hash */
- memcpy(sctx->buffer + sctx->buf_len, data,
- SHA1_BLOCK_SIZE - sctx->buf_len);
- crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer,
- SHA1_BLOCK_SIZE);
- data += SHA1_BLOCK_SIZE - sctx->buf_len;
- len -= SHA1_BLOCK_SIZE - sctx->buf_len;
- sctx->buf_len = 0;
+ struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
+ unsigned int index;
+ int ret;
+
+ /* how much is already in the buffer? */
+ index = sctx->count & 0x3f;
+
+ sctx->count += len;
+
+ if (index + len < SHA1_BLOCK_SIZE)
+ goto store;
+
+ /* process one stored block */
+ if (index) {
+ memcpy(sctx->buf + index, data, SHA1_BLOCK_SIZE - index);
+ ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf,
+ SHA1_BLOCK_SIZE);
+ BUG_ON(ret != SHA1_BLOCK_SIZE);
+ data += SHA1_BLOCK_SIZE - index;
+ len -= SHA1_BLOCK_SIZE - index;
}
- /* rest of data contains full blocks? */
- imd_len = len & ~0x3ful;
- if (imd_len) {
- crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len);
- data += imd_len;
- len -= imd_len;
+ /* process as many blocks as possible */
+ if (len >= SHA1_BLOCK_SIZE) {
+ ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, data,
+ len & ~(SHA1_BLOCK_SIZE - 1));
+ BUG_ON(ret != (len & ~(SHA1_BLOCK_SIZE - 1)));
+ data += ret;
+ len -= ret;
}
- /* anything left? store in buffer */
- if (len) {
- memcpy(sctx->buffer + sctx->buf_len , data, len);
- sctx->buf_len += len;
- }
-}
+store:
+ /* anything left? */
+ if (len)
+ memcpy(sctx->buf + index , data, len);
+}
-static void pad_message(struct crypt_s390_sha1_ctx* sctx)
+/* Add padding and return the message digest. */
+static void sha1_final(struct crypto_tfm *tfm, u8 *out)
{
- int index;
+ struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
+ u64 bits;
+ unsigned int index, end;
+ int ret;
+
+ /* must perform manual padding */
+ index = sctx->count & 0x3f;
+ end = (index < 56) ? SHA1_BLOCK_SIZE : (2 * SHA1_BLOCK_SIZE);
- index = sctx->buf_len;
- sctx->buf_len = (sctx->buf_len < 56) ?
- SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE;
/* start pad with 1 */
- sctx->buffer[index] = 0x80;
+ sctx->buf[index] = 0x80;
+
/* pad with zeros */
index++;
- memset(sctx->buffer + index, 0x00, sctx->buf_len - index);
- /* append length */
- memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count,
- sizeof sctx->count);
-}
+ memset(sctx->buf + index, 0x00, end - index - 8);
-/* Add padding and return the message digest. */
-static void sha1_final(struct crypto_tfm *tfm, u8 *out)
-{
- struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
+ /* append message length */
+ bits = sctx->count * 8;
+ memcpy(sctx->buf + end - 8, &bits, sizeof(bits));
+
+ ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, end);
+ BUG_ON(ret != end);
- /* must perform manual padding */
- pad_message(sctx);
- crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);
/* copy digest to out */
memcpy(out, sctx->state, SHA1_DIGEST_SIZE);
+
/* wipe context */
memset(sctx, 0, sizeof *sctx);
}
@@ -128,7 +129,7 @@ static struct crypto_alg alg = {
.cra_priority = CRYPT_S390_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx),
+ .cra_ctxsize = sizeof(struct s390_sha1_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .digest = {
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 78436c696d3..2ced3330bce 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -26,7 +26,7 @@
#define SHA256_BLOCK_SIZE 64
struct s390_sha256_ctx {
- u64 count;
+ u64 count; /* message length */
u32 state[8];
u8 buf[2 * SHA256_BLOCK_SIZE];
};
@@ -54,10 +54,9 @@ static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
int ret;
/* how much is already in the buffer? */
- index = sctx->count / 8 & 0x3f;
+ index = sctx->count & 0x3f;
- /* update message bit length */
- sctx->count += len * 8;
+ sctx->count += len;
if ((index + len) < SHA256_BLOCK_SIZE)
goto store;
@@ -87,12 +86,17 @@ store:
memcpy(sctx->buf + index , data, len);
}
-static void pad_message(struct s390_sha256_ctx* sctx)
+/* Add padding and return the message digest */
+static void sha256_final(struct crypto_tfm *tfm, u8 *out)
{
- int index, end;
+ struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
+ u64 bits;
+ unsigned int index, end;
+ int ret;
- index = sctx->count / 8 & 0x3f;
- end = index < 56 ? SHA256_BLOCK_SIZE : 2 * SHA256_BLOCK_SIZE;
+ /* must perform manual padding */
+ index = sctx->count & 0x3f;
+ end = (index < 56) ? SHA256_BLOCK_SIZE : (2 * SHA256_BLOCK_SIZE);
/* start pad with 1 */
sctx->buf[index] = 0x80;
@@ -102,21 +106,11 @@ static void pad_message(struct s390_sha256_ctx* sctx)
memset(sctx->buf + index, 0x00, end - index - 8);
/* append message length */
- memcpy(sctx->buf + end - 8, &sctx->count, sizeof sctx->count);
-
- sctx->count = end * 8;
-}
-
-/* Add padding and return the message digest */
-static void sha256_final(struct crypto_tfm *tfm, u8 *out)
-{
- struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
-
- /* must perform manual padding */
- pad_message(sctx);
+ bits = sctx->count * 8;
+ memcpy(sctx->buf + end - 8, &bits, sizeof(bits));
- crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf,
- sctx->count / 8);
+ ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, end);
+ BUG_ON(ret != end);
/* copy digest to out */
memcpy(out, sctx->state, SHA256_DIGEST_SIZE);
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 741d2bbb2b3..0e4da8a7d82 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -12,6 +12,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_BUG=y
CONFIG_NO_IOMEM=y
CONFIG_S390=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
@@ -166,6 +167,7 @@ CONFIG_NO_IDLE_HZ=y
CONFIG_NO_IDLE_HZ_INIT=y
CONFIG_S390_HYPFS_FS=y
CONFIG_KEXEC=y
+# CONFIG_ZFCPDUMP is not set
#
# Networking
@@ -705,6 +707,7 @@ CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_LIST is not set
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 5492d25d7d6..3195d375bd5 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -6,7 +6,7 @@ EXTRA_AFLAGS := -traditional
obj-y := bitmap.o traps.o time.o process.o base.o early.o \
setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
- semaphore.o s390_ext.o debug.o irq.o ipl.o
+ semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 664c669b185..5236fdb17fc 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -495,29 +495,34 @@ sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
* sys32_execve() executes a new program after the asm stub has set
* things up for us. This should basically do what I want it to.
*/
-asmlinkage long
-sys32_execve(struct pt_regs regs)
+asmlinkage long sys32_execve(void)
{
- int error;
- char * filename;
+ struct pt_regs *regs = task_pt_regs(current);
+ char *filename;
+ unsigned long result;
+ int rc;
- filename = getname(compat_ptr(regs.orig_gpr2));
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
+ filename = getname(compat_ptr(regs->orig_gpr2));
+ if (IS_ERR(filename)) {
+ result = PTR_ERR(filename);
goto out;
- error = compat_do_execve(filename, compat_ptr(regs.gprs[3]),
- compat_ptr(regs.gprs[4]), &regs);
- if (error == 0)
- {
- task_lock(current);
- current->ptrace &= ~PT_DTRACE;
- task_unlock(current);
- current->thread.fp_regs.fpc=0;
- asm volatile("sfpc %0,0" : : "d" (0));
}
+ rc = compat_do_execve(filename, compat_ptr(regs->gprs[3]),
+ compat_ptr(regs->gprs[4]), regs);
+ if (rc) {
+ result = rc;
+ goto out_putname;
+ }
+ task_lock(current);
+ current->ptrace &= ~PT_DTRACE;
+ task_unlock(current);
+ current->thread.fp_regs.fpc=0;
+ asm volatile("sfpc %0,0" : : "d" (0));
+ result = regs->gprs[2];
+out_putname:
putname(filename);
out:
- return error;
+ return result;
}
@@ -918,19 +923,20 @@ asmlinkage long sys32_write(unsigned int fd, char __user * buf, size_t count)
return sys_write(fd, buf, count);
}
-asmlinkage long sys32_clone(struct pt_regs regs)
+asmlinkage long sys32_clone(void)
{
- unsigned long clone_flags;
- unsigned long newsp;
+ struct pt_regs *regs = task_pt_regs(current);
+ unsigned long clone_flags;
+ unsigned long newsp;
int __user *parent_tidptr, *child_tidptr;
- clone_flags = regs.gprs[3] & 0xffffffffUL;
- newsp = regs.orig_gpr2 & 0x7fffffffUL;
- parent_tidptr = compat_ptr(regs.gprs[4]);
- child_tidptr = compat_ptr(regs.gprs[5]);
- if (!newsp)
- newsp = regs.gprs[15];
- return do_fork(clone_flags, newsp, &regs, 0,
+ clone_flags = regs->gprs[3] & 0xffffffffUL;
+ newsp = regs->orig_gpr2 & 0x7fffffffUL;
+ parent_tidptr = compat_ptr(regs->gprs[4]);
+ child_tidptr = compat_ptr(regs->gprs[5]);
+ if (!newsp)
+ newsp = regs->gprs[15];
+ return do_fork(clone_flags, newsp, regs, 0,
parent_tidptr, child_tidptr);
}
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 887a9881d0d..80a54a0149a 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -255,9 +255,9 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
}
asmlinkage long
-sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss,
- struct pt_regs *regs)
+sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss)
{
+ struct pt_regs *regs = task_pt_regs(current);
stack_t kss, koss;
unsigned long ss_sp;
int ret, err = 0;
@@ -344,8 +344,9 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
return 0;
}
-asmlinkage long sys32_sigreturn(struct pt_regs *regs)
+asmlinkage long sys32_sigreturn(void)
{
+ struct pt_regs *regs = task_pt_regs(current);
sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
sigset_t set;
@@ -370,8 +371,9 @@ badframe:
return 0;
}
-asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
+asmlinkage long sys32_rt_sigreturn(void)
{
+ struct pt_regs *regs = task_pt_regs(current);
rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
sigset_t set;
stack_t st;
@@ -407,8 +409,8 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
return regs->gprs[2];
badframe:
- force_sig(SIGSEGV, current);
- return 0;
+ force_sig(SIGSEGV, current);
+ return 0;
}
/*
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
new file mode 100644
index 00000000000..dabaf98943d
--- /dev/null
+++ b/arch/s390/kernel/dis.c
@@ -0,0 +1,1278 @@
+/*
+ * arch/s390/kernel/dis.c
+ *
+ * Disassemble s390 instructions.
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/reboot.h>
+#include <linux/kprobes.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/mathemu.h>
+#include <asm/cpcmd.h>
+#include <asm/s390_ext.h>
+#include <asm/lowcore.h>
+#include <asm/debug.h>
+#include <asm/kdebug.h>
+
+#ifndef CONFIG_64BIT
+#define ONELONG "%08lx: "
+#else /* CONFIG_64BIT */
+#define ONELONG "%016lx: "
+#endif /* CONFIG_64BIT */
+
+#define OPERAND_GPR 0x1 /* Operand printed as %rx */
+#define OPERAND_FPR 0x2 /* Operand printed as %fx */
+#define OPERAND_AR 0x4 /* Operand printed as %ax */
+#define OPERAND_CR 0x8 /* Operand printed as %cx */
+#define OPERAND_DISP 0x10 /* Operand printed as displacement */
+#define OPERAND_BASE 0x20 /* Operand printed as base register */
+#define OPERAND_INDEX 0x40 /* Operand printed as index register */
+#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */
+#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */
+#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */
+
+enum {
+ UNUSED, /* Indicates the end of the operand list */
+ R_8, /* GPR starting at position 8 */
+ R_12, /* GPR starting at position 12 */
+ R_16, /* GPR starting at position 16 */
+ R_20, /* GPR starting at position 20 */
+ R_24, /* GPR starting at position 24 */
+ R_28, /* GPR starting at position 28 */
+ R_32, /* GPR starting at position 32 */
+ F_8, /* FPR starting at position 8 */
+ F_12, /* FPR starting at position 12 */
+ F_16, /* FPR starting at position 16 */
+ F_20, /* FPR starting at position 16 */
+ F_24, /* FPR starting at position 24 */
+ F_28, /* FPR starting at position 28 */
+ F_32, /* FPR starting at position 32 */
+ A_8, /* Access reg. starting at position 8 */
+ A_12, /* Access reg. starting at position 12 */
+ A_24, /* Access reg. starting at position 24 */
+ A_28, /* Access reg. starting at position 28 */
+ C_8, /* Control reg. starting at position 8 */
+ C_12, /* Control reg. starting at position 12 */
+ B_16, /* Base register starting at position 16 */
+ B_32, /* Base register starting at position 32 */
+ X_12, /* Index register starting at position 12 */
+ D_20, /* Displacement starting at position 20 */
+ D_36, /* Displacement starting at position 36 */
+ D20_20, /* 20 bit displacement starting at 20 */
+ L4_8, /* 4 bit length starting at position 8 */
+ L4_12, /* 4 bit length starting at position 12 */
+ L8_8, /* 8 bit length starting at position 8 */
+ U4_8, /* 4 bit unsigned value starting at 8 */
+ U4_12, /* 4 bit unsigned value starting at 12 */
+ U4_16, /* 4 bit unsigned value starting at 16 */
+ U4_20, /* 4 bit unsigned value starting at 20 */
+ U8_8, /* 8 bit unsigned value starting at 8 */
+ U8_16, /* 8 bit unsigned value starting at 16 */
+ I16_16, /* 16 bit signed value starting at 16 */
+ U16_16, /* 16 bit unsigned value starting at 16 */
+ J16_16, /* PC relative jump offset at 16 */
+ J32_16, /* PC relative long offset at 16 */
+ I32_16, /* 32 bit signed value starting at 16 */
+ U32_16, /* 32 bit unsigned value starting at 16 */
+ M_16, /* 4 bit optional mask starting at 16 */
+ RO_28, /* optional GPR starting at position 28 */
+};
+
+/*
+ * Enumeration of the different instruction formats.
+ * For details consult the principles of operation.
+ */
+enum {
+ INSTR_INVALID,
+ INSTR_E, INSTR_RIE_RRP, INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU,
+ INSTR_RIL_UP, INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP,
+ INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0,
+ INSTR_RRE_FF, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, INSTR_RRE_RR,
+ INSTR_RRE_RR_OPT, INSTR_RRF_F0FF, INSTR_RRF_FUFF, INSTR_RRF_M0RR,
+ INSTR_RRF_R0RR, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF,
+ INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR,
+ INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, INSTR_RSI_RRP,
+ INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD,
+ INSTR_RSY_RURD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD,
+ INSTR_RS_RRRD, INSTR_RS_RURD, INSTR_RXE_FRRD, INSTR_RXE_RRRD,
+ INSTR_RXF_FRRDF, INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RX_FRRD,
+ INSTR_RX_RRRD, INSTR_RX_URRD, INSTR_SIY_URD, INSTR_SI_URD,
+ INSTR_SSE_RDRD, INSTR_SSF_RRDRD, INSTR_SS_L0RDRD, INSTR_SS_LIRDRD,
+ INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
+ INSTR_S_00, INSTR_S_RD,
+};
+
+struct operand {
+ int bits; /* The number of bits in the operand. */
+ int shift; /* The number of bits to shift. */
+ int flags; /* One bit syntax flags. */
+};
+
+struct insn {
+ const char name[5];
+ unsigned char opfrag;
+ unsigned char format;
+};
+
+static const struct operand operands[] =
+{
+ [UNUSED] = { 0, 0, 0 },
+ [R_8] = { 4, 8, OPERAND_GPR },
+ [R_12] = { 4, 12, OPERAND_GPR },
+ [R_16] = { 4, 16, OPERAND_GPR },
+ [R_20] = { 4, 20, OPERAND_GPR },
+ [R_24] = { 4, 24, OPERAND_GPR },
+ [R_28] = { 4, 28, OPERAND_GPR },
+ [R_32] = { 4, 32, OPERAND_GPR },
+ [F_8] = { 4, 8, OPERAND_FPR },
+ [F_12] = { 4, 12, OPERAND_FPR },
+ [F_16] = { 4, 16, OPERAND_FPR },
+ [F_20] = { 4, 16, OPERAND_FPR },
+ [F_24] = { 4, 24, OPERAND_FPR },
+ [F_28] = { 4, 28, OPERAND_FPR },
+ [F_32] = { 4, 32, OPERAND_FPR },
+ [A_8] = { 4, 8, OPERAND_AR },
+ [A_12] = { 4, 12, OPERAND_AR },
+ [A_24] = { 4, 24, OPERAND_AR },
+ [A_28] = { 4, 28, OPERAND_AR },
+ [C_8] = { 4, 8, OPERAND_CR },
+ [C_12] = { 4, 12, OPERAND_CR },
+ [B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR },
+ [B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR },
+ [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR },
+ [D_20] = { 12, 20, OPERAND_DISP },
+ [D_36] = { 12, 36, OPERAND_DISP },
+ [D20_20] = { 20, 20, OPERAND_DISP | OPERAND_SIGNED },
+ [L4_8] = { 4, 8, OPERAND_LENGTH },
+ [L4_12] = { 4, 12, OPERAND_LENGTH },
+ [L8_8] = { 8, 8, OPERAND_LENGTH },
+ [U4_8] = { 4, 8, 0 },
+ [U4_12] = { 4, 12, 0 },
+ [U4_16] = { 4, 16, 0 },
+ [U4_20] = { 4, 20, 0 },
+ [U8_8] = { 8, 8, 0 },
+ [U8_16] = { 8, 16, 0 },
+ [I16_16] = { 16, 16, OPERAND_SIGNED },
+ [U16_16] = { 16, 16, 0 },
+ [J16_16] = { 16, 16, OPERAND_PCREL },
+ [J32_16] = { 32, 16, OPERAND_PCREL },
+ [I32_16] = { 32, 16, OPERAND_SIGNED },
+ [U32_16] = { 32, 16, 0 },
+ [M_16] = { 4, 16, 0 },
+ [RO_28] = { 4, 28, OPERAND_GPR }
+};
+
+static const unsigned char formats[][7] = {
+ [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, /* e.g. pr */
+ [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxhg */
+ [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, /* e.g. brasl */
+ [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 }, /* e.g. brcl */
+ [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, /* e.g. afi */
+ [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, /* e.g. alfi */
+ [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 }, /* e.g. ahi */
+ [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 }, /* e.g. brct */
+ [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 }, /* e.g. tml */
+ [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 }, /* e.g. brc */
+ [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. palb */
+ [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 }, /* e.g. tb */
+ [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 }, /* e.g. cpya */
+ [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 }, /* e.g. sar */
+ [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 }, /* e.g. sqer */
+ [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 }, /* e.g. debr */
+ [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 }, /* e.g. ipm */
+ [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 }, /* e.g. ear */
+ [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 }, /* e.g. cefbr */
+ [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 }, /* e.g. lura */
+ [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 }, /* efpc, sfpc */
+ [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, /* e.g. madbr */
+ [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */
+ [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */
+ [INSTR_RRF_R0RR] = { 0xff, R_24,R_28,R_16,0,0,0 }, /* e.g. idte */
+ [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, /* e.g. fixr */
+ [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, /* e.g. cfebr */
+ [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, /* e.g. sske */
+ [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, /* e.g. adr */
+ [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 }, /* e.g. spm */
+ [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 }, /* e.g. lr */
+ [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 }, /* e.g. svc */
+ [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 }, /* e.g. bcr */
+ [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. lmh */
+ [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lmh */
+ [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icmh */
+ [INSTR_RSL_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. tp */
+ [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxh */
+ [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },/* e.g. stmy */
+ [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
+ /* e.g. icmh */
+ [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },/* e.g. lamy */
+ [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },/* e.g. lamy */
+ [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, /* e.g. lam */
+ [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lctl */
+ [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. sll */
+ [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. cs */
+ [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icm */
+ [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. axbr */
+ [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. lg */
+ [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 },
+ /* e.g. madb */
+ [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },/* e.g. ly */
+ [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },/* e.g. ley */
+ [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. ae */
+ [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. l */
+ [INSTR_RX_URRD] = { 0x00, U4_8,D_20,X_12,B_16,0,0 }, /* e.g. bc */
+ [INSTR_SI_URD] = { 0x00, D_20,B_16,U8_8,0,0,0 }, /* e.g. cli */
+ [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, /* e.g. tmy */
+ [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, /* e.g. mvsdk */
+ [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
+ /* e.g. mvc */
+ [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 },
+ /* e.g. srp */
+ [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
+ /* e.g. pack */
+ [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
+ /* e.g. mvck */
+ [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 },
+ /* e.g. plo */
+ [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 },
+ /* e.g. lmd */
+ [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. hsch */
+ [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, /* e.g. lpsw */
+ [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 },
+ /* e.g. mvcos */
+};
+
+static struct insn opcode[] = {
+#ifdef CONFIG_64BIT
+ { "lmd", 0xef, INSTR_SS_RRRDRD3 },
+#endif
+ { "spm", 0x04, INSTR_RR_R0 },
+ { "balr", 0x05, INSTR_RR_RR },
+ { "bctr", 0x06, INSTR_RR_RR },
+ { "bcr", 0x07, INSTR_RR_UR },
+ { "svc", 0x0a, INSTR_RR_U0 },
+ { "bsm", 0x0b, INSTR_RR_RR },
+ { "bassm", 0x0c, INSTR_RR_RR },
+ { "basr", 0x0d, INSTR_RR_RR },
+ { "mvcl", 0x0e, INSTR_RR_RR },
+ { "clcl", 0x0f, INSTR_RR_RR },
+ { "lpr", 0x10, INSTR_RR_RR },
+ { "lnr", 0x11, INSTR_RR_RR },
+ { "ltr", 0x12, INSTR_RR_RR },
+ { "lcr", 0x13, INSTR_RR_RR },
+ { "nr", 0x14, INSTR_RR_RR },
+ { "clr", 0x15, INSTR_RR_RR },
+ { "or", 0x16, INSTR_RR_RR },
+ { "xr", 0x17, INSTR_RR_RR },
+ { "lr", 0x18, INSTR_RR_RR },
+ { "cr", 0x19, INSTR_RR_RR },
+ { "ar", 0x1a, INSTR_RR_RR },
+ { "sr", 0x1b, INSTR_RR_RR },
+ { "mr", 0x1c, INSTR_RR_RR },
+ { "dr", 0x1d, INSTR_RR_RR },
+ { "alr", 0x1e, INSTR_RR_RR },
+ { "slr", 0x1f, INSTR_RR_RR },
+ { "lpdr", 0x20, INSTR_RR_FF },
+ { "lndr", 0x21, INSTR_RR_FF },
+ { "ltdr", 0x22, INSTR_RR_FF },
+ { "lcdr", 0x23, INSTR_RR_FF },
+ { "hdr", 0x24, INSTR_RR_FF },
+ { "ldxr", 0x25, INSTR_RR_FF },
+ { "lrdr", 0x25, INSTR_RR_FF },
+ { "mxr", 0x26, INSTR_RR_FF },
+ { "mxdr", 0x27, INSTR_RR_FF },
+ { "ldr", 0x28, INSTR_RR_FF },
+ { "cdr", 0x29, INSTR_RR_FF },
+ { "adr", 0x2a, INSTR_RR_FF },
+ { "sdr", 0x2b, INSTR_RR_FF },
+ { "mdr", 0x2c, INSTR_RR_FF },
+ { "ddr", 0x2d, INSTR_RR_FF },
+ { "awr", 0x2e, INSTR_RR_FF },
+ { "swr", 0x2f, INSTR_RR_FF },
+ { "lper", 0x30, INSTR_RR_FF },
+ { "lner", 0x31, INSTR_RR_FF },
+ { "lter", 0x32, INSTR_RR_FF },
+ { "lcer", 0x33, INSTR_RR_FF },
+ { "her", 0x34, INSTR_RR_FF },
+ { "ledr", 0x35, INSTR_RR_FF },
+ { "lrer", 0x35, INSTR_RR_FF },
+ { "axr", 0x36, INSTR_RR_FF },
+ { "sxr", 0x37, INSTR_RR_FF },
+ { "ler", 0x38, INSTR_RR_FF },
+ { "cer", 0x39, INSTR_RR_FF },
+ { "aer", 0x3a, INSTR_RR_FF },
+ { "ser", 0x3b, INSTR_RR_FF },
+ { "mder", 0x3c, INSTR_RR_FF },
+ { "mer", 0x3c, INSTR_RR_FF },
+ { "der", 0x3d, INSTR_RR_FF },
+ { "aur", 0x3e, INSTR_RR_FF },
+ { "sur", 0x3f, INSTR_RR_FF },
+ { "sth", 0x40, INSTR_RX_RRRD },
+ { "la", 0x41, INSTR_RX_RRRD },
+ { "stc", 0x42, INSTR_RX_RRRD },
+ { "ic", 0x43, INSTR_RX_RRRD },
+ { "ex", 0x44, INSTR_RX_RRRD },
+ { "bal", 0x45, INSTR_RX_RRRD },
+ { "bct", 0x46, INSTR_RX_RRRD },
+ { "bc", 0x47, INSTR_RX_URRD },
+ { "lh", 0x48, INSTR_RX_RRRD },
+ { "ch", 0x49, INSTR_RX_RRRD },
+ { "ah", 0x4a, INSTR_RX_RRRD },
+ { "sh", 0x4b, INSTR_RX_RRRD },
+ { "mh", 0x4c, INSTR_RX_RRRD },
+ { "bas", 0x4d, INSTR_RX_RRRD },
+ { "cvd", 0x4e, INSTR_RX_RRRD },
+ { "cvb", 0x4f, INSTR_RX_RRRD },
+ { "st", 0x50, INSTR_RX_RRRD },
+ { "lae", 0x51, INSTR_RX_RRRD },
+ { "n", 0x54, INSTR_RX_RRRD },
+ { "cl", 0x55, INSTR_RX_RRRD },
+ { "o", 0x56, INSTR_RX_RRRD },
+ { "x", 0x57, INSTR_RX_RRRD },
+ { "l", 0x58, INSTR_RX_RRRD },
+ { "c", 0x59, INSTR_RX_RRRD },
+ { "a", 0x5a, INSTR_RX_RRRD },
+ { "s", 0x5b, INSTR_RX_RRRD },
+ { "m", 0x5c, INSTR_RX_RRRD },
+ { "d", 0x5d, INSTR_RX_RRRD },
+ { "al", 0x5e, INSTR_RX_RRRD },
+ { "sl", 0x5f, INSTR_RX_RRRD },
+ { "std", 0x60, INSTR_RX_FRRD },
+ { "mxd", 0x67, INSTR_RX_FRRD },
+ { "ld", 0x68, INSTR_RX_FRRD },
+ { "cd", 0x69, INSTR_RX_FRRD },
+ { "ad", 0x6a, INSTR_RX_FRRD },
+ { "sd", 0x6b, INSTR_RX_FRRD },
+ { "md", 0x6c, INSTR_RX_FRRD },
+ { "dd", 0x6d, INSTR_RX_FRRD },
+ { "aw", 0x6e, INSTR_RX_FRRD },
+ { "sw", 0x6f, INSTR_RX_FRRD },
+ { "ste", 0x70, INSTR_RX_FRRD },
+ { "ms", 0x71, INSTR_RX_RRRD },
+ { "le", 0x78, INSTR_RX_FRRD },
+ { "ce", 0x79, INSTR_RX_FRRD },
+ { "ae", 0x7a, INSTR_RX_FRRD },
+ { "se", 0x7b, INSTR_RX_FRRD },
+ { "mde", 0x7c, INSTR_RX_FRRD },
+ { "me", 0x7c, INSTR_RX_FRRD },
+ { "de", 0x7d, INSTR_RX_FRRD },
+ { "au", 0x7e, INSTR_RX_FRRD },
+ { "su", 0x7f, INSTR_RX_FRRD },
+ { "ssm", 0x80, INSTR_S_RD },
+ { "lpsw", 0x82, INSTR_S_RD },
+ { "diag", 0x83, INSTR_RS_RRRD },
+ { "brxh", 0x84, INSTR_RSI_RRP },
+ { "brxle", 0x85, INSTR_RSI_RRP },
+ { "bxh", 0x86, INSTR_RS_RRRD },
+ { "bxle", 0x87, INSTR_RS_RRRD },
+ { "srl", 0x88, INSTR_RS_R0RD },
+ { "sll", 0x89, INSTR_RS_R0RD },
+ { "sra", 0x8a, INSTR_RS_R0RD },
+ { "sla", 0x8b, INSTR_RS_R0RD },
+ { "srdl", 0x8c, INSTR_RS_R0RD },
+ { "sldl", 0x8d, INSTR_RS_R0RD },
+ { "srda", 0x8e, INSTR_RS_R0RD },
+ { "slda", 0x8f, INSTR_RS_R0RD },
+ { "stm", 0x90, INSTR_RS_RRRD },
+ { "tm", 0x91, INSTR_SI_URD },
+ { "mvi", 0x92, INSTR_SI_URD },
+ { "ts", 0x93, INSTR_S_RD },
+ { "ni", 0x94, INSTR_SI_URD },
+ { "cli", 0x95, INSTR_SI_URD },
+ { "oi", 0x96, INSTR_SI_URD },
+ { "xi", 0x97, INSTR_SI_URD },
+ { "lm", 0x98, INSTR_RS_RRRD },
+ { "trace", 0x99, INSTR_RS_RRRD },
+ { "lam", 0x9a, INSTR_RS_AARD },
+ { "stam", 0x9b, INSTR_RS_AARD },
+ { "mvcle", 0xa8, INSTR_RS_RRRD },
+ { "clcle", 0xa9, INSTR_RS_RRRD },
+ { "stnsm", 0xac, INSTR_SI_URD },
+ { "stosm", 0xad, INSTR_SI_URD },
+ { "sigp", 0xae, INSTR_RS_RRRD },
+ { "mc", 0xaf, INSTR_SI_URD },
+ { "lra", 0xb1, INSTR_RX_RRRD },
+ { "stctl", 0xb6, INSTR_RS_CCRD },
+ { "lctl", 0xb7, INSTR_RS_CCRD },
+ { "cs", 0xba, INSTR_RS_RRRD },
+ { "cds", 0xbb, INSTR_RS_RRRD },
+ { "clm", 0xbd, INSTR_RS_RURD },
+ { "stcm", 0xbe, INSTR_RS_RURD },
+ { "icm", 0xbf, INSTR_RS_RURD },
+ { "mvn", 0xd1, INSTR_SS_L0RDRD },
+ { "mvc", 0xd2, INSTR_SS_L0RDRD },
+ { "mvz", 0xd3, INSTR_SS_L0RDRD },
+ { "nc", 0xd4, INSTR_SS_L0RDRD },
+ { "clc", 0xd5, INSTR_SS_L0RDRD },
+ { "oc", 0xd6, INSTR_SS_L0RDRD },
+ { "xc", 0xd7, INSTR_SS_L0RDRD },
+ { "mvck", 0xd9, INSTR_SS_RRRDRD },
+ { "mvcp", 0xda, INSTR_SS_RRRDRD },
+ { "mvcs", 0xdb, INSTR_SS_RRRDRD },
+ { "tr", 0xdc, INSTR_SS_L0RDRD },
+ { "trt", 0xdd, INSTR_SS_L0RDRD },
+ { "ed", 0xde, INSTR_SS_L0RDRD },
+ { "edmk", 0xdf, INSTR_SS_L0RDRD },
+ { "pku", 0xe1, INSTR_SS_L0RDRD },
+ { "unpku", 0xe2, INSTR_SS_L0RDRD },
+ { "mvcin", 0xe8, INSTR_SS_L0RDRD },
+ { "pka", 0xe9, INSTR_SS_L0RDRD },
+ { "unpka", 0xea, INSTR_SS_L0RDRD },
+ { "plo", 0xee, INSTR_SS_RRRDRD2 },
+ { "srp", 0xf0, INSTR_SS_LIRDRD },
+ { "mvo", 0xf1, INSTR_SS_LLRDRD },
+ { "pack", 0xf2, INSTR_SS_LLRDRD },
+ { "unpk", 0xf3, INSTR_SS_LLRDRD },
+ { "zap", 0xf8, INSTR_SS_LLRDRD },
+ { "cp", 0xf9, INSTR_SS_LLRDRD },
+ { "ap", 0xfa, INSTR_SS_LLRDRD },
+ { "sp", 0xfb, INSTR_SS_LLRDRD },
+ { "mp", 0xfc, INSTR_SS_LLRDRD },
+ { "dp", 0xfd, INSTR_SS_LLRDRD },
+ { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_01[] = {
+#ifdef CONFIG_64BIT
+ { "sam64", 0x0e, INSTR_E },
+#endif
+ { "pr", 0x01, INSTR_E },
+ { "upt", 0x02, INSTR_E },
+ { "sckpf", 0x07, INSTR_E },
+ { "tam", 0x0b, INSTR_E },
+ { "sam24", 0x0c, INSTR_E },
+ { "sam31", 0x0d, INSTR_E },
+ { "trap2", 0xff, INSTR_E },
+ { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_a5[] = {
+#ifdef CONFIG_64BIT
+ { "iihh", 0x00, INSTR_RI_RU },
+ { "iihl", 0x01, INSTR_RI_RU },
+ { "iilh", 0x02, INSTR_RI_RU },
+ { "iill", 0x03, INSTR_RI_RU },
+ { "nihh", 0x04, INSTR_RI_RU },
+ { "nihl", 0x05, INSTR_RI_RU },
+ { "nilh", 0x06, INSTR_RI_RU },
+ { "nill", 0x07, INSTR_RI_RU },
+ { "oihh", 0x08, INSTR_RI_RU },
+ { "oihl", 0x09, INSTR_RI_RU },
+ { "oilh", 0x0a, INSTR_RI_RU },
+ { "oill", 0x0b, INSTR_RI_RU },
+ { "llihh", 0x0c, INSTR_RI_RU },
+ { "llihl", 0x0d, INSTR_RI_RU },
+ { "llilh", 0x0e, INSTR_RI_RU },
+ { "llill", 0x0f, INSTR_RI_RU },
+#endif
+ { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_a7[] = {
+#ifdef CONFIG_64BIT
+ { "tmhh", 0x02, INSTR_RI_RU },
+ { "tmhl", 0x03, INSTR_RI_RU },
+ { "brctg", 0x07, INSTR_RI_RP },
+ { "lghi", 0x09, INSTR_RI_RI },
+ { "aghi", 0x0b, INSTR_RI_RI },
+ { "mghi", 0x0d, INSTR_RI_RI },
+ { "cghi", 0x0f, INSTR_RI_RI },
+#endif
+ { "tmlh", 0x00, INSTR_RI_RU },
+ { "tmll", 0x01, INSTR_RI_RU },
+ { "brc", 0x04, INSTR_RI_UP },
+ { "bras", 0x05, INSTR_RI_RP },
+ { "brct", 0x06, INSTR_RI_RP },
+ { "lhi", 0x08, INSTR_RI_RI },
+ { "ahi", 0x0a, INSTR_RI_RI },
+ { "mhi", 0x0c, INSTR_RI_RI },
+ { "chi", 0x0e, INSTR_RI_RI },
+ { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_b2[] = {
+#ifdef CONFIG_64BIT
+ { "sske", 0x2b, INSTR_RRF_M0RR },
+ { "stckf", 0x7c, INSTR_S_RD },
+ { "cu21", 0xa6, INSTR_RRF_M0RR },
+ { "cuutf", 0xa6, INSTR_RRF_M0RR },
+ { "cu12", 0xa7, INSTR_RRF_M0RR },
+ { "cutfu", 0xa7, INSTR_RRF_M0RR },
+ { "stfle", 0xb0, INSTR_S_RD },
+ { "lpswe", 0xb2, INSTR_S_RD },
+#endif
+ { "stidp", 0x02, INSTR_S_RD },
+ { "sck", 0x04, INSTR_S_RD },
+ { "stck", 0x05, INSTR_S_RD },
+ { "sckc", 0x06, INSTR_S_RD },
+ { "stckc", 0x07, INSTR_S_RD },
+ { "spt", 0x08, INSTR_S_RD },
+ { "stpt", 0x09, INSTR_S_RD },
+ { "spka", 0x0a, INSTR_S_RD },
+ { "ipk", 0x0b, INSTR_S_00 },
+ { "ptlb", 0x0d, INSTR_S_00 },
+ { "spx", 0x10, INSTR_S_RD },
+ { "stpx", 0x11, INSTR_S_RD },
+ { "stap", 0x12, INSTR_S_RD },
+ { "sie", 0x14, INSTR_S_RD },
+ { "pc", 0x18, INSTR_S_RD },
+ { "sac", 0x19, INSTR_S_RD },
+ { "cfc", 0x1a, INSTR_S_RD },
+ { "ipte", 0x21, INSTR_RRE_RR },
+ { "ipm", 0x22, INSTR_RRE_R0 },
+ { "ivsk", 0x23, INSTR_RRE_RR },
+ { "iac", 0x24, INSTR_RRE_R0 },
+ { "ssar", 0x25, INSTR_RRE_R0 },
+ { "epar", 0x26, INSTR_RRE_R0 },
+ { "esar", 0x27, INSTR_RRE_R0 },
+ { "pt", 0x28, INSTR_RRE_RR },
+ { "iske", 0x29, INSTR_RRE_RR },
+ { "rrbe", 0x2a, INSTR_RRE_RR },
+ { "sske", 0x2b, INSTR_RRE_RR },
+ { "tb", 0x2c, INSTR_RRE_0R },
+ { "dxr", 0x2d, INSTR_RRE_F0 },
+ { "pgin", 0x2e, INSTR_RRE_RR },
+ { "pgout", 0x2f, INSTR_RRE_RR },
+ { "csch", 0x30, INSTR_S_00 },
+ { "hsch", 0x31, INSTR_S_00 },
+ { "msch", 0x32, INSTR_S_RD },
+ { "ssch", 0x33, INSTR_S_RD },
+ { "stsch", 0x34, INSTR_S_RD },
+ { "tsch", 0x35, INSTR_S_RD },
+ { "tpi", 0x36, INSTR_S_RD },
+ { "sal", 0x37, INSTR_S_00 },
+ { "rsch", 0x38, INSTR_S_00 },
+ { "stcrw", 0x39, INSTR_S_RD },
+ { "stcps", 0x3a, INSTR_S_RD },
+ { "rchp", 0x3b, INSTR_S_00 },
+ { "schm", 0x3c, INSTR_S_00 },
+ { "bakr", 0x40, INSTR_RRE_RR },
+ { "cksm", 0x41, INSTR_RRE_RR },
+ { "sqdr", 0x44, INSTR_RRE_F0 },
+ { "sqer", 0x45, INSTR_RRE_F0 },
+ { "stura", 0x46, INSTR_RRE_RR },
+ { "msta", 0x47, INSTR_RRE_R0 },
+ { "palb", 0x48, INSTR_RRE_00 },
+ { "ereg", 0x49, INSTR_RRE_RR },
+ { "esta", 0x4a, INSTR_RRE_RR },
+ { "lura", 0x4b, INSTR_RRE_RR },
+ { "tar", 0x4c, INSTR_RRE_AR },
+ { "cpya", INSTR_RRE_AA },
+ { "sar", 0x4e, INSTR_RRE_AR },
+ { "ear", 0x4f, INSTR_RRE_RA },
+ { "csp", 0x50, INSTR_RRE_RR },
+ { "msr", 0x52, INSTR_RRE_RR },
+ { "mvpg", 0x54, INSTR_RRE_RR },
+ { "mvst", 0x55, INSTR_RRE_RR },
+ { "cuse", 0x57, INSTR_RRE_RR },
+ { "bsg", 0x58, INSTR_RRE_RR },
+ { "bsa", 0x5a, INSTR_RRE_RR },
+ { "clst", 0x5d, INSTR_RRE_RR },
+ { "srst", 0x5e, INSTR_RRE_RR },
+ { "cmpsc", 0x63, INSTR_RRE_RR },
+ { "cmpsc", 0x63, INSTR_RRE_RR },
+ { "siga", 0x74, INSTR_S_RD },
+ { "xsch", 0x76, INSTR_S_00 },
+ { "rp", 0x77, INSTR_S_RD },
+ { "stcke", 0x78, INSTR_S_RD },
+ { "sacf", 0x79, INSTR_S_RD },
+ { "stsi", 0x7d, INSTR_S_RD },
+ { "srnm", 0x99, INSTR_S_RD },
+ { "stfpc", 0x9c, INSTR_S_RD },
+ { "lfpc", 0x9d, INSTR_S_RD },
+ { "tre", 0xa5, INSTR_RRE_RR },
+ { "cuutf", 0xa6, INSTR_RRE_RR },
+ { "cutfu", 0xa7, INSTR_RRE_RR },
+ { "stfl", 0xb1, INSTR_S_RD },
+ { "trap4", 0xff, INSTR_S_RD },
+ { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_b3[] = {
+#ifdef CONFIG_64BIT
+ { "maylr", 0x38, INSTR_RRF_F0FF },
+ { "mylr", 0x39, INSTR_RRF_F0FF },
+ { "mayr", 0x3a, INSTR_RRF_F0FF },
+ { "myr", 0x3b, INSTR_RRF_F0FF },
+ { "mayhr", 0x3c, INSTR_RRF_F0FF },
+ { "myhr", 0x3d, INSTR_RRF_F0FF },
+ { "cegbr", 0xa4, INSTR_RRE_RR },
+ { "cdgbr", 0xa5, INSTR_RRE_RR },
+ { "cxgbr", 0xa6, INSTR_RRE_RR },
+ { "cgebr", 0xa8, INSTR_RRF_U0RF },
+ { "cgdbr", 0xa9, INSTR_RRF_U0RF },
+ { "cgxbr", 0xaa, INSTR_RRF_U0RF },
+ { "cfer", 0xb8, INSTR_RRF_U0RF },
+ { "cfdr", 0xb9, INSTR_RRF_U0RF },
+ { "cfxr", 0xba, INSTR_RRF_U0RF },
+ { "cegr", 0xc4, INSTR_RRE_RR },
+ { "cdgr", 0xc5, INSTR_RRE_RR },
+ { "cxgr", 0xc6, INSTR_RRE_RR },
+ { "cger", 0xc8, INSTR_RRF_U0RF },
+ { "cgdr", 0xc9, INSTR_RRF_U0RF },
+ { "cgxr", 0xca, INSTR_RRF_U0RF },
+#endif
+ { "lpebr", 0x00, INSTR_RRE_FF },
+ { "lnebr", 0x01, INSTR_RRE_FF },
+ { "ltebr", 0x02, INSTR_RRE_FF },
+ { "lcebr", 0x03, INSTR_RRE_FF },
+ { "ldebr", 0x04, INSTR_RRE_FF },
+ { "lxdbr", 0x05, INSTR_RRE_FF },
+ { "lxebr", 0x06, INSTR_RRE_FF },
+ { "mxdbr", 0x07, INSTR_RRE_FF },
+ { "kebr", 0x08, INSTR_RRE_FF },
+ { "cebr", 0x09, INSTR_RRE_FF },
+ { "aebr", 0x0a, INSTR_RRE_FF },
+ { "sebr", 0x0b, INSTR_RRE_FF },
+ { "mdebr", 0x0c, INSTR_RRE_FF },
+ { "debr", 0x0d, INSTR_RRE_FF },
+ { "maebr", 0x0e, INSTR_RRF_F0FF },
+ { "msebr", 0x0f, INSTR_RRF_F0FF },
+ { "lpdbr", 0x10, INSTR_RRE_FF },
+ { "lndbr", 0x11, INSTR_RRE_FF },
+ { "ltdbr", 0x12, INSTR_RRE_FF },
+ { "lcdbr", 0x13, INSTR_RRE_FF },
+ { "sqebr", 0x14, INSTR_RRE_FF },
+ { "sqdbr", 0x15, INSTR_RRE_FF },
+ { "sqxbr", 0x16, INSTR_RRE_FF },
+ { "meebr", 0x17, INSTR_RRE_FF },
+ { "kdbr", 0x18, INSTR_RRE_FF },
+ { "cdbr", 0x19, INSTR_RRE_FF },
+ { "adbr", 0x1a, INSTR_RRE_FF },
+ { "sdbr", 0x1b, INSTR_RRE_FF },
+ { "mdbr", 0x1c, INSTR_RRE_FF },
+ { "ddbr", 0x1d, INSTR_RRE_FF },
+ { "madbr", 0x1e, INSTR_RRF_F0FF },
+ { "msdbr", 0x1f, INSTR_RRF_F0FF },
+ { "lder", 0x24, INSTR_RRE_FF },
+ { "lxdr", 0x25, INSTR_RRE_FF },
+ { "lxer", 0x26, INSTR_RRE_FF },
+ { "maer", 0x2e, INSTR_RRF_F0FF },
+ { "mser", 0x2f, INSTR_RRF_F0FF },
+ { "sqxr", 0x36, INSTR_RRE_FF },
+ { "meer", 0x37, INSTR_RRE_FF },
+ { "madr", 0x3e, INSTR_RRF_F0FF },
+ { "msdr", 0x3f, INSTR_RRF_F0FF },
+ { "lpxbr", 0x40, INSTR_RRE_FF },
+ { "lnxbr", 0x41, INSTR_RRE_FF },
+ { "ltxbr", 0x42, INSTR_RRE_FF },
+ { "lcxbr", 0x43, INSTR_RRE_FF },
+ { "ledbr", 0x44, INSTR_RRE_FF },
+ { "ldxbr", 0x45, INSTR_RRE_FF },
+ { "lexbr", 0x46, INSTR_RRE_FF },
+ { "fixbr", 0x47, INSTR_RRF_U0FF },
+ { "kxbr", 0x48, INSTR_RRE_FF },
+ { "cxbr", 0x49, INSTR_RRE_FF },
+ { "axbr", 0x4a, INSTR_RRE_FF },
+ { "sxbr", 0x4b, INSTR_RRE_FF },
+ { "mxbr", 0x4c, INSTR_RRE_FF },
+ { "dxbr", 0x4d, INSTR_RRE_FF },
+ { "tbedr", 0x50, INSTR_RRF_U0FF },
+ { "tbdr", 0x51, INSTR_RRF_U0FF },
+ { "diebr", 0x53, INSTR_RRF_FUFF },
+ { "fiebr", 0x57, INSTR_RRF_U0FF },
+ { "thder", 0x58, INSTR_RRE_RR },
+ { "thdr", 0x59, INSTR_RRE_RR },
+ { "didbr", 0x5b, INSTR_RRF_FUFF },
+ { "fidbr", 0x5f, INSTR_RRF_U0FF },
+ { "lpxr", 0x60, INSTR_RRE_FF },
+ { "lnxr", 0x61, INSTR_RRE_FF },
+ { "ltxr", 0x62, INSTR_RRE_FF },
+ { "lcxr", 0x63, INSTR_RRE_FF },
+ { "lxr", 0x65, INSTR_RRE_RR },
+ { "lexr", 0x66, INSTR_RRE_FF },
+ { "fixr", 0x67, INSTR_RRF_U0FF },
+ { "cxr", 0x69, INSTR_RRE_FF },
+ { "lzer", 0x74, INSTR_RRE_R0 },
+ { "lzdr", 0x75, INSTR_RRE_R0 },
+ { "lzxr", 0x76, INSTR_RRE_R0 },
+ { "fier", 0x77, INSTR_RRF_U0FF },
+ { "fidr", 0x7f, INSTR_RRF_U0FF },
+ { "sfpc", 0x84, INSTR_RRE_RR_OPT },
+ { "efpc", 0x8c, INSTR_RRE_RR_OPT },
+ { "cefbr", 0x94, INSTR_RRE_RF },
+ { "cdfbr", 0x95, INSTR_RRE_RF },
+ { "cxfbr", 0x96, INSTR_RRE_RF },
+ { "cfebr", 0x98, INSTR_RRF_U0RF },
+ { "cfdbr", 0x99, INSTR_RRF_U0RF },
+ { "cfxbr", 0x9a, INSTR_RRF_U0RF },
+ { "cefr", 0xb4, INSTR_RRE_RF },
+ { "cdfr", 0xb5, INSTR_RRE_RF },
+ { "cxfr", 0xb6, INSTR_RRE_RF },
+ { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_b9[] = {
+#ifdef CONFIG_64BIT
+ { "lpgr", 0x00, INSTR_RRE_RR },
+ { "lngr", 0x01, INSTR_RRE_RR },
+ { "ltgr", 0x02, INSTR_RRE_RR },
+ { "lcgr", 0x03, INSTR_RRE_RR },
+ { "lgr", 0x04, INSTR_RRE_RR },
+ { "lurag", 0x05, INSTR_RRE_RR },
+ { "lgbr", 0x06, INSTR_RRE_RR },
+ { "lghr", 0x07, INSTR_RRE_RR },
+ { "agr", 0x08, INSTR_RRE_RR },
+ { "sgr", 0x09, INSTR_RRE_RR },
+ { "algr", 0x0a, INSTR_RRE_RR },
+ { "slgr", 0x0b, INSTR_RRE_RR },
+ { "msgr", 0x0c, INSTR_RRE_RR },
+ { "dsgr", 0x0d, INSTR_RRE_RR },
+ { "eregg", 0x0e, INSTR_RRE_RR },
+ { "lrvgr", 0x0f, INSTR_RRE_RR },
+ { "lpgfr", 0x10, INSTR_RRE_RR },
+ { "lngfr", 0x11, INSTR_RRE_RR },
+ { "ltgfr", 0x12, INSTR_RRE_RR },
+ { "lcgfr", 0x13, INSTR_RRE_RR },
+ { "lgfr", 0x14, INSTR_RRE_RR },
+ { "llgfr", 0x16, INSTR_RRE_RR },
+ { "llgtr", 0x17, INSTR_RRE_RR },
+ { "agfr", 0x18, INSTR_RRE_RR },
+ { "sgfr", 0x19, INSTR_RRE_RR },
+ { "algfr", 0x1a, INSTR_RRE_RR },
+ { "slgfr", 0x1b, INSTR_RRE_RR },
+ { "msgfr", 0x1c, INSTR_RRE_RR },
+ { "dsgfr", 0x1d, INSTR_RRE_RR },
+ { "cgr", 0x20, INSTR_RRE_RR },
+ { "clgr", 0x21, INSTR_RRE_RR },
+ { "sturg", 0x25, INSTR_RRE_RR },
+ { "lbr", 0x26, INSTR_RRE_RR },
+ { "lhr", 0x27, INSTR_RRE_RR },
+ { "cgfr", 0x30, INSTR_RRE_RR },
+ { "clgfr", 0x31, INSTR_RRE_RR },
+ { "bctgr", 0x46, INSTR_RRE_RR },
+ { "ngr", 0x80, INSTR_RRE_RR },
+ { "ogr", 0x81, INSTR_RRE_RR },
+ { "xgr", 0x82, INSTR_RRE_RR },
+ { "flogr", 0x83, INSTR_RRE_RR },
+ { "llgcr", 0x84, INSTR_RRE_RR },
+ { "llghr", 0x85, INSTR_RRE_RR },
+ { "mlgr", 0x86, INSTR_RRE_RR },
+ { "dlgr", 0x87, INSTR_RRE_RR },
+ { "alcgr", 0x88, INSTR_RRE_RR },
+ { "slbgr", 0x89, INSTR_RRE_RR },
+ { "cspg", 0x8a, INSTR_RRE_RR },
+ { "idte", 0x8e, INSTR_RRF_R0RR },
+ { "llcr", 0x94, INSTR_RRE_RR },
+ { "llhr", 0x95, INSTR_RRE_RR },
+ { "esea", 0x9d, INSTR_RRE_R0 },
+ { "lptea", 0xaa, INSTR_RRF_RURR },
+ { "cu14", 0xb0, INSTR_RRF_M0RR },
+ { "cu24", 0xb1, INSTR_RRF_M0RR },
+ { "cu41", 0xb2, INSTR_RRF_M0RR },
+ { "cu42", 0xb3, INSTR_RRF_M0RR },
+#endif
+ { "kmac", 0x1e, INSTR_RRE_RR },
+ { "lrvr", 0x1f, INSTR_RRE_RR },
+ { "km", 0x2e, INSTR_RRE_RR },
+ { "kmc", 0x2f, INSTR_RRE_RR },
+ { "kimd", 0x3e, INSTR_RRE_RR },
+ { "klmd", 0x3f, INSTR_RRE_RR },
+ { "epsw", 0x8d, INSTR_RRE_RR },
+ { "trtt", 0x90, INSTR_RRE_RR },
+ { "trtt", 0x90, INSTR_RRF_M0RR },
+ { "trto", 0x91, INSTR_RRE_RR },
+ { "trto", 0x91, INSTR_RRF_M0RR },
+ { "trot", 0x92, INSTR_RRE_RR },
+ { "trot", 0x92, INSTR_RRF_M0RR },
+ { "troo", 0x93, INSTR_RRE_RR },
+ { "troo", 0x93, INSTR_RRF_M0RR },
+ { "mlr", 0x96, INSTR_RRE_RR },
+ { "dlr", 0x97, INSTR_RRE_RR },
+ { "alcr", 0x98, INSTR_RRE_RR },
+ { "slbr", 0x99, INSTR_RRE_RR },
+ { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_c0[] = {
+#ifdef CONFIG_64BIT
+ { "lgfi", 0x01, INSTR_RIL_RI },
+ { "xihf", 0x06, INSTR_RIL_RU },
+ { "xilf", 0x07, INSTR_RIL_RU },
+ { "iihf", 0x08, INSTR_RIL_RU },
+ { "iilf", 0x09, INSTR_RIL_RU },
+ { "nihf", 0x0a, INSTR_RIL_RU },
+ { "nilf", 0x0b, INSTR_RIL_RU },
+ { "oihf", 0x0c, INSTR_RIL_RU },
+ { "oilf", 0x0d, INSTR_RIL_RU },
+ { "llihf", 0x0e, INSTR_RIL_RU },
+ { "llilf", 0x0f, INSTR_RIL_RU },
+#endif
+ { "larl", 0x00, INSTR_RIL_RP },
+ { "brcl", 0x04, INSTR_RIL_UP },
+ { "brasl", 0x05, INSTR_RIL_RP },
+ { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_c2[] = {
+#ifdef CONFIG_64BIT
+ { "slgfi", 0x04, INSTR_RIL_RU },
+ { "slfi", 0x05, INSTR_RIL_RU },
+ { "agfi", 0x08, INSTR_RIL_RI },
+ { "afi", 0x09, INSTR_RIL_RI },
+ { "algfi", 0x0a, INSTR_RIL_RU },
+ { "alfi", 0x0b, INSTR_RIL_RU },
+ { "cgfi", 0x0c, INSTR_RIL_RI },
+ { "cfi", 0x0d, INSTR_RIL_RI },
+ { "clgfi", 0x0e, INSTR_RIL_RU },
+ { "clfi", 0x0f, INSTR_RIL_RU },
+#endif
+ { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_c8[] = {
+#ifdef CONFIG_64BIT
+ { "mvcos", 0x00, INSTR_SSF_RRDRD },
+#endif
+ { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_e3[] = {
+#ifdef CONFIG_64BIT
+ { "ltg", 0x02, INSTR_RXY_RRRD },
+ { "lrag", 0x03, INSTR_RXY_RRRD },
+ { "lg", 0x04, INSTR_RXY_RRRD },
+ { "cvby", 0x06, INSTR_RXY_RRRD },
+ { "ag", 0x08, INSTR_RXY_RRRD },
+ { "sg", 0x09, INSTR_RXY_RRRD },
+ { "alg", 0x0a, INSTR_RXY_RRRD },
+ { "slg", 0x0b, INSTR_RXY_RRRD },
+ { "msg", 0x0c, INSTR_RXY_RRRD },
+ { "dsg", 0x0d, INSTR_RXY_RRRD },
+ { "cvbg", 0x0e, INSTR_RXY_RRRD },
+ { "lrvg", 0x0f, INSTR_RXY_RRRD },
+ { "lt", 0x12, INSTR_RXY_RRRD },
+ { "lray", 0x13, INSTR_RXY_RRRD },
+ { "lgf", 0x14, INSTR_RXY_RRRD },
+ { "lgh", 0x15, INSTR_RXY_RRRD },
+ { "llgf", 0x16, INSTR_RXY_RRRD },
+ { "llgt", 0x17, INSTR_RXY_RRRD },
+ { "agf", 0x18, INSTR_RXY_RRRD },
+ { "sgf", 0x19, INSTR_RXY_RRRD },
+ { "algf", 0x1a, INSTR_RXY_RRRD },
+ { "slgf", 0x1b, INSTR_RXY_RRRD },
+ { "msgf", 0x1c, INSTR_RXY_RRRD },
+ { "dsgf", 0x1d, INSTR_RXY_RRRD },
+ { "cg", 0x20, INSTR_RXY_RRRD },
+ { "clg", 0x21, INSTR_RXY_RRRD },
+ { "stg", 0x24, INSTR_RXY_RRRD },
+ { "cvdy", 0x26, INSTR_RXY_RRRD },
+ { "cvdg", 0x2e, INSTR_RXY_RRRD },
+ { "strvg", 0x2f, INSTR_RXY_RRRD },
+ { "cgf", 0x30, INSTR_RXY_RRRD },
+ { "clgf", 0x31, INSTR_RXY_RRRD },
+ { "strvh", 0x3f, INSTR_RXY_RRRD },
+ { "bctg", 0x46, INSTR_RXY_RRRD },
+ { "sty", 0x50, INSTR_RXY_RRRD },
+ { "msy", 0x51, INSTR_RXY_RRRD },
+ { "ny", 0x54, INSTR_RXY_RRRD },
+ { "cly", 0x55, INSTR_RXY_RRRD },
+ { "oy", 0x56, INSTR_RXY_RRRD },
+ { "xy", 0x57, INSTR_RXY_RRRD },
+ { "ly", 0x58, INSTR_RXY_RRRD },
+ { "cy", 0x59, INSTR_RXY_RRRD },
+ { "ay", 0x5a, INSTR_RXY_RRRD },
+ { "sy", 0x5b, INSTR_RXY_RRRD },
+ { "aly", 0x5e, INSTR_RXY_RRRD },
+ { "sly", 0x5f, INSTR_RXY_RRRD },
+ { "sthy", 0x70, INSTR_RXY_RRRD },
+ { "lay", 0x71, INSTR_RXY_RRRD },
+ { "stcy", 0x72, INSTR_RXY_RRRD },
+ { "icy", 0x73, INSTR_RXY_RRRD },
+ { "lb", 0x76, INSTR_RXY_RRRD },
+ { "lgb", 0x77, INSTR_RXY_RRRD },
+ { "lhy", 0x78, INSTR_RXY_RRRD },
+ { "chy", 0x79, INSTR_RXY_RRRD },
+ { "ahy", 0x7a, INSTR_RXY_RRRD },
+ { "shy", 0x7b, INSTR_RXY_RRRD },
+ { "ng", 0x80, INSTR_RXY_RRRD },
+ { "og", 0x81, INSTR_RXY_RRRD },
+ { "xg", 0x82, INSTR_RXY_RRRD },
+ { "mlg", 0x86, INSTR_RXY_RRRD },
+ { "dlg", 0x87, INSTR_RXY_RRRD },
+ { "alcg", 0x88, INSTR_RXY_RRRD },
+ { "slbg", 0x89, INSTR_RXY_RRRD },
+ { "stpq", 0x8e, INSTR_RXY_RRRD },
+ { "lpq", 0x8f, INSTR_RXY_RRRD },
+ { "llgc", 0x90, INSTR_RXY_RRRD },
+ { "llgh", 0x91, INSTR_RXY_RRRD },
+ { "llc", 0x94, INSTR_RXY_RRRD },
+ { "llh", 0x95, INSTR_RXY_RRRD },
+#endif
+ { "lrv", 0x1e, INSTR_RXY_RRRD },
+ { "lrvh", 0x1f, INSTR_RXY_RRRD },
+ { "strv", 0x3e, INSTR_RXY_RRRD },
+ { "ml", 0x96, INSTR_RXY_RRRD },
+ { "dl", 0x97, INSTR_RXY_RRRD },
+ { "alc", 0x98, INSTR_RXY_RRRD },
+ { "slb", 0x99, INSTR_RXY_RRRD },
+ { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_e5[] = {
+#ifdef CONFIG_64BIT
+ { "strag", 0x02, INSTR_SSE_RDRD },
+#endif
+ { "lasp", 0x00, INSTR_SSE_RDRD },
+ { "tprot", 0x01, INSTR_SSE_RDRD },
+ { "mvcsk", 0x0e, INSTR_SSE_RDRD },
+ { "mvcdk", 0x0f, INSTR_SSE_RDRD },
+ { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_eb[] = {
+#ifdef CONFIG_64BIT
+ { "lmg", 0x04, INSTR_RSY_RRRD },
+ { "srag", 0x0a, INSTR_RSY_RRRD },
+ { "slag", 0x0b, INSTR_RSY_RRRD },
+ { "srlg", 0x0c, INSTR_RSY_RRRD },
+ { "sllg", 0x0d, INSTR_RSY_RRRD },
+ { "tracg", 0x0f, INSTR_RSY_RRRD },
+ { "csy", 0x14, INSTR_RSY_RRRD },
+ { "rllg", 0x1c, INSTR_RSY_RRRD },
+ { "clmh", 0x20, INSTR_RSY_RURD },
+ { "clmy", 0x21, INSTR_RSY_RURD },
+ { "stmg", 0x24, INSTR_RSY_RRRD },
+ { "stctg", 0x25, INSTR_RSY_CCRD },
+ { "stmh", 0x26, INSTR_RSY_RRRD },
+ { "stcmh", 0x2c, INSTR_RSY_RURD },
+ { "stcmy", 0x2d, INSTR_RSY_RURD },
+ { "lctlg", 0x2f, INSTR_RSY_CCRD },
+ { "csg", 0x30, INSTR_RSY_RRRD },
+ { "cdsy", 0x31, INSTR_RSY_RRRD },
+ { "cdsg", 0x3e, INSTR_RSY_RRRD },
+ { "bxhg", 0x44, INSTR_RSY_RRRD },
+ { "bxleg", 0x45, INSTR_RSY_RRRD },
+ { "tmy", 0x51, INSTR_SIY_URD },
+ { "mviy", 0x52, INSTR_SIY_URD },
+ { "niy", 0x54, INSTR_SIY_URD },
+ { "cliy", 0x55, INSTR_SIY_URD },
+ { "oiy", 0x56, INSTR_SIY_URD },
+ { "xiy", 0x57, INSTR_SIY_URD },
+ { "icmh", 0x80, INSTR_RSE_RURD },
+ { "icmh", 0x80, INSTR_RSY_RURD },
+ { "icmy", 0x81, INSTR_RSY_RURD },
+ { "clclu", 0x8f, INSTR_RSY_RRRD },
+ { "stmy", 0x90, INSTR_RSY_RRRD },
+ { "lmh", 0x96, INSTR_RSY_RRRD },
+ { "lmy", 0x98, INSTR_RSY_RRRD },
+ { "lamy", 0x9a, INSTR_RSY_AARD },
+ { "stamy", 0x9b, INSTR_RSY_AARD },
+#endif
+ { "rll", 0x1d, INSTR_RSY_RRRD },
+ { "mvclu", 0x8e, INSTR_RSY_RRRD },
+ { "tp", 0xc0, INSTR_RSL_R0RD },
+ { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_ec[] = {
+#ifdef CONFIG_64BIT
+ { "brxhg", 0x44, INSTR_RIE_RRP },
+ { "brxlg", 0x45, INSTR_RIE_RRP },
+#endif
+ { "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_ed[] = {
+#ifdef CONFIG_64BIT
+ { "mayl", 0x38, INSTR_RXF_FRRDF },
+ { "myl", 0x39, INSTR_RXF_FRRDF },
+ { "may", 0x3a, INSTR_RXF_FRRDF },
+ { "my", 0x3b, INSTR_RXF_FRRDF },
+ { "mayh", 0x3c, INSTR_RXF_FRRDF },
+ { "myh", 0x3d, INSTR_RXF_FRRDF },
+ { "ley", 0x64, INSTR_RXY_FRRD },
+ { "ldy", 0x65, INSTR_RXY_FRRD },
+ { "stey", 0x66, INSTR_RXY_FRRD },
+ { "stdy", 0x67, INSTR_RXY_FRRD },
+#endif
+ { "ldeb", 0x04, INSTR_RXE_FRRD },
+ { "lxdb", 0x05, INSTR_RXE_FRRD },
+ { "lxeb", 0x06, INSTR_RXE_FRRD },
+ { "mxdb", 0x07, INSTR_RXE_FRRD },
+ { "keb", 0x08, INSTR_RXE_FRRD },
+ { "ceb", 0x09, INSTR_RXE_FRRD },
+ { "aeb", 0x0a, INSTR_RXE_FRRD },
+ { "seb", 0x0b, INSTR_RXE_FRRD },
+ { "mdeb", 0x0c, INSTR_RXE_FRRD },
+ { "deb", 0x0d, INSTR_RXE_FRRD },
+ { "maeb", 0x0e, INSTR_RXF_FRRDF },
+ { "mseb", 0x0f, INSTR_RXF_FRRDF },
+ { "tceb", 0x10, INSTR_RXE_FRRD },
+ { "tcdb", 0x11, INSTR_RXE_FRRD },
+ { "tcxb", 0x12, INSTR_RXE_FRRD },
+ { "sqeb", 0x14, INSTR_RXE_FRRD },
+ { "sqdb", 0x15, INSTR_RXE_FRRD },
+ { "meeb", 0x17, INSTR_RXE_FRRD },
+ { "kdb", 0x18, INSTR_RXE_FRRD },
+ { "cdb", 0x19, INSTR_RXE_FRRD },
+ { "adb", 0x1a, INSTR_RXE_FRRD },
+ { "sdb", 0x1b, INSTR_RXE_FRRD },
+ { "mdb", 0x1c, INSTR_RXE_FRRD },
+ { "ddb", 0x1d, INSTR_RXE_FRRD },
+ { "madb", 0x1e, INSTR_RXF_FRRDF },
+ { "msdb", 0x1f, INSTR_RXF_FRRDF },
+ { "lde", 0x24, INSTR_RXE_FRRD },
+ { "lxd", 0x25, INSTR_RXE_FRRD },
+ { "lxe", 0x26, INSTR_RXE_FRRD },
+ { "mae", 0x2e, INSTR_RXF_FRRDF },
+ { "mse", 0x2f, INSTR_RXF_FRRDF },
+ { "sqe", 0x34, INSTR_RXE_FRRD },
+ { "mee", 0x37, INSTR_RXE_FRRD },
+ { "mad", 0x3e, INSTR_RXF_FRRDF },
+ { "msd", 0x3f, INSTR_RXF_FRRDF },
+ { "", 0, INSTR_INVALID }
+};
+
+/* Extracts an operand value from an instruction. */
+static unsigned int extract_operand(unsigned char *code,
+ const struct operand *operand)
+{
+ unsigned int val;
+ int bits;
+
+	/* Extract fragments of the operand byte by byte. */
+ code += operand->shift / 8;
+ bits = (operand->shift & 7) + operand->bits;
+ val = 0;
+ do {
+ val <<= 8;
+ val |= (unsigned int) *code++;
+ bits -= 8;
+ } while (bits > 0);
+ val >>= -bits;
+ val &= ((1U << (operand->bits - 1)) << 1) - 1;
+
+ /* Check for special long displacement case. */
+ if (operand->bits == 20 && operand->shift == 20)
+ val = (val & 0xff) << 12 | (val & 0xfff00) >> 8;
+
+ /* Sign extend value if the operand is signed or pc relative. */
+ if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) &&
+ (val & (1U << (operand->bits - 1))))
+ val |= (-1U << (operand->bits - 1)) << 1;
+
+ /* Double value if the operand is pc relative. */
+ if (operand->flags & OPERAND_PCREL)
+ val <<= 1;
+
+	/* Length x in an instruction has real length x + 1. */
+ if (operand->flags & OPERAND_LENGTH)
+ val++;
+ return val;
+}
+
+static inline int insn_length(unsigned char code)
+{
+ return ((((int) code + 64) >> 7) + 1) << 1;
+}
+
+static struct insn *find_insn(unsigned char *code)
+{
+ unsigned char opfrag = code[1];
+ unsigned char opmask;
+ struct insn *table;
+
+ switch (code[0]) {
+ case 0x01:
+ table = opcode_01;
+ break;
+ case 0xa5:
+ table = opcode_a5;
+ break;
+ case 0xa7:
+ table = opcode_a7;
+ break;
+ case 0xb2:
+ table = opcode_b2;
+ break;
+ case 0xb3:
+ table = opcode_b3;
+ break;
+ case 0xb9:
+ table = opcode_b9;
+ break;
+ case 0xc0:
+ table = opcode_c0;
+ break;
+ case 0xc2:
+ table = opcode_c2;
+ break;
+ case 0xc8:
+ table = opcode_c8;
+ break;
+ case 0xe3:
+ table = opcode_e3;
+ opfrag = code[5];
+ break;
+ case 0xe5:
+ table = opcode_e5;
+ break;
+ case 0xeb:
+ table = opcode_eb;
+ opfrag = code[5];
+ break;
+ case 0xec:
+ table = opcode_ec;
+ opfrag = code[5];
+ break;
+ case 0xed:
+ table = opcode_ed;
+ opfrag = code[5];
+ break;
+ default:
+ table = opcode;
+ opfrag = code[0];
+ break;
+ }
+ while (table->format != INSTR_INVALID) {
+ opmask = formats[table->format][0];
+ if (table->opfrag == (opfrag & opmask))
+ return table;
+ table++;
+ }
+ return NULL;
+}
+
+static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
+{
+ struct insn *insn;
+ const unsigned char *ops;
+ const struct operand *operand;
+ unsigned int value;
+ char separator;
+ char *ptr;
+
+ ptr = buffer;
+ insn = find_insn(code);
+ if (insn) {
+ ptr += sprintf(ptr, "%.5s\t", insn->name);
+ /* Extract the operands. */
+ separator = 0;
+ for (ops = formats[insn->format] + 1; *ops != 0; ops++) {
+ operand = operands + *ops;
+ value = extract_operand(code, operand);
+ if ((operand->flags & OPERAND_INDEX) && value == 0)
+ continue;
+ if ((operand->flags & OPERAND_BASE) &&
+ value == 0 && separator == '(') {
+ separator = ',';
+ continue;
+ }
+ if (separator)
+ ptr += sprintf(ptr, "%c", separator);
+ if (operand->flags & OPERAND_GPR)
+ ptr += sprintf(ptr, "%%r%i", value);
+ else if (operand->flags & OPERAND_FPR)
+ ptr += sprintf(ptr, "%%f%i", value);
+ else if (operand->flags & OPERAND_AR)
+ ptr += sprintf(ptr, "%%a%i", value);
+ else if (operand->flags & OPERAND_CR)
+ ptr += sprintf(ptr, "%%c%i", value);
+ else if (operand->flags & OPERAND_PCREL)
+ ptr += sprintf(ptr, "%lx", value + addr);
+ else if (operand->flags & OPERAND_SIGNED)
+ ptr += sprintf(ptr, "%i", value);
+ else
+ ptr += sprintf(ptr, "%u", value);
+ if (operand->flags & OPERAND_DISP)
+ separator = '(';
+ else if (operand->flags & OPERAND_BASE) {
+ ptr += sprintf(ptr, ")");
+ separator = ',';
+ } else
+ separator = ',';
+ }
+ } else
+ ptr += sprintf(ptr, "unknown");
+ return (int) (ptr - buffer);
+}
+
+void show_code(struct pt_regs *regs)
+{
+ char *mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
+ unsigned char code[64];
+ char buffer[64], *ptr;
+ mm_segment_t old_fs;
+ unsigned long addr;
+ int start, end, opsize, hops, i;
+
+ /* Get a snapshot of the 64 bytes surrounding the fault address. */
+ old_fs = get_fs();
+ set_fs((regs->psw.mask & PSW_MASK_PSTATE) ? USER_DS : KERNEL_DS);
+ for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) {
+ addr = regs->psw.addr - 34 + start;
+ if (__copy_from_user(code + start - 2,
+ (char __user *) addr, 2))
+ break;
+ }
+ for (end = 32; end < 64; end += 2) {
+ addr = regs->psw.addr + end - 32;
+ if (__copy_from_user(code + end,
+ (char __user *) addr, 2))
+ break;
+ }
+ set_fs(old_fs);
+	/* Code snapshot usable? */
+ if ((regs->psw.addr & 1) || start >= end) {
+ printk("%s Code: Bad PSW.\n", mode);
+ return;
+ }
+ /* Find a starting point for the disassembly. */
+ while (start < 32) {
+ hops = 0;
+ for (i = 0, hops = 0; start + i < 32 && hops < 3; hops++) {
+ if (!find_insn(code + start + i))
+ break;
+ i += insn_length(code[start + i]);
+ }
+ if (start + i == 32)
+ /* Looks good, sequence ends at PSW. */
+ break;
+ start += 2;
+ }
+ /* Decode the instructions. */
+ ptr = buffer;
+ ptr += sprintf(ptr, "%s Code:", mode);
+ hops = 0;
+ while (start < end && hops < 8) {
+ *ptr++ = (start == 32) ? '>' : ' ';
+ addr = regs->psw.addr + start - 32;
+ ptr += sprintf(ptr, ONELONG, addr);
+ opsize = insn_length(code[start]);
+ if (start + opsize >= end)
+ break;
+ for (i = 0; i < opsize; i++)
+ ptr += sprintf(ptr, "%02x", code[start + i]);
+ *ptr++ = '\t';
+ if (i < 6)
+ *ptr++ = '\t';
+ ptr += print_insn(ptr, code + start, addr);
+ start += opsize;
+ printk(buffer);
+ ptr = buffer;
+ ptr += sprintf(ptr, "\n ");
+ hops++;
+ }
+ printk("\n");
+}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 5e47936573f..50538e54561 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -253,11 +253,10 @@ static noinline __init void find_memory_chunks(unsigned long memsize)
break;
#endif
/*
- * Finish memory detection at the first hole, unless
- * - we reached the hsa -> skip it.
- * - we know there must be more.
+ * Finish memory detection at the first hole
+ * if storage size is unknown.
*/
- if (cc == -1UL && !memsize && old_addr != ADDR2G)
+ if (cc == -1UL && !memsize)
break;
if (memsize && addr >= memsize)
break;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index dddc3de3040..c8a2212014e 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -249,8 +249,6 @@ sysc_do_restart:
bnz BASED(sysc_tracesys)
basr %r14,%r8 # call sys_xxxx
st %r2,SP_R2(%r15) # store return value (change R2 on stack)
- # ATTENTION: check sys_execve_glue before
- # changing anything here !!
sysc_return:
tm SP_PSW+1(%r15),0x01 # returning to user ?
@@ -381,50 +379,37 @@ ret_from_fork:
b BASED(sysc_return)
#
-# clone, fork, vfork, exec and sigreturn need glue,
-# because they all expect pt_regs as parameter,
-# but are called with different parameter.
-# return-address is set up above
+# kernel_execve function needs to deal with pt_regs that is not
+# at the usual place
#
-sys_clone_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- l %r1,BASED(.Lclone)
- br %r1 # branch to sys_clone
-
-sys_fork_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- l %r1,BASED(.Lfork)
- br %r1 # branch to sys_fork
-
-sys_vfork_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- l %r1,BASED(.Lvfork)
- br %r1 # branch to sys_vfork
-
-sys_execve_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- l %r1,BASED(.Lexecve)
- lr %r12,%r14 # save return address
- basr %r14,%r1 # call sys_execve
- ltr %r2,%r2 # check if execve failed
- bnz 0(%r12) # it did fail -> store result in gpr2
- b 4(%r12) # SKIP ST 2,SP_R2(15) after BASR 14,8
- # in system_call/sysc_tracesys
-
-sys_sigreturn_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
- l %r1,BASED(.Lsigreturn)
- br %r1 # branch to sys_sigreturn
-
-sys_rt_sigreturn_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
- l %r1,BASED(.Lrt_sigreturn)
- br %r1 # branch to sys_sigreturn
-
-sys_sigaltstack_glue:
- la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
- l %r1,BASED(.Lsigaltstack)
- br %r1 # branch to sys_sigreturn
+ .globl kernel_execve
+kernel_execve:
+ stm %r12,%r15,48(%r15)
+ lr %r14,%r15
+ l %r13,__LC_SVC_NEW_PSW+4
+ s %r15,BASED(.Lc_spsize)
+ st %r14,__SF_BACKCHAIN(%r15)
+ la %r12,SP_PTREGS(%r15)
+ xc 0(__PT_SIZE,%r12),0(%r12)
+ l %r1,BASED(.Ldo_execve)
+ lr %r5,%r12
+ basr %r14,%r1
+ ltr %r2,%r2
+ be BASED(0f)
+ a %r15,BASED(.Lc_spsize)
+ lm %r12,%r15,48(%r15)
+ br %r14
+ # execve succeeded.
+0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
+ l %r15,__LC_KERNEL_STACK # load ksp
+ s %r15,BASED(.Lc_spsize) # make room for registers & psw
+ l %r9,__LC_THREAD_INFO
+ mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
+ xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+ stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ l %r1,BASED(.Lexecve_tail)
+ basr %r14,%r1
+ b BASED(sysc_return)
/*
* Program check handler routine
@@ -1031,19 +1016,11 @@ cleanup_io_leave_insn:
.Ldo_extint: .long do_extint
.Ldo_signal: .long do_signal
.Lhandle_per: .long do_single_step
+.Ldo_execve: .long do_execve
+.Lexecve_tail: .long execve_tail
.Ljump_table: .long pgm_check_table
.Lschedule: .long schedule
-.Lclone: .long sys_clone
-.Lexecve: .long sys_execve
-.Lfork: .long sys_fork
-.Lrt_sigreturn: .long sys_rt_sigreturn
-.Lrt_sigsuspend:
- .long sys_rt_sigsuspend
-.Lsigreturn: .long sys_sigreturn
-.Lsigsuspend: .long sys_sigsuspend
-.Lsigaltstack: .long sys_sigaltstack
.Ltrace: .long syscall_trace
-.Lvfork: .long sys_vfork
.Lschedtail: .long schedule_tail
.Lsysc_table: .long sys_call_table
#ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 0f758c329a5..93745fd8f55 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -244,8 +244,6 @@ sysc_noemu:
jnz sysc_tracesys
basr %r14,%r8 # call sys_xxxx
stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
- # ATTENTION: check sys_execve_glue before
- # changing anything here !!
sysc_return:
tm SP_PSW+1(%r15),0x01 # returning to user ?
@@ -371,77 +369,35 @@ ret_from_fork:
j sysc_return
#
-# clone, fork, vfork, exec and sigreturn need glue,
-# because they all expect pt_regs as parameter,
-# but are called with different parameter.
-# return-address is set up above
+# kernel_execve function needs to deal with pt_regs that is not
+# at the usual place
#
-sys_clone_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- jg sys_clone # branch to sys_clone
-
-#ifdef CONFIG_COMPAT
-sys32_clone_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- jg sys32_clone # branch to sys32_clone
-#endif
-
-sys_fork_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- jg sys_fork # branch to sys_fork
-
-sys_vfork_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- jg sys_vfork # branch to sys_vfork
-
-sys_execve_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- lgr %r12,%r14 # save return address
- brasl %r14,sys_execve # call sys_execve
- ltgr %r2,%r2 # check if execve failed
- bnz 0(%r12) # it did fail -> store result in gpr2
- b 6(%r12) # SKIP STG 2,SP_R2(15) in
- # system_call/sysc_tracesys
-#ifdef CONFIG_COMPAT
-sys32_execve_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- lgr %r12,%r14 # save return address
- brasl %r14,sys32_execve # call sys32_execve
- ltgr %r2,%r2 # check if execve failed
- bnz 0(%r12) # it did fail -> store result in gpr2
- b 6(%r12) # SKIP STG 2,SP_R2(15) in
- # system_call/sysc_tracesys
-#endif
-
-sys_sigreturn_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
- jg sys_sigreturn # branch to sys_sigreturn
-
-#ifdef CONFIG_COMPAT
-sys32_sigreturn_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
- jg sys32_sigreturn # branch to sys32_sigreturn
-#endif
-
-sys_rt_sigreturn_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
- jg sys_rt_sigreturn # branch to sys_sigreturn
-
-#ifdef CONFIG_COMPAT
-sys32_rt_sigreturn_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
- jg sys32_rt_sigreturn # branch to sys32_sigreturn
-#endif
-
-sys_sigaltstack_glue:
- la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
- jg sys_sigaltstack # branch to sys_sigreturn
-
-#ifdef CONFIG_COMPAT
-sys32_sigaltstack_glue:
- la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
- jg sys32_sigaltstack_wrapper # branch to sys_sigreturn
-#endif
+ .globl kernel_execve
+kernel_execve:
+ stmg %r12,%r15,96(%r15)
+ lgr %r14,%r15
+ aghi %r15,-SP_SIZE
+ stg %r14,__SF_BACKCHAIN(%r15)
+ la %r12,SP_PTREGS(%r15)
+ xc 0(__PT_SIZE,%r12),0(%r12)
+ lgr %r5,%r12
+ brasl %r14,do_execve
+ ltgfr %r2,%r2
+ je 0f
+ aghi %r15,SP_SIZE
+ lmg %r12,%r15,96(%r15)
+ br %r14
+ # execve succeeded.
+0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
+ lg %r15,__LC_KERNEL_STACK # load ksp
+ aghi %r15,-SP_SIZE # make room for registers & psw
+ lg %r13,__LC_SVC_NEW_PSW+8
+ lg %r9,__LC_THREAD_INFO
+ mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ brasl %r14,execve_tail
+ j sysc_return
/*
* Program check handler routine
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 37010709fe6..a87b1976d40 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -39,7 +39,69 @@ startup_continue:
basr %r13,0 # get base
.LPG1: sll %r13,1 # remove high order bit
srl %r13,1
- lhi %r1,1 # mode 1 = esame
+
+#ifdef CONFIG_ZFCPDUMP
+
+ # check if we have been ipled using zfcp dump:
+
+ tm 0xb9,0x01 # test if subchannel is enabled
+ jno .nodump # subchannel disabled
+ l %r1,0xb8
+ la %r5,.Lipl_schib-.LPG1(%r13)
+ stsch 0(%r5) # get schib of subchannel
+ jne .nodump # schib not available
+ tm 5(%r5),0x01 # devno valid?
+ jno .nodump
+ tm 4(%r5),0x80 # qdio capable device?
+ jno .nodump
+ l %r2,20(%r0) # address of ipl parameter block
+ lhi %r3,0
+ ic %r3,0x148(%r2) # get opt field
+ chi %r3,0x20 # load with dump?
+ jne .nodump
+
+ # store all prefix registers in case of load with dump:
+
+ la %r7,0 # base register for 0 page
+ la %r8,0 # first cpu
+ l %r11,.Lpref_arr_ptr-.LPG1(%r13) # address of prefix array
+ ahi %r11,4 # skip boot cpu
+ lr %r12,%r11
+ ahi %r12,(CONFIG_NR_CPUS*4) # end of prefix array
+ stap .Lcurrent_cpu+2-.LPG1(%r13) # store current cpu addr
+1:
+ cl %r8,.Lcurrent_cpu-.LPG1(%r13) # is ipl cpu ?
+ je 4f # if yes get next cpu
+2:
+ lr %r9,%r7
+ sigp %r9,%r8,0x9 # stop & store status of cpu
+ brc 8,3f # accepted
+ brc 4,4f # status stored: next cpu
+ brc 2,2b # busy: try again
+ brc 1,4f # not op: next cpu
+3:
+ mvc 0(4,%r11),264(%r7) # copy prefix register to prefix array
+ ahi %r11,4 # next element in prefix array
+ clr %r11,%r12
+ je 5f # no more space in prefix array
+4:
+ ahi %r8,1 # next cpu (r8 += 1)
+ cl %r8,.Llast_cpu-.LPG1(%r13) # is last possible cpu ?
+ jl 1b # jump if not last cpu
+5:
+ lhi %r1,2 # mode 2 = esame (dump)
+ j 6f
+ .align 4
+.Lipl_schib:
+ .rept 13
+ .long 0
+ .endr
+.nodump:
+ lhi %r1,1 # mode 1 = esame (normal ipl)
+6:
+#else
+ lhi %r1,1 # mode 1 = esame (normal ipl)
+#endif /* CONFIG_ZFCPDUMP */
mvi __LC_AR_MODE_ID,1 # set esame flag
slr %r0,%r0 # set cpuid to zero
sigp %r1,%r0,0x12 # switch to esame mode
@@ -149,6 +211,14 @@ startup_continue:
.L4malign:.quad 0xffffffffffc00000
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
.Lnop: .long 0x07000700
+#ifdef CONFIG_ZFCPDUMP
+.Lcurrent_cpu:
+ .long 0x0
+.Llast_cpu:
+ .long 0x0000ffff
+.Lpref_arr_ptr:
+ .long zfcpdump_prefix_array
+#endif /* CONFIG_ZFCPDUMP */
.Lparmaddr:
.quad PARMAREA
.align 64
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index f731185bf2b..06833ac2b11 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -29,36 +29,21 @@
#define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm)
#define SCCB_FLAG (s390_readinfo_sccb.flags)
-enum ipl_type {
- IPL_TYPE_NONE = 1,
- IPL_TYPE_UNKNOWN = 2,
- IPL_TYPE_CCW = 4,
- IPL_TYPE_FCP = 8,
- IPL_TYPE_NSS = 16,
-};
-
-#define IPL_NONE_STR "none"
-#define IPL_UNKNOWN_STR "unknown"
-#define IPL_CCW_STR "ccw"
-#define IPL_FCP_STR "fcp"
-#define IPL_NSS_STR "nss"
-
-/*
- * Must be in data section since the bss section
- * is not cleared when these are accessed.
- */
-u16 ipl_devno __attribute__((__section__(".data"))) = 0;
-u32 ipl_flags __attribute__((__section__(".data"))) = 0;
+#define IPL_UNKNOWN_STR "unknown"
+#define IPL_CCW_STR "ccw"
+#define IPL_FCP_STR "fcp"
+#define IPL_FCP_DUMP_STR "fcp_dump"
+#define IPL_NSS_STR "nss"
static char *ipl_type_str(enum ipl_type type)
{
switch (type) {
- case IPL_TYPE_NONE:
- return IPL_NONE_STR;
case IPL_TYPE_CCW:
return IPL_CCW_STR;
case IPL_TYPE_FCP:
return IPL_FCP_STR;
+ case IPL_TYPE_FCP_DUMP:
+ return IPL_FCP_DUMP_STR;
case IPL_TYPE_NSS:
return IPL_NSS_STR;
case IPL_TYPE_UNKNOWN:
@@ -67,15 +52,55 @@ static char *ipl_type_str(enum ipl_type type)
}
}
+enum dump_type {
+ DUMP_TYPE_NONE = 1,
+ DUMP_TYPE_CCW = 2,
+ DUMP_TYPE_FCP = 4,
+};
+
+#define DUMP_NONE_STR "none"
+#define DUMP_CCW_STR "ccw"
+#define DUMP_FCP_STR "fcp"
+
+static char *dump_type_str(enum dump_type type)
+{
+ switch (type) {
+ case DUMP_TYPE_NONE:
+ return DUMP_NONE_STR;
+ case DUMP_TYPE_CCW:
+ return DUMP_CCW_STR;
+ case DUMP_TYPE_FCP:
+ return DUMP_FCP_STR;
+ default:
+ return NULL;
+ }
+}
+
+/*
+ * Must be in data section since the bss section
+ * is not cleared when these are accessed.
+ */
+static u16 ipl_devno __attribute__((__section__(".data"))) = 0;
+u32 ipl_flags __attribute__((__section__(".data"))) = 0;
+
enum ipl_method {
- IPL_METHOD_NONE,
- IPL_METHOD_CCW_CIO,
- IPL_METHOD_CCW_DIAG,
- IPL_METHOD_CCW_VM,
- IPL_METHOD_FCP_RO_DIAG,
- IPL_METHOD_FCP_RW_DIAG,
- IPL_METHOD_FCP_RO_VM,
- IPL_METHOD_NSS,
+ REIPL_METHOD_CCW_CIO,
+ REIPL_METHOD_CCW_DIAG,
+ REIPL_METHOD_CCW_VM,
+ REIPL_METHOD_FCP_RO_DIAG,
+ REIPL_METHOD_FCP_RW_DIAG,
+ REIPL_METHOD_FCP_RO_VM,
+ REIPL_METHOD_FCP_DUMP,
+ REIPL_METHOD_NSS,
+ REIPL_METHOD_DEFAULT,
+};
+
+enum dump_method {
+ DUMP_METHOD_NONE,
+ DUMP_METHOD_CCW_CIO,
+ DUMP_METHOD_CCW_DIAG,
+ DUMP_METHOD_CCW_VM,
+ DUMP_METHOD_FCP_DIAG,
};
enum shutdown_action {
@@ -107,15 +132,15 @@ static int diag308_set_works = 0;
static int reipl_capabilities = IPL_TYPE_UNKNOWN;
static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
-static enum ipl_method reipl_method = IPL_METHOD_NONE;
+static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT;
static struct ipl_parameter_block *reipl_block_fcp;
static struct ipl_parameter_block *reipl_block_ccw;
static char reipl_nss_name[NSS_NAME_SIZE + 1];
-static int dump_capabilities = IPL_TYPE_NONE;
-static enum ipl_type dump_type = IPL_TYPE_NONE;
-static enum ipl_method dump_method = IPL_METHOD_NONE;
+static int dump_capabilities = DUMP_TYPE_NONE;
+static enum dump_type dump_type = DUMP_TYPE_NONE;
+static enum dump_method dump_method = DUMP_METHOD_NONE;
static struct ipl_parameter_block *dump_block_fcp;
static struct ipl_parameter_block *dump_block_ccw;
@@ -134,6 +159,7 @@ int diag308(unsigned long subcode, void *addr)
: "d" (subcode) : "cc", "memory");
return _rc;
}
+EXPORT_SYMBOL_GPL(diag308);
/* SYSFS */
@@ -197,7 +223,7 @@ static void make_attrs_ro(struct attribute **attrs)
* ipl section
*/
-static enum ipl_type ipl_get_type(void)
+static __init enum ipl_type get_ipl_type(void)
{
struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
@@ -211,12 +237,44 @@ static enum ipl_type ipl_get_type(void)
return IPL_TYPE_UNKNOWN;
if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP)
return IPL_TYPE_UNKNOWN;
+ if (ipl->ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP)
+ return IPL_TYPE_FCP_DUMP;
return IPL_TYPE_FCP;
}
+void __init setup_ipl_info(void)
+{
+ ipl_info.type = get_ipl_type();
+ switch (ipl_info.type) {
+ case IPL_TYPE_CCW:
+ ipl_info.data.ccw.dev_id.devno = ipl_devno;
+ ipl_info.data.ccw.dev_id.ssid = 0;
+ break;
+ case IPL_TYPE_FCP:
+ case IPL_TYPE_FCP_DUMP:
+ ipl_info.data.fcp.dev_id.devno =
+ IPL_PARMBLOCK_START->ipl_info.fcp.devno;
+ ipl_info.data.fcp.dev_id.ssid = 0;
+ ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
+ ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
+ break;
+ case IPL_TYPE_NSS:
+ strncpy(ipl_info.data.nss.name, kernel_nss_name,
+ sizeof(ipl_info.data.nss.name));
+ break;
+ case IPL_TYPE_UNKNOWN:
+ default:
+ /* We have no info to copy */
+ break;
+ }
+}
+
+struct ipl_info ipl_info;
+EXPORT_SYMBOL_GPL(ipl_info);
+
static ssize_t ipl_type_show(struct subsystem *subsys, char *page)
{
- return sprintf(page, "%s\n", ipl_type_str(ipl_get_type()));
+ return sprintf(page, "%s\n", ipl_type_str(ipl_info.type));
}
static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
@@ -225,10 +283,11 @@ static ssize_t sys_ipl_device_show(struct subsystem *subsys, char *page)
{
struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
- switch (ipl_get_type()) {
+ switch (ipl_info.type) {
case IPL_TYPE_CCW:
return sprintf(page, "0.0.%04x\n", ipl_devno);
case IPL_TYPE_FCP:
+ case IPL_TYPE_FCP_DUMP:
return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno);
default:
return 0;
@@ -485,23 +544,29 @@ static int reipl_set_type(enum ipl_type type)
switch(type) {
case IPL_TYPE_CCW:
if (MACHINE_IS_VM)
- reipl_method = IPL_METHOD_CCW_VM;
+ reipl_method = REIPL_METHOD_CCW_VM;
else
- reipl_method = IPL_METHOD_CCW_CIO;
+ reipl_method = REIPL_METHOD_CCW_CIO;
break;
case IPL_TYPE_FCP:
if (diag308_set_works)
- reipl_method = IPL_METHOD_FCP_RW_DIAG;
+ reipl_method = REIPL_METHOD_FCP_RW_DIAG;
else if (MACHINE_IS_VM)
- reipl_method = IPL_METHOD_FCP_RO_VM;
+ reipl_method = REIPL_METHOD_FCP_RO_VM;
else
- reipl_method = IPL_METHOD_FCP_RO_DIAG;
+ reipl_method = REIPL_METHOD_FCP_RO_DIAG;
+ break;
+ case IPL_TYPE_FCP_DUMP:
+ reipl_method = REIPL_METHOD_FCP_DUMP;
break;
case IPL_TYPE_NSS:
- reipl_method = IPL_METHOD_NSS;
+ reipl_method = REIPL_METHOD_NSS;
+ break;
+ case IPL_TYPE_UNKNOWN:
+ reipl_method = REIPL_METHOD_DEFAULT;
break;
default:
- reipl_method = IPL_METHOD_NONE;
+ BUG();
}
reipl_type = type;
return 0;
@@ -579,22 +644,22 @@ static struct attribute_group dump_ccw_attr_group = {
/* dump type */
-static int dump_set_type(enum ipl_type type)
+static int dump_set_type(enum dump_type type)
{
if (!(dump_capabilities & type))
return -EINVAL;
switch(type) {
- case IPL_TYPE_CCW:
+ case DUMP_TYPE_CCW:
if (MACHINE_IS_VM)
- dump_method = IPL_METHOD_CCW_VM;
+ dump_method = DUMP_METHOD_CCW_VM;
else
- dump_method = IPL_METHOD_CCW_CIO;
+ dump_method = DUMP_METHOD_CCW_CIO;
break;
- case IPL_TYPE_FCP:
- dump_method = IPL_METHOD_FCP_RW_DIAG;
+ case DUMP_TYPE_FCP:
+ dump_method = DUMP_METHOD_FCP_DIAG;
break;
default:
- dump_method = IPL_METHOD_NONE;
+ dump_method = DUMP_METHOD_NONE;
}
dump_type = type;
return 0;
@@ -602,7 +667,7 @@ static int dump_set_type(enum ipl_type type)
static ssize_t dump_type_show(struct subsystem *subsys, char *page)
{
- return sprintf(page, "%s\n", ipl_type_str(dump_type));
+ return sprintf(page, "%s\n", dump_type_str(dump_type));
}
static ssize_t dump_type_store(struct subsystem *subsys, const char *buf,
@@ -610,12 +675,12 @@ static ssize_t dump_type_store(struct subsystem *subsys, const char *buf,
{
int rc = -EINVAL;
- if (strncmp(buf, IPL_NONE_STR, strlen(IPL_NONE_STR)) == 0)
- rc = dump_set_type(IPL_TYPE_NONE);
- else if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0)
- rc = dump_set_type(IPL_TYPE_CCW);
- else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
- rc = dump_set_type(IPL_TYPE_FCP);
+ if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
+ rc = dump_set_type(DUMP_TYPE_NONE);
+ else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
+ rc = dump_set_type(DUMP_TYPE_CCW);
+ else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
+ rc = dump_set_type(DUMP_TYPE_FCP);
return (rc != 0) ? rc : len;
}
@@ -664,14 +729,14 @@ void do_reipl(void)
char loadparm[LOADPARM_LEN + 1];
switch (reipl_method) {
- case IPL_METHOD_CCW_CIO:
+ case REIPL_METHOD_CCW_CIO:
devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
- if (ipl_get_type() == IPL_TYPE_CCW && devid.devno == ipl_devno)
+ if (ipl_info.type == IPL_TYPE_CCW && devid.devno == ipl_devno)
diag308(DIAG308_IPL, NULL);
devid.ssid = 0;
reipl_ccw_dev(&devid);
break;
- case IPL_METHOD_CCW_VM:
+ case REIPL_METHOD_CCW_VM:
reipl_get_ascii_loadparm(loadparm);
if (strlen(loadparm) == 0)
sprintf(buf, "IPL %X",
@@ -681,30 +746,32 @@ void do_reipl(void)
reipl_block_ccw->ipl_info.ccw.devno, loadparm);
__cpcmd(buf, NULL, 0, NULL);
break;
- case IPL_METHOD_CCW_DIAG:
+ case REIPL_METHOD_CCW_DIAG:
diag308(DIAG308_SET, reipl_block_ccw);
diag308(DIAG308_IPL, NULL);
break;
- case IPL_METHOD_FCP_RW_DIAG:
+ case REIPL_METHOD_FCP_RW_DIAG:
diag308(DIAG308_SET, reipl_block_fcp);
diag308(DIAG308_IPL, NULL);
break;
- case IPL_METHOD_FCP_RO_DIAG:
+ case REIPL_METHOD_FCP_RO_DIAG:
diag308(DIAG308_IPL, NULL);
break;
- case IPL_METHOD_FCP_RO_VM:
+ case REIPL_METHOD_FCP_RO_VM:
__cpcmd("IPL", NULL, 0, NULL);
break;
- case IPL_METHOD_NSS:
+ case REIPL_METHOD_NSS:
sprintf(buf, "IPL %s", reipl_nss_name);
__cpcmd(buf, NULL, 0, NULL);
break;
- case IPL_METHOD_NONE:
- default:
+ case REIPL_METHOD_DEFAULT:
if (MACHINE_IS_VM)
__cpcmd("IPL", NULL, 0, NULL);
diag308(DIAG308_IPL, NULL);
break;
+ case REIPL_METHOD_FCP_DUMP:
+ default:
+ break;
}
signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}
@@ -715,28 +782,28 @@ static void do_dump(void)
static char buf[100];
switch (dump_method) {
- case IPL_METHOD_CCW_CIO:
+ case DUMP_METHOD_CCW_CIO:
smp_send_stop();
devid.devno = dump_block_ccw->ipl_info.ccw.devno;
devid.ssid = 0;
reipl_ccw_dev(&devid);
break;
- case IPL_METHOD_CCW_VM:
+ case DUMP_METHOD_CCW_VM:
smp_send_stop();
sprintf(buf, "STORE STATUS");
__cpcmd(buf, NULL, 0, NULL);
sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
__cpcmd(buf, NULL, 0, NULL);
break;
- case IPL_METHOD_CCW_DIAG:
+ case DUMP_METHOD_CCW_DIAG:
diag308(DIAG308_SET, dump_block_ccw);
diag308(DIAG308_DUMP, NULL);
break;
- case IPL_METHOD_FCP_RW_DIAG:
+ case DUMP_METHOD_FCP_DIAG:
diag308(DIAG308_SET, dump_block_fcp);
diag308(DIAG308_DUMP, NULL);
break;
- case IPL_METHOD_NONE:
+ case DUMP_METHOD_NONE:
default:
return;
}
@@ -777,12 +844,13 @@ static int __init ipl_init(void)
rc = firmware_register(&ipl_subsys);
if (rc)
return rc;
- switch (ipl_get_type()) {
+ switch (ipl_info.type) {
case IPL_TYPE_CCW:
rc = sysfs_create_group(&ipl_subsys.kset.kobj,
&ipl_ccw_attr_group);
break;
case IPL_TYPE_FCP:
+ case IPL_TYPE_FCP_DUMP:
rc = ipl_register_fcp_files();
break;
case IPL_TYPE_NSS:
@@ -852,7 +920,7 @@ static int __init reipl_ccw_init(void)
/* FIXME: check for diag308_set_works when enabling diag ccw reipl */
if (!MACHINE_IS_VM)
sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
- if (ipl_get_type() == IPL_TYPE_CCW)
+ if (ipl_info.type == IPL_TYPE_CCW)
reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
reipl_capabilities |= IPL_TYPE_CCW;
return 0;
@@ -862,9 +930,9 @@ static int __init reipl_fcp_init(void)
{
int rc;
- if ((!diag308_set_works) && (ipl_get_type() != IPL_TYPE_FCP))
+ if ((!diag308_set_works) && (ipl_info.type != IPL_TYPE_FCP))
return 0;
- if ((!diag308_set_works) && (ipl_get_type() == IPL_TYPE_FCP))
+ if ((!diag308_set_works) && (ipl_info.type == IPL_TYPE_FCP))
make_attrs_ro(reipl_fcp_attrs);
reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
@@ -875,7 +943,7 @@ static int __init reipl_fcp_init(void)
free_page((unsigned long)reipl_block_fcp);
return rc;
}
- if (ipl_get_type() == IPL_TYPE_FCP) {
+ if (ipl_info.type == IPL_TYPE_FCP) {
memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
} else {
reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
@@ -909,7 +977,7 @@ static int __init reipl_init(void)
rc = reipl_nss_init();
if (rc)
return rc;
- rc = reipl_set_type(ipl_get_type());
+ rc = reipl_set_type(ipl_info.type);
if (rc)
return rc;
return 0;
@@ -931,7 +999,7 @@ static int __init dump_ccw_init(void)
dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
dump_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
- dump_capabilities |= IPL_TYPE_CCW;
+ dump_capabilities |= DUMP_TYPE_CCW;
return 0;
}
@@ -956,7 +1024,7 @@ static int __init dump_fcp_init(void)
dump_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP;
- dump_capabilities |= IPL_TYPE_FCP;
+ dump_capabilities |= DUMP_TYPE_FCP;
return 0;
}
@@ -995,7 +1063,7 @@ static int __init dump_init(void)
rc = dump_fcp_init();
if (rc)
return rc;
- dump_set_type(IPL_TYPE_NONE);
+ dump_set_type(DUMP_TYPE_NONE);
return 0;
}
@@ -1038,6 +1106,27 @@ static int __init s390_ipl_init(void)
__initcall(s390_ipl_init);
+void __init ipl_save_parameters(void)
+{
+ struct cio_iplinfo iplinfo;
+ unsigned int *ipl_ptr;
+ void *src, *dst;
+
+ if (cio_get_iplinfo(&iplinfo))
+ return;
+
+ ipl_devno = iplinfo.devno;
+ ipl_flags |= IPL_DEVNO_VALID;
+ if (!iplinfo.is_qdio)
+ return;
+ ipl_flags |= IPL_PARMBLOCK_VALID;
+ ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR;
+ src = (void *)(unsigned long)*ipl_ptr;
+ dst = (void *)IPL_PARMBLOCK_ORIGIN;
+ memmove(dst, src, PAGE_SIZE);
+ *ipl_ptr = IPL_PARMBLOCK_ORIGIN;
+}
+
static LIST_HEAD(rcall);
static DEFINE_MUTEX(rcall_mutex);
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 39d1dd75252..59b4e796680 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -31,6 +31,7 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/moduleloader.h>
+#include <linux/bug.h>
#if 0
#define DEBUGP printk
@@ -398,9 +399,10 @@ int module_finalize(const Elf_Ehdr *hdr,
struct module *me)
{
vfree(me->arch.syminfo);
- return 0;
+ return module_bug_finalize(hdr, sechdrs, me);
}
void module_arch_cleanup(struct module *mod)
{
+ module_bug_cleanup(mod);
}
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 5acfac654f9..11d9b019762 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -280,24 +280,26 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
return 0;
}
-asmlinkage long sys_fork(struct pt_regs regs)
+asmlinkage long sys_fork(void)
{
- return do_fork(SIGCHLD, regs.gprs[15], &regs, 0, NULL, NULL);
+ struct pt_regs *regs = task_pt_regs(current);
+ return do_fork(SIGCHLD, regs->gprs[15], regs, 0, NULL, NULL);
}
-asmlinkage long sys_clone(struct pt_regs regs)
+asmlinkage long sys_clone(void)
{
- unsigned long clone_flags;
- unsigned long newsp;
+ struct pt_regs *regs = task_pt_regs(current);
+ unsigned long clone_flags;
+ unsigned long newsp;
int __user *parent_tidptr, *child_tidptr;
- clone_flags = regs.gprs[3];
- newsp = regs.orig_gpr2;
- parent_tidptr = (int __user *) regs.gprs[4];
- child_tidptr = (int __user *) regs.gprs[5];
- if (!newsp)
- newsp = regs.gprs[15];
- return do_fork(clone_flags, newsp, &regs, 0,
+ clone_flags = regs->gprs[3];
+ newsp = regs->orig_gpr2;
+ parent_tidptr = (int __user *) regs->gprs[4];
+ child_tidptr = (int __user *) regs->gprs[5];
+ if (!newsp)
+ newsp = regs->gprs[15];
+ return do_fork(clone_flags, newsp, regs, 0,
parent_tidptr, child_tidptr);
}
@@ -311,40 +313,52 @@ asmlinkage long sys_clone(struct pt_regs regs)
* do not have enough call-clobbered registers to hold all
* the information you need.
*/
-asmlinkage long sys_vfork(struct pt_regs regs)
+asmlinkage long sys_vfork(void)
{
+ struct pt_regs *regs = task_pt_regs(current);
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
- regs.gprs[15], &regs, 0, NULL, NULL);
+ regs->gprs[15], regs, 0, NULL, NULL);
+}
+
+asmlinkage void execve_tail(void)
+{
+ task_lock(current);
+ current->ptrace &= ~PT_DTRACE;
+ task_unlock(current);
+ current->thread.fp_regs.fpc = 0;
+ if (MACHINE_HAS_IEEE)
+ asm volatile("sfpc %0,%0" : : "d" (0));
}
/*
* sys_execve() executes a new program.
*/
-asmlinkage long sys_execve(struct pt_regs regs)
+asmlinkage long sys_execve(void)
{
- int error;
- char * filename;
-
- filename = getname((char __user *) regs.orig_gpr2);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- error = do_execve(filename, (char __user * __user *) regs.gprs[3],
- (char __user * __user *) regs.gprs[4], &regs);
- if (error == 0) {
- task_lock(current);
- current->ptrace &= ~PT_DTRACE;
- task_unlock(current);
- current->thread.fp_regs.fpc = 0;
- if (MACHINE_HAS_IEEE)
- asm volatile("sfpc %0,%0" : : "d" (0));
+ struct pt_regs *regs = task_pt_regs(current);
+ char *filename;
+ unsigned long result;
+ int rc;
+
+ filename = getname((char __user *) regs->orig_gpr2);
+ if (IS_ERR(filename)) {
+ result = PTR_ERR(filename);
+ goto out;
}
- putname(filename);
+ rc = do_execve(filename, (char __user * __user *) regs->gprs[3],
+ (char __user * __user *) regs->gprs[4], regs);
+ if (rc) {
+ result = rc;
+ goto out_putname;
+ }
+ execve_tail();
+ result = regs->gprs[2];
+out_putname:
+ putname(filename);
out:
- return error;
+ return result;
}
-
/*
* fill in the FPU structure for a core dump.
*/
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 863c8d08c02..3dfd0985861 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -285,6 +285,26 @@ static void __init conmode_default(void)
}
}
+#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
+static void __init setup_zfcpdump(unsigned int console_devno)
+{
+ static char str[64];
+
+ if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+ return;
+ if (console_devno != -1)
+ sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x",
+ ipl_info.data.fcp.dev_id.devno, console_devno);
+ else
+ sprintf(str, "cio_ignore=all,!0.0.%04x",
+ ipl_info.data.fcp.dev_id.devno);
+ strcat(COMMAND_LINE, str);
+ console_loglevel = 2;
+}
+#else
+static inline void setup_zfcpdump(unsigned int console_devno) {}
+#endif /* CONFIG_ZFCPDUMP */
+
#ifdef CONFIG_SMP
void (*_machine_restart)(char *command) = machine_restart_smp;
void (*_machine_halt)(void) = machine_halt_smp;
@@ -586,13 +606,20 @@ setup_resources(void)
}
}
+unsigned long real_memory_size;
+EXPORT_SYMBOL_GPL(real_memory_size);
+
static void __init setup_memory_end(void)
{
- unsigned long real_size, memory_size;
+ unsigned long memory_size;
unsigned long max_mem, max_phys;
int i;
- memory_size = real_size = 0;
+#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
+ if (ipl_info.type == IPL_TYPE_FCP_DUMP)
+ memory_end = ZFCPDUMP_HSA_SIZE;
+#endif
+ memory_size = 0;
max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE;
memory_end &= PAGE_MASK;
@@ -601,7 +628,8 @@ static void __init setup_memory_end(void)
for (i = 0; i < MEMORY_CHUNKS; i++) {
struct mem_chunk *chunk = &memory_chunk[i];
- real_size = max(real_size, chunk->addr + chunk->size);
+ real_memory_size = max(real_memory_size,
+ chunk->addr + chunk->size);
if (chunk->addr >= max_mem) {
memset(chunk, 0, sizeof(*chunk));
continue;
@@ -765,6 +793,7 @@ setup_arch(char **cmdline_p)
parse_early_param();
+ setup_ipl_info();
setup_memory_end();
setup_addressing_mode();
setup_memory();
@@ -782,6 +811,9 @@ setup_arch(char **cmdline_p)
/* Setup default console */
conmode_default();
+
+ /* Setup zfcpdump support */
+ setup_zfcpdump(console_devno);
}
void print_cpu_info(struct cpuinfo_S390 *cpuinfo)
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 554f9cf7499..3c41907799a 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -102,9 +102,9 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
}
asmlinkage long
-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
- struct pt_regs *regs)
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
{
+ struct pt_regs *regs = task_pt_regs(current);
return do_sigaltstack(uss, uoss, regs->gprs[15]);
}
@@ -163,8 +163,9 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
return 0;
}
-asmlinkage long sys_sigreturn(struct pt_regs *regs)
+asmlinkage long sys_sigreturn(void)
{
+ struct pt_regs *regs = task_pt_regs(current);
sigframe __user *frame = (sigframe __user *)regs->gprs[15];
sigset_t set;
@@ -189,8 +190,9 @@ badframe:
return 0;
}
-asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
+asmlinkage long sys_rt_sigreturn(void)
{
+ struct pt_regs *regs = task_pt_regs(current);
rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15];
sigset_t set;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 97764f710bb..3754e2031b3 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,12 +1,12 @@
/*
* arch/s390/kernel/smp.c
*
- * Copyright (C) IBM Corp. 1999,2006
+ * Copyright IBM Corp. 1999,2007
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
- * Martin Schwidefsky (schwidefsky@de.ibm.com)
- * Heiko Carstens (heiko.carstens@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Heiko Carstens (heiko.carstens@de.ibm.com)
*
- * based on other smp stuff by
+ * based on other smp stuff by
* (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
* (c) 1998 Ingo Molnar
*
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
+#include <linux/bootmem.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
@@ -40,17 +41,19 @@
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
-
-extern volatile int __cpu_logical_map[];
+#include <asm/lowcore.h>
/*
* An array with a pointer the lowcore of every CPU.
*/
-
struct _lowcore *lowcore_ptr[NR_CPUS];
+EXPORT_SYMBOL(lowcore_ptr);
cpumask_t cpu_online_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_online_map);
+
cpumask_t cpu_possible_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(cpu_possible_map);
static struct task_struct *current_set[NR_CPUS];
@@ -70,7 +73,7 @@ struct call_data_struct {
int wait;
};
-static struct call_data_struct * call_data;
+static struct call_data_struct *call_data;
/*
* 'Call function' interrupt callback
@@ -150,8 +153,8 @@ out:
*
* Run a function on all other CPUs.
*
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. You may call it from a bottom half.
+ * You must not call this function with disabled interrupts, from a
+ * hardware interrupt handler or from a bottom half.
*/
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
int wait)
@@ -177,11 +180,11 @@ EXPORT_SYMBOL(smp_call_function);
*
* Run a function on one processor.
*
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. You may call it from a bottom half.
+ * You must not call this function with disabled interrupts, from a
+ * hardware interrupt handler or from a bottom half.
*/
int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
- int wait, int cpu)
+ int wait, int cpu)
{
cpumask_t map = CPU_MASK_NONE;
@@ -195,9 +198,9 @@ EXPORT_SYMBOL(smp_call_function_on);
static void do_send_stop(void)
{
- int cpu, rc;
+ int cpu, rc;
- /* stop all processors */
+ /* stop all processors */
for_each_online_cpu(cpu) {
if (cpu == smp_processor_id())
continue;
@@ -209,9 +212,9 @@ static void do_send_stop(void)
static void do_store_status(void)
{
- int cpu, rc;
+ int cpu, rc;
- /* store status of all processors in their lowcores (real 0) */
+ /* store status of all processors in their lowcores (real 0) */
for_each_online_cpu(cpu) {
if (cpu == smp_processor_id())
continue;
@@ -219,8 +222,8 @@ static void do_store_status(void)
rc = signal_processor_p(
(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
sigp_store_status_at_address);
- } while(rc == sigp_busy);
- }
+ } while (rc == sigp_busy);
+ }
}
static void do_wait_for_stop(void)
@@ -231,7 +234,7 @@ static void do_wait_for_stop(void)
for_each_online_cpu(cpu) {
if (cpu == smp_processor_id())
continue;
- while(!smp_cpu_not_running(cpu))
+ while (!smp_cpu_not_running(cpu))
cpu_relax();
}
}
@@ -245,7 +248,7 @@ void smp_send_stop(void)
/* Disable all interrupts/machine checks */
__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
- /* write magic number to zero page (absolute 0) */
+ /* write magic number to zero page (absolute 0) */
lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
/* stop other processors. */
@@ -261,8 +264,7 @@ void smp_send_stop(void)
/*
* Reboot, halt and power_off routines for SMP.
*/
-
-void machine_restart_smp(char * __unused)
+void machine_restart_smp(char *__unused)
{
smp_send_stop();
do_reipl();
@@ -293,17 +295,17 @@ void machine_power_off_smp(void)
static void do_ext_call_interrupt(__u16 code)
{
- unsigned long bits;
+ unsigned long bits;
- /*
- * handle bit signal external calls
- *
- * For the ec_schedule signal we have to do nothing. All the work
- * is done automatically when we return from the interrupt.
- */
+ /*
+ * handle bit signal external calls
+ *
+ * For the ec_schedule signal we have to do nothing. All the work
+ * is done automatically when we return from the interrupt.
+ */
bits = xchg(&S390_lowcore.ext_call_fast, 0);
- if (test_bit(ec_call_function, &bits))
+ if (test_bit(ec_call_function, &bits))
do_call_function();
}
@@ -313,11 +315,11 @@ static void do_ext_call_interrupt(__u16 code)
*/
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
- /*
- * Set signaling bit in lowcore of target cpu and kick it
- */
+ /*
+ * Set signaling bit in lowcore of target cpu and kick it
+ */
set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
- while(signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
+ while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
udelay(10);
}
@@ -332,7 +334,7 @@ void smp_ptlb_callback(void *info)
void smp_ptlb_all(void)
{
- on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
+ on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */
@@ -344,7 +346,7 @@ EXPORT_SYMBOL(smp_ptlb_all);
*/
void smp_send_reschedule(int cpu)
{
- smp_ext_bitcall(cpu, ec_schedule);
+ smp_ext_bitcall(cpu, ec_schedule);
}
/*
@@ -358,11 +360,12 @@ struct ec_creg_mask_parms {
/*
* callback for setting/clearing control bits
*/
-static void smp_ctl_bit_callback(void *info) {
+static void smp_ctl_bit_callback(void *info)
+{
struct ec_creg_mask_parms *pp = info;
unsigned long cregs[16];
int i;
-
+
__ctl_store(cregs, 0, 15);
for (i = 0; i <= 15; i++)
cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
@@ -381,6 +384,7 @@ void smp_ctl_set_bit(int cr, int bit)
parms.orvals[cr] = 1 << bit;
on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
+EXPORT_SYMBOL(smp_ctl_set_bit);
/*
* Clear a bit in a control register of all cpus
@@ -394,13 +398,72 @@ void smp_ctl_clear_bit(int cr, int bit)
parms.andvals[cr] = ~(1L << bit);
on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
+EXPORT_SYMBOL(smp_ctl_clear_bit);
+
+#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
+
+/*
+ * zfcpdump_prefix_array holds prefix registers for the following scenario:
+ * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
+ * save its prefix registers, since they get lost when switching from 31 bit
+ * to 64 bit.
+ */
+unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
+ __attribute__((__section__(".data")));
+
+static void __init smp_get_save_areas(void)
+{
+ unsigned int cpu, cpu_num, rc;
+ __u16 boot_cpu_addr;
+
+ if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+ return;
+ boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
+ cpu_num = 1;
+ for (cpu = 0; cpu <= 65535; cpu++) {
+ if ((u16) cpu == boot_cpu_addr)
+ continue;
+ __cpu_logical_map[1] = (__u16) cpu;
+ if (signal_processor(1, sigp_sense) == sigp_not_operational)
+ continue;
+ if (cpu_num >= NR_CPUS) {
+ printk("WARNING: Registers for cpu %i are not "
+			       "saved, since dump kernel was compiled with "
+ "NR_CPUS=%i!\n", cpu_num, NR_CPUS);
+ continue;
+ }
+ zfcpdump_save_areas[cpu_num] =
+ alloc_bootmem(sizeof(union save_area));
+ while (1) {
+ rc = signal_processor(1, sigp_stop_and_store_status);
+ if (rc != sigp_busy)
+ break;
+ cpu_relax();
+ }
+ memcpy(zfcpdump_save_areas[cpu_num],
+ (void *)(unsigned long) store_prefix() +
+ SAVE_AREA_BASE, SAVE_AREA_SIZE);
+#ifdef __s390x__
+ /* copy original prefix register */
+ zfcpdump_save_areas[cpu_num]->s390x.pref_reg =
+ zfcpdump_prefix_array[cpu_num];
+#endif
+ cpu_num++;
+ }
+}
+
+union save_area *zfcpdump_save_areas[NR_CPUS + 1];
+EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
+
+#else
+#define smp_get_save_areas() do { } while (0)
+#endif
/*
* Lets check how many CPUs we have.
*/
-static unsigned int
-__init smp_count_cpus(void)
+static unsigned int __init smp_count_cpus(void)
{
unsigned int cpu, num_cpus;
__u16 boot_cpu_addr;
@@ -416,31 +479,30 @@ __init smp_count_cpus(void)
if ((__u16) cpu == boot_cpu_addr)
continue;
__cpu_logical_map[1] = (__u16) cpu;
- if (signal_processor(1, sigp_sense) ==
- sigp_not_operational)
+ if (signal_processor(1, sigp_sense) == sigp_not_operational)
continue;
num_cpus++;
}
- printk("Detected %d CPU's\n",(int) num_cpus);
+ printk("Detected %d CPU's\n", (int) num_cpus);
printk("Boot cpu address %2X\n", boot_cpu_addr);
return num_cpus;
}
/*
- * Activate a secondary processor.
+ * Activate a secondary processor.
*/
int __devinit start_secondary(void *cpuvoid)
{
- /* Setup the cpu */
- cpu_init();
+ /* Setup the cpu */
+ cpu_init();
preempt_disable();
/* Enable TOD clock interrupts on the secondary cpu. */
- init_cpu_timer();
+ init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
/* Enable cpu timer interrupts on the secondary cpu. */
- init_cpu_vtimer();
+ init_cpu_vtimer();
#endif
/* Enable pfault pseudo page faults on this cpu. */
pfault_init();
@@ -449,11 +511,11 @@ int __devinit start_secondary(void *cpuvoid)
cpu_set(smp_processor_id(), cpu_online_map);
/* Switch on interrupts */
local_irq_enable();
- /* Print info about this processor */
- print_cpu_info(&S390_lowcore.cpu_data);
- /* cpu_idle will call schedule for us */
- cpu_idle();
- return 0;
+ /* Print info about this processor */
+ print_cpu_info(&S390_lowcore.cpu_data);
+ /* cpu_idle will call schedule for us */
+ cpu_idle();
+ return 0;
}
static void __init smp_create_idle(unsigned int cpu)
@@ -470,56 +532,13 @@ static void __init smp_create_idle(unsigned int cpu)
current_set[cpu] = p;
}
-/* Reserving and releasing of CPUs */
-
-static DEFINE_SPINLOCK(smp_reserve_lock);
-static int smp_cpu_reserved[NR_CPUS];
-
-int
-smp_get_cpu(cpumask_t cpu_mask)
-{
- unsigned long flags;
- int cpu;
-
- spin_lock_irqsave(&smp_reserve_lock, flags);
- /* Try to find an already reserved cpu. */
- for_each_cpu_mask(cpu, cpu_mask) {
- if (smp_cpu_reserved[cpu] != 0) {
- smp_cpu_reserved[cpu]++;
- /* Found one. */
- goto out;
- }
- }
- /* Reserve a new cpu from cpu_mask. */
- for_each_cpu_mask(cpu, cpu_mask) {
- if (cpu_online(cpu)) {
- smp_cpu_reserved[cpu]++;
- goto out;
- }
- }
- cpu = -ENODEV;
-out:
- spin_unlock_irqrestore(&smp_reserve_lock, flags);
- return cpu;
-}
-
-void
-smp_put_cpu(int cpu)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&smp_reserve_lock, flags);
- smp_cpu_reserved[cpu]--;
- spin_unlock_irqrestore(&smp_reserve_lock, flags);
-}
-
-static int
-cpu_stopped(int cpu)
+static int cpu_stopped(int cpu)
{
__u32 status;
/* Check for stopped state */
- if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
+ if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
+ sigp_status_stored) {
if (status & 0x40)
return 1;
}
@@ -528,14 +547,13 @@ cpu_stopped(int cpu)
/* Upping and downing of CPUs */
-int
-__cpu_up(unsigned int cpu)
+int __cpu_up(unsigned int cpu)
{
struct task_struct *idle;
- struct _lowcore *cpu_lowcore;
+ struct _lowcore *cpu_lowcore;
struct stack_frame *sf;
- sigp_ccode ccode;
- int curr_cpu;
+ sigp_ccode ccode;
+ int curr_cpu;
for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
__cpu_logical_map[cpu] = (__u16) curr_cpu;
@@ -548,7 +566,7 @@ __cpu_up(unsigned int cpu)
ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
cpu, sigp_set_prefix);
- if (ccode){
+ if (ccode) {
printk("sigp_set_prefix failed for cpu %d "
"with condition code %d\n",
(int) cpu, (int) ccode);
@@ -556,9 +574,9 @@ __cpu_up(unsigned int cpu)
}
idle = current_set[cpu];
- cpu_lowcore = lowcore_ptr[cpu];
+ cpu_lowcore = lowcore_ptr[cpu];
cpu_lowcore->kernel_stack = (unsigned long)
- task_stack_page(idle) + (THREAD_SIZE);
+ task_stack_page(idle) + THREAD_SIZE;
sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
- sizeof(struct pt_regs)
- sizeof(struct stack_frame));
@@ -570,11 +588,11 @@ __cpu_up(unsigned int cpu)
" stam 0,15,0(%0)"
: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
- cpu_lowcore->current_task = (unsigned long) idle;
- cpu_lowcore->cpu_data.cpu_nr = cpu;
+ cpu_lowcore->current_task = (unsigned long) idle;
+ cpu_lowcore->cpu_data.cpu_nr = cpu;
eieio();
- while (signal_processor(cpu,sigp_restart) == sigp_busy)
+ while (signal_processor(cpu, sigp_restart) == sigp_busy)
udelay(10);
while (!cpu_online(cpu))
@@ -589,6 +607,7 @@ void __init smp_setup_cpu_possible_map(void)
{
unsigned int phy_cpus, pos_cpus, cpu;
+ smp_get_save_areas();
phy_cpus = smp_count_cpus();
pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
@@ -620,18 +639,11 @@ static int __init setup_possible_cpus(char *s)
}
early_param("possible_cpus", setup_possible_cpus);
-int
-__cpu_disable(void)
+int __cpu_disable(void)
{
- unsigned long flags;
struct ec_creg_mask_parms cr_parms;
int cpu = smp_processor_id();
- spin_lock_irqsave(&smp_reserve_lock, flags);
- if (smp_cpu_reserved[cpu] != 0) {
- spin_unlock_irqrestore(&smp_reserve_lock, flags);
- return -EBUSY;
- }
cpu_clear(cpu, cpu_online_map);
/* Disable pfault pseudo page faults on this cpu. */
@@ -642,24 +654,23 @@ __cpu_disable(void)
/* disable all external interrupts */
cr_parms.orvals[0] = 0;
- cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
- 1<<11 | 1<<10 | 1<< 6 | 1<< 4);
+ cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
+ 1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
/* disable all I/O interrupts */
cr_parms.orvals[6] = 0;
- cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
- 1<<27 | 1<<26 | 1<<25 | 1<<24);
+ cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
+ 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
/* disable most machine checks */
cr_parms.orvals[14] = 0;
- cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
+ cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
+ 1 << 25 | 1 << 24);
smp_ctl_bit_callback(&cr_parms);
- spin_unlock_irqrestore(&smp_reserve_lock, flags);
return 0;
}
-void
-__cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
{
/* Wait until target cpu is down */
while (!smp_cpu_not_running(cpu))
@@ -667,13 +678,12 @@ __cpu_die(unsigned int cpu)
printk("Processor %d spun down\n", cpu);
}
-void
-cpu_die(void)
+void cpu_die(void)
{
idle_task_exit();
signal_processor(smp_processor_id(), sigp_stop);
BUG();
- for(;;);
+ for (;;);
}
#endif /* CONFIG_HOTPLUG_CPU */
@@ -686,36 +696,36 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned long stack;
unsigned int cpu;
- int i;
-
- /* request the 0x1201 emergency signal external interrupt */
- if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
- panic("Couldn't request external interrupt 0x1201");
- memset(lowcore_ptr,0,sizeof(lowcore_ptr));
- /*
- * Initialize prefix pages and stacks for all possible cpus
- */
+ int i;
+
+ /* request the 0x1201 emergency signal external interrupt */
+ if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
+ panic("Couldn't request external interrupt 0x1201");
+ memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
+ /*
+ * Initialize prefix pages and stacks for all possible cpus
+ */
print_cpu_info(&S390_lowcore.cpu_data);
- for_each_possible_cpu(i) {
+ for_each_possible_cpu(i) {
lowcore_ptr[i] = (struct _lowcore *)
- __get_free_pages(GFP_KERNEL|GFP_DMA,
- sizeof(void*) == 8 ? 1 : 0);
- stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER);
- if (lowcore_ptr[i] == NULL || stack == 0ULL)
+ __get_free_pages(GFP_KERNEL | GFP_DMA,
+ sizeof(void*) == 8 ? 1 : 0);
+ stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+ if (!lowcore_ptr[i] || !stack)
panic("smp_boot_cpus failed to allocate memory\n");
*(lowcore_ptr[i]) = S390_lowcore;
- lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
- stack = __get_free_pages(GFP_KERNEL,0);
- if (stack == 0ULL)
+ lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
+ stack = __get_free_pages(GFP_KERNEL, 0);
+ if (!stack)
panic("smp_boot_cpus failed to allocate memory\n");
- lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
+ lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) {
lowcore_ptr[i]->extended_save_area_addr =
- (__u32) __get_free_pages(GFP_KERNEL,0);
- if (lowcore_ptr[i]->extended_save_area_addr == 0)
+ (__u32) __get_free_pages(GFP_KERNEL, 0);
+ if (!lowcore_ptr[i]->extended_save_area_addr)
panic("smp_boot_cpus failed to "
"allocate memory\n");
}
@@ -754,34 +764,63 @@ void smp_cpus_done(unsigned int max_cpus)
*/
int setup_profiling_timer(unsigned int multiplier)
{
- return 0;
+ return 0;
}
static DEFINE_PER_CPU(struct cpu, cpu_devices);
+static ssize_t show_capability(struct sys_device *dev, char *buf)
+{
+ unsigned int capability;
+ int rc;
+
+ rc = get_cpu_capability(&capability);
+ if (rc)
+ return rc;
+ return sprintf(buf, "%u\n", capability);
+}
+static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
+
+static int __cpuinit smp_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned int)(long)hcpu;
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+ struct sys_device *s = &c->sysdev;
+
+ switch (action) {
+ case CPU_ONLINE:
+ if (sysdev_create_file(s, &attr_capability))
+ return NOTIFY_BAD;
+ break;
+ case CPU_DEAD:
+ sysdev_remove_file(s, &attr_capability);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata smp_cpu_nb = {
+ .notifier_call = smp_cpu_notify,
+};
+
static int __init topology_init(void)
{
int cpu;
- int ret;
+
+ register_cpu_notifier(&smp_cpu_nb);
for_each_possible_cpu(cpu) {
struct cpu *c = &per_cpu(cpu_devices, cpu);
+ struct sys_device *s = &c->sysdev;
c->hotpluggable = 1;
- ret = register_cpu(c, cpu);
- if (ret)
- printk(KERN_WARNING "topology_init: register_cpu %d "
- "failed (%d)\n", cpu, ret);
+ register_cpu(c, cpu);
+ if (!cpu_online(cpu))
+ continue;
+ s = &c->sysdev;
+ sysdev_create_file(s, &attr_capability);
}
return 0;
}
-
subsys_initcall(topology_init);
-
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
-EXPORT_SYMBOL(lowcore_ptr);
-EXPORT_SYMBOL(smp_ctl_set_bit);
-EXPORT_SYMBOL(smp_ctl_clear_bit);
-EXPORT_SYMBOL(smp_get_cpu);
-EXPORT_SYMBOL(smp_put_cpu);
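
The smp.c changes above drop the old smp_get_cpu()/smp_put_cpu() reservation interface and add a per-cpu "capability" sysdev attribute, created from the CPU_ONLINE notifier and removed on CPU_DEAD. A minimal userspace sketch for reading the new attribute; the sysfs path is assumed from the usual cpu sysdev layout and is not spelled out in this patch:

#include <stdio.h>

/* Illustrative reader for the per-cpu capability attribute. */
int main(int argc, char **argv)
{
	const char *cpu = (argc > 1) ? argv[1] : "0";
	char path[64];
	unsigned int capability;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%s/capability", cpu);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%u", &capability) == 1)
		printf("cpu%s capability: %u\n", cpu, capability);
	fclose(f);
	return 0;
}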
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 584ed95f338..3a77c22cda7 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -266,23 +266,3 @@ s390_fadvise64_64(struct fadvise64_64_args __user *args)
return -EFAULT;
return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
}
-
-/*
- * Do a system call from kernel instead of calling sys_execve so we
- * end up with proper pt_regs.
- */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
-{
- register const char *__arg1 asm("2") = filename;
- register char *const*__arg2 asm("3") = argv;
- register char *const*__arg3 asm("4") = envp;
- register long __svcres asm("2");
- asm volatile(
- "svc %b1"
- : "=d" (__svcres)
- : "i" (__NR_execve),
- "0" (__arg1),
- "d" (__arg2),
- "d" (__arg3) : "memory");
- return __svcres;
-}
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index c774f1069e1..cd8d321cd0c 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -10,7 +10,7 @@
NI_SYSCALL /* 0 */
SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper)
-SYSCALL(sys_fork_glue,sys_fork_glue,sys_fork_glue)
+SYSCALL(sys_fork,sys_fork,sys_fork)
SYSCALL(sys_read,sys_read,sys32_read_wrapper)
SYSCALL(sys_write,sys_write,sys32_write_wrapper)
SYSCALL(sys_open,sys_open,sys32_open_wrapper) /* 5 */
@@ -19,7 +19,7 @@ SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall)
SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper)
SYSCALL(sys_link,sys_link,sys32_link_wrapper)
SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper) /* 10 */
-SYSCALL(sys_execve_glue,sys_execve_glue,sys32_execve_glue)
+SYSCALL(sys_execve,sys_execve,sys32_execve)
SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper)
SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper) /* old time syscall */
SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper)
@@ -127,8 +127,8 @@ SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */
SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper)
SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper)
SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
-SYSCALL(sys_sigreturn_glue,sys_sigreturn_glue,sys32_sigreturn_glue)
-SYSCALL(sys_clone_glue,sys_clone_glue,sys32_clone_glue) /* 120 */
+SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn)
+SYSCALL(sys_clone,sys_clone,sys32_clone) /* 120 */
SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper)
NI_SYSCALL /* modify_ldt for i386 */
@@ -181,7 +181,7 @@ SYSCALL(sys_nfsservctl,sys_nfsservctl,compat_sys_nfsservctl_wrapper)
SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */
SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */
SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper)
-SYSCALL(sys_rt_sigreturn_glue,sys_rt_sigreturn_glue,sys32_rt_sigreturn_glue)
+SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,sys32_rt_sigreturn)
SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper)
SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper) /* 175 */
SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper)
@@ -194,11 +194,11 @@ SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper) /* old chown16 syscall
SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper)
SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper)
SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */
-SYSCALL(sys_sigaltstack_glue,sys_sigaltstack_glue,sys32_sigaltstack_glue)
+SYSCALL(sys_sigaltstack,sys_sigaltstack,sys32_sigaltstack)
SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper)
NI_SYSCALL /* streams1 */
NI_SYSCALL /* streams2 */
-SYSCALL(sys_vfork_glue,sys_vfork_glue,sys_vfork_glue) /* 190 */
+SYSCALL(sys_vfork,sys_vfork,sys_vfork) /* 190 */
SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper)
SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper)
SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index e1ad464b6f2..711dae8da7a 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -280,7 +280,6 @@ static void clock_comparator_interrupt(__u16 code)
}
static void etr_reset(void);
-static void etr_init(void);
static void etr_ext_handler(__u16);
/*
@@ -355,7 +354,6 @@ void __init time_init(void)
#ifdef CONFIG_VIRT_TIMER
vtime_init();
#endif
- etr_init();
}
/*
@@ -426,11 +424,11 @@ static struct etr_aib etr_port1;
static int etr_port1_uptodate;
static unsigned long etr_events;
static struct timer_list etr_timer;
-static struct tasklet_struct etr_tasklet;
static DEFINE_PER_CPU(atomic_t, etr_sync_word);
static void etr_timeout(unsigned long dummy);
-static void etr_tasklet_fn(unsigned long dummy);
+static void etr_work_fn(struct work_struct *work);
+static DECLARE_WORK(etr_work, etr_work_fn);
/*
* The etr get_clock function. It will write the current clock value
@@ -507,29 +505,31 @@ static void etr_reset(void)
}
}
-static void etr_init(void)
+static int __init etr_init(void)
{
struct etr_aib aib;
if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
- return;
+ return 0;
/* Check if this machine has the steai instruction. */
if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
set_bit(ETR_FLAG_STEAI, &etr_flags);
setup_timer(&etr_timer, etr_timeout, 0UL);
- tasklet_init(&etr_tasklet, etr_tasklet_fn, 0);
if (!etr_port0_online && !etr_port1_online)
set_bit(ETR_FLAG_EACCES, &etr_flags);
if (etr_port0_online) {
set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
- tasklet_hi_schedule(&etr_tasklet);
+ schedule_work(&etr_work);
}
if (etr_port1_online) {
set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
- tasklet_hi_schedule(&etr_tasklet);
+ schedule_work(&etr_work);
}
+ return 0;
}
+arch_initcall(etr_init);
+
/*
* Two sorts of ETR machine checks. The architecture reads:
* "When a machine-check niterruption occurs and if a switch-to-local or
@@ -549,7 +549,7 @@ void etr_switch_to_local(void)
return;
etr_disable_sync_clock(NULL);
set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
- tasklet_hi_schedule(&etr_tasklet);
+ schedule_work(&etr_work);
}
/*
@@ -564,7 +564,7 @@ void etr_sync_check(void)
return;
etr_disable_sync_clock(NULL);
set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
- tasklet_hi_schedule(&etr_tasklet);
+ schedule_work(&etr_work);
}
/*
@@ -591,13 +591,13 @@ static void etr_ext_handler(__u16 code)
* Both ports are not up-to-date now.
*/
set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
- tasklet_hi_schedule(&etr_tasklet);
+ schedule_work(&etr_work);
}
static void etr_timeout(unsigned long dummy)
{
set_bit(ETR_EVENT_UPDATE, &etr_events);
- tasklet_hi_schedule(&etr_tasklet);
+ schedule_work(&etr_work);
}
/*
@@ -927,7 +927,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
if (!eacr.e0 && !eacr.e1)
return eacr;
- /* Update port0 or port1 with aib stored in etr_tasklet_fn. */
+ /* Update port0 or port1 with aib stored in etr_work_fn. */
if (aib->esw.q == 0) {
/* Information for port 0 stored. */
if (eacr.p0 && !etr_port0_uptodate) {
@@ -1007,7 +1007,7 @@ static void etr_update_eacr(struct etr_eacr eacr)
* particular this is the only function that calls etr_update_eacr(),
* it "controls" the etr control register.
*/
-static void etr_tasklet_fn(unsigned long dummy)
+static void etr_work_fn(struct work_struct *work)
{
unsigned long long now;
struct etr_eacr eacr;
@@ -1220,13 +1220,13 @@ static ssize_t etr_online_store(struct sys_device *dev,
return count; /* Nothing to do. */
etr_port0_online = value;
set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
- tasklet_hi_schedule(&etr_tasklet);
+ schedule_work(&etr_work);
} else {
if (etr_port1_online == value)
return count; /* Nothing to do. */
etr_port1_online = value;
set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
- tasklet_hi_schedule(&etr_tasklet);
+ schedule_work(&etr_work);
}
return count;
}
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index f0e5a320e2e..49dec830373 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -30,7 +30,7 @@
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
-
+#include <linux/bug.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -188,18 +188,31 @@ void dump_stack(void)
EXPORT_SYMBOL(dump_stack);
+static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
+{
+ return (regs->psw.mask & bits) / ((~bits + 1) & bits);
+}
+
void show_registers(struct pt_regs *regs)
{
- mm_segment_t old_fs;
char *mode;
- int i;
mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
printk("%s PSW : %p %p",
mode, (void *) regs->psw.mask,
(void *) regs->psw.addr);
print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
- printk("%s GPRS: " FOURLONG, mode,
+ printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
+ "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
+ mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
+ mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
+ mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
+ mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
+ mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
+#ifdef CONFIG_64BIT
+ printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
+#endif
+ printk("\n%s GPRS: " FOURLONG, mode,
regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
printk(" " FOURLONG,
regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
@@ -208,41 +221,7 @@ void show_registers(struct pt_regs *regs)
printk(" " FOURLONG,
regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
-#if 0
- /* FIXME: this isn't needed any more but it changes the ksymoops
- * input. To remove or not to remove ... */
- save_access_regs(regs->acrs);
- printk("%s ACRS: %08x %08x %08x %08x\n", mode,
- regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]);
- printk(" %08x %08x %08x %08x\n",
- regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]);
- printk(" %08x %08x %08x %08x\n",
- regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]);
- printk(" %08x %08x %08x %08x\n",
- regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]);
-#endif
-
- /*
- * Print the first 20 byte of the instruction stream at the
- * time of the fault.
- */
- old_fs = get_fs();
- if (regs->psw.mask & PSW_MASK_PSTATE)
- set_fs(USER_DS);
- else
- set_fs(KERNEL_DS);
- printk("%s Code: ", mode);
- for (i = 0; i < 20; i++) {
- unsigned char c;
- if (__get_user(c, (char __user *)(regs->psw.addr + i))) {
- printk(" Bad PSW.");
- break;
- }
- printk("%02x ", c);
- }
- set_fs(old_fs);
-
- printk("\n");
+ show_code(regs);
}
/* This is called from fs/proc/array.c */
@@ -318,6 +297,11 @@ report_user_fault(long interruption_code, struct pt_regs *regs)
#endif
}
+int is_valid_bugaddr(unsigned long addr)
+{
+ return 1;
+}
+
static void __kprobes inline do_trap(long interruption_code, int signr,
char *str, struct pt_regs *regs,
siginfo_t *info)
@@ -344,8 +328,14 @@ static void __kprobes inline do_trap(long interruption_code, int signr,
fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
if (fixup)
regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
- else
- die(str, regs, interruption_code);
+ else {
+ enum bug_trap_type btt;
+
+ btt = report_bug(regs->psw.addr & PSW_ADDR_INSN);
+ if (btt == BUG_TRAP_TYPE_WARN)
+ return;
+ die(str, regs, interruption_code);
+ }
}
}
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index c30716ae130..418f6426a94 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -45,6 +45,8 @@ SECTIONS
__ex_table : { *(__ex_table) }
__stop___ex_table = .;
+ BUG_TABLE
+
.data : { /* Data */
*(.data)
CONSTRUCTORS
@@ -77,6 +79,12 @@ SECTIONS
*(.init.text)
_einittext = .;
}
+ /*
+ * .exit.text is discarded at runtime, not link time,
+ * to deal with references from __bug_table
+ */
+ .exit.text : { *(.exit.text) }
+
.init.data : { *(.init.data) }
. = ALIGN(256);
__setup_start = .;
@@ -116,7 +124,7 @@ SECTIONS
/* Sections to be discarded */
/DISCARD/ : {
- *(.exit.text) *(.exit.data) *(.exitcall.exit)
+ *(.exit.data) *(.exitcall.exit)
}
/* Stabs debugging sections. */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 9d5b02801b4..1e1a6ee2cac 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -128,7 +128,7 @@ static inline void set_vtimer(__u64 expires)
S390_lowcore.last_update_timer = expires;
/* store expire time for this CPU timer */
- per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
+ __get_cpu_var(virt_cpu_timer).to_expire = expires;
}
#else
static inline void set_vtimer(__u64 expires)
@@ -137,7 +137,7 @@ static inline void set_vtimer(__u64 expires)
asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
/* store expire time for this CPU timer */
- per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
+ __get_cpu_var(virt_cpu_timer).to_expire = expires;
}
#endif
@@ -145,7 +145,7 @@ static void start_cpu_timer(void)
{
struct vtimer_queue *vt_list;
- vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+ vt_list = &__get_cpu_var(virt_cpu_timer);
 /* CPU timer interrupt is pending, don't reprogram it */
if (vt_list->idle & 1LL<<63)
@@ -159,7 +159,7 @@ static void stop_cpu_timer(void)
{
struct vtimer_queue *vt_list;
- vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+ vt_list = &__get_cpu_var(virt_cpu_timer);
/* nothing to do */
if (list_empty(&vt_list->list)) {
@@ -219,7 +219,7 @@ static void do_callbacks(struct list_head *cb_list)
if (list_empty(cb_list))
return;
- vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+ vt_list = &__get_cpu_var(virt_cpu_timer);
list_for_each_entry_safe(event, tmp, cb_list, entry) {
fn = event->function;
@@ -244,7 +244,6 @@ static void do_callbacks(struct list_head *cb_list)
*/
static void do_cpu_timer_interrupt(__u16 error_code)
{
- int cpu;
__u64 next, delta;
struct vtimer_queue *vt_list;
struct vtimer_list *event, *tmp;
@@ -253,8 +252,7 @@ static void do_cpu_timer_interrupt(__u16 error_code)
struct list_head cb_list;
INIT_LIST_HEAD(&cb_list);
- cpu = smp_processor_id();
- vt_list = &per_cpu(virt_cpu_timer, cpu);
+ vt_list = &__get_cpu_var(virt_cpu_timer);
/* walk timer list, fire all expired events */
spin_lock(&vt_list->lock);
@@ -534,7 +532,7 @@ void init_cpu_vtimer(void)
/* enable cpu timer interrupts */
__ctl_set_bit(0,10);
- vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+ vt_list = &__get_cpu_var(virt_cpu_timer);
INIT_LIST_HEAD(&vt_list->list);
spin_lock_init(&vt_list->lock);
vt_list->to_expire = 0;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7462aebd3eb..2b76a879a7b 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -26,9 +26,9 @@
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
+#include <linux/uaccess.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/kdebug.h>
#include <asm/s390_ext.h>
@@ -63,21 +63,25 @@ int unregister_page_fault_notifier(struct notifier_block *nb)
return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
}
-static inline int notify_page_fault(enum die_val val, const char *str,
- struct pt_regs *regs, long err, int trap, int sig)
+static int __kprobes __notify_page_fault(struct pt_regs *regs, long err)
{
- struct die_args args = {
- .regs = regs,
- .str = str,
- .err = err,
- .trapnr = trap,
- .signr = sig
- };
- return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+ struct die_args args = { .str = "page fault",
+ .trapnr = 14,
+ .signr = SIGSEGV };
+ args.regs = regs;
+ args.err = err;
+ return atomic_notifier_call_chain(&notify_page_fault_chain,
+ DIE_PAGE_FAULT, &args);
+}
+
+static inline int notify_page_fault(struct pt_regs *regs, long err)
+{
+ if (unlikely(kprobe_running()))
+ return __notify_page_fault(regs, err);
+ return NOTIFY_DONE;
}
#else
-static inline int notify_page_fault(enum die_val val, const char *str,
- struct pt_regs *regs, long err, int trap, int sig)
+static inline int notify_page_fault(struct pt_regs *regs, long err)
{
return NOTIFY_DONE;
}
@@ -170,74 +174,127 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
force_sig_info(SIGSEGV, &si, current);
}
+static void do_no_context(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
+{
+ const struct exception_table_entry *fixup;
+
+ /* Are we prepared to handle this kernel fault? */
+ fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
+ if (fixup) {
+ regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
+ return;
+ }
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+ if (check_space(current) == 0)
+ printk(KERN_ALERT "Unable to handle kernel pointer dereference"
+ " at virtual kernel address %p\n", (void *)address);
+ else
+ printk(KERN_ALERT "Unable to handle kernel paging request"
+ " at virtual user address %p\n", (void *)address);
+
+ die("Oops", regs, error_code);
+ do_exit(SIGKILL);
+}
+
+static void do_low_address(struct pt_regs *regs, unsigned long error_code)
+{
+ /* Low-address protection hit in kernel mode means
+ NULL pointer write access in kernel mode. */
+ if (regs->psw.mask & PSW_MASK_PSTATE) {
+ /* Low-address protection hit in user mode 'cannot happen'. */
+ die ("Low-address protection", regs, error_code);
+ do_exit(SIGKILL);
+ }
+
+ do_no_context(regs, error_code, 0);
+}
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+static int do_out_of_memory(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
+{
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+
+ up_read(&mm->mmap_sem);
+ if (is_init(tsk)) {
+ yield();
+ down_read(&mm->mmap_sem);
+ return 1;
+ }
+ printk("VM: killing process %s\n", tsk->comm);
+ if (regs->psw.mask & PSW_MASK_PSTATE)
+ do_exit(SIGKILL);
+ do_no_context(regs, error_code, address);
+ return 0;
+}
+
+static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
+{
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+
+ up_read(&mm->mmap_sem);
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ tsk->thread.prot_addr = address;
+ tsk->thread.trap_no = error_code;
+ force_sig(SIGBUS, tsk);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!(regs->psw.mask & PSW_MASK_PSTATE))
+ do_no_context(regs, error_code, address);
+}
+
#ifdef CONFIG_S390_EXEC_PROTECT
extern long sys_sigreturn(struct pt_regs *regs);
extern long sys_rt_sigreturn(struct pt_regs *regs);
extern long sys32_sigreturn(struct pt_regs *regs);
extern long sys32_rt_sigreturn(struct pt_regs *regs);
-static inline void do_sigreturn(struct mm_struct *mm, struct pt_regs *regs,
- int rt)
+static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
+ unsigned long address, unsigned long error_code)
{
+ u16 instruction;
+ int rc, compat;
+
+ pagefault_disable();
+ rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
+ pagefault_enable();
+ if (rc)
+ return -EFAULT;
+
up_read(&mm->mmap_sem);
clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
#ifdef CONFIG_COMPAT
- if (test_tsk_thread_flag(current, TIF_31BIT)) {
- if (rt)
- sys32_rt_sigreturn(regs);
- else
- sys32_sigreturn(regs);
- return;
- }
-#endif /* CONFIG_COMPAT */
- if (rt)
- sys_rt_sigreturn(regs);
+ compat = test_tsk_thread_flag(current, TIF_31BIT);
+ if (compat && instruction == 0x0a77)
+ sys32_sigreturn(regs);
+ else if (compat && instruction == 0x0aad)
+ sys32_rt_sigreturn(regs);
else
+#endif
+ if (instruction == 0x0a77)
sys_sigreturn(regs);
- return;
-}
-
-static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
- unsigned long address, unsigned long error_code)
-{
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
- u16 *instruction;
- unsigned long pfn, uaddr = regs->psw.addr;
-
- spin_lock(&mm->page_table_lock);
- pgd = pgd_offset(mm, uaddr);
- if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
- goto out_fault;
- pmd = pmd_offset(pgd, uaddr);
- if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
- goto out_fault;
- pte = pte_offset_map(pmd_offset(pgd_offset(mm, uaddr), uaddr), uaddr);
- if (!pte || !pte_present(*pte))
- goto out_fault;
- pfn = pte_pfn(*pte);
- if (!pfn_valid(pfn))
- goto out_fault;
- spin_unlock(&mm->page_table_lock);
-
- instruction = (u16 *) ((pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE-1)));
- if (*instruction == 0x0a77)
- do_sigreturn(mm, regs, 0);
- else if (*instruction == 0x0aad)
- do_sigreturn(mm, regs, 1);
+ else if (instruction == 0x0aad)
+ sys_rt_sigreturn(regs);
else {
- printk("- XXX - do_exception: task = %s, primary, NO EXEC "
- "-> SIGSEGV\n", current->comm);
- up_read(&mm->mmap_sem);
current->thread.prot_addr = address;
current->thread.trap_no = error_code;
do_sigsegv(regs, error_code, SEGV_MAPERR, address);
}
return 0;
-out_fault:
- spin_unlock(&mm->page_table_lock);
- return -EFAULT;
}
#endif /* CONFIG_S390_EXEC_PROTECT */
@@ -253,49 +310,23 @@ out_fault:
* 3b Region third trans. -> Not present (nullification)
*/
static inline void
-do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
+do_exception(struct pt_regs *regs, unsigned long error_code, int write)
{
- struct task_struct *tsk;
- struct mm_struct *mm;
- struct vm_area_struct * vma;
- unsigned long address;
- const struct exception_table_entry *fixup;
- int si_code;
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ unsigned long address;
int space;
+ int si_code;
- tsk = current;
- mm = tsk->mm;
-
- if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
- SIGSEGV) == NOTIFY_STOP)
+ if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
return;
- /*
- * Check for low-address protection. This needs to be treated
- * as a special case because the translation exception code
- * field is not guaranteed to contain valid data in this case.
- */
- if (is_protection && !(S390_lowcore.trans_exc_code & 4)) {
-
- /* Low-address protection hit in kernel mode means
- NULL pointer write access in kernel mode. */
- if (!(regs->psw.mask & PSW_MASK_PSTATE)) {
- address = 0;
- space = 0;
- goto no_context;
- }
-
- /* Low-address protection hit in user mode 'cannot happen'. */
- die ("Low-address protection", regs, error_code);
- do_exit(SIGKILL);
- }
+ tsk = current;
+ mm = tsk->mm;
- /*
- * get the failing address
- * more specific the segment and page table portion of
- * the address
- */
- address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
+ /* get the failing address and the affected space */
+ address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
space = check_space(tsk);
/*
@@ -313,7 +344,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
*/
local_irq_enable();
- down_read(&mm->mmap_sem);
+ down_read(&mm->mmap_sem);
si_code = SEGV_MAPERR;
vma = find_vma(mm, address);
@@ -330,19 +361,19 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
return;
#endif
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, address))
+ goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
si_code = SEGV_ACCERR;
- if (!is_protection) {
+ if (!write) {
/* page not present, check vm flags */
if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
goto bad_area;
@@ -357,7 +388,7 @@ survive:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- switch (handle_mm_fault(mm, vma, address, is_protection)) {
+ switch (handle_mm_fault(mm, vma, address, write)) {
case VM_FAULT_MINOR:
tsk->min_flt++;
break;
@@ -365,9 +396,12 @@ survive:
tsk->maj_flt++;
break;
case VM_FAULT_SIGBUS:
- goto do_sigbus;
+ do_sigbus(regs, error_code, address);
+ return;
case VM_FAULT_OOM:
- goto out_of_memory;
+ if (do_out_of_memory(regs, error_code, address))
+ goto survive;
+ return;
default:
BUG();
}
@@ -385,75 +419,34 @@ survive:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
- /* User mode accesses just cause a SIGSEGV */
- if (regs->psw.mask & PSW_MASK_PSTATE) {
- tsk->thread.prot_addr = address;
- tsk->thread.trap_no = error_code;
+ /* User mode accesses just cause a SIGSEGV */
+ if (regs->psw.mask & PSW_MASK_PSTATE) {
+ tsk->thread.prot_addr = address;
+ tsk->thread.trap_no = error_code;
do_sigsegv(regs, error_code, si_code, address);
- return;
+ return;
}
no_context:
- /* Are we prepared to handle this kernel fault? */
- fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
- if (fixup) {
- regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
- return;
- }
-
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
- if (space == 0)
- printk(KERN_ALERT "Unable to handle kernel pointer dereference"
- " at virtual kernel address %p\n", (void *)address);
- else
- printk(KERN_ALERT "Unable to handle kernel paging request"
- " at virtual user address %p\n", (void *)address);
-
- die("Oops", regs, error_code);
- do_exit(SIGKILL);
-
-
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
-*/
-out_of_memory:
- up_read(&mm->mmap_sem);
- if (is_init(tsk)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk("VM: killing process %s\n", tsk->comm);
- if (regs->psw.mask & PSW_MASK_PSTATE)
- do_exit(SIGKILL);
- goto no_context;
-
-do_sigbus:
- up_read(&mm->mmap_sem);
-
- /*
- * Send a sigbus, regardless of whether we were in kernel
- * or user mode.
- */
- tsk->thread.prot_addr = address;
- tsk->thread.trap_no = error_code;
- force_sig(SIGBUS, tsk);
-
- /* Kernel mode? Handle exceptions or die */
- if (!(regs->psw.mask & PSW_MASK_PSTATE))
- goto no_context;
+ do_no_context(regs, error_code, address);
}
void __kprobes do_protection_exception(struct pt_regs *regs,
unsigned long error_code)
{
+ /* Protection exception is suppressing, decrement psw address. */
regs->psw.addr -= (error_code >> 16);
+ /*
+ * Check for low-address protection. This needs to be treated
+ * as a special case because the translation exception code
+ * field is not guaranteed to contain valid data in this case.
+ */
+ if (unlikely(!(S390_lowcore.trans_exc_code & 4))) {
+ do_low_address(regs, error_code);
+ return;
+ }
do_exception(regs, 4, 1);
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index eb5dc62f0d9..e71929db8b0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -398,6 +398,9 @@ dasd_change_state(struct dasd_device *device)
if (device->state == device->target)
wake_up(&dasd_init_waitq);
+
+ /* let user-space know that the device status changed */
+ kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}
/*
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index ed70852cc91..6a89cefe99b 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -19,6 +19,7 @@
#include <asm/debug.h>
#include <asm/uaccess.h>
+#include <asm/ipl.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_devmap:"
@@ -133,6 +134,8 @@ dasd_call_setup(char *str)
__setup ("dasd=", dasd_call_setup);
#endif /* #ifndef MODULE */
+#define DASD_IPLDEV "ipldev"
+
/*
* Read a device busid/devno from a string.
*/
@@ -141,6 +144,20 @@ dasd_busid(char **str, int *id0, int *id1, int *devno)
{
int val, old_style;
+ /* Interpret ipldev busid */
+ if (strncmp(DASD_IPLDEV, *str, strlen(DASD_IPLDEV)) == 0) {
+ if (ipl_info.type != IPL_TYPE_CCW) {
+ MESSAGE(KERN_ERR, "%s", "ipl device is not a ccw "
+ "device");
+ return -EINVAL;
+ }
+ *id0 = 0;
+ *id1 = ipl_info.data.ccw.dev_id.ssid;
+ *devno = ipl_info.data.ccw.dev_id.devno;
+ *str += strlen(DASD_IPLDEV);
+
+ return 0;
+ }
/* check for leading '0x' */
old_style = 0;
if ((*str)[0] == '0' && (*str)[1] == 'x') {
@@ -829,6 +846,46 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
static ssize_t
+dasd_device_status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dasd_device *device;
+ ssize_t len;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (!IS_ERR(device)) {
+ switch (device->state) {
+ case DASD_STATE_NEW:
+ len = snprintf(buf, PAGE_SIZE, "new\n");
+ break;
+ case DASD_STATE_KNOWN:
+ len = snprintf(buf, PAGE_SIZE, "detected\n");
+ break;
+ case DASD_STATE_BASIC:
+ len = snprintf(buf, PAGE_SIZE, "basic\n");
+ break;
+ case DASD_STATE_UNFMT:
+ len = snprintf(buf, PAGE_SIZE, "unformatted\n");
+ break;
+ case DASD_STATE_READY:
+ len = snprintf(buf, PAGE_SIZE, "ready\n");
+ break;
+ case DASD_STATE_ONLINE:
+ len = snprintf(buf, PAGE_SIZE, "online\n");
+ break;
+ default:
+ len = snprintf(buf, PAGE_SIZE, "no stat\n");
+ break;
+ }
+ dasd_put_device(device);
+ } else
+ len = snprintf(buf, PAGE_SIZE, "unknown\n");
+ return len;
+}
+
+static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL);
+
+static ssize_t
dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dasd_devmap *devmap;
@@ -939,6 +996,7 @@ static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
static struct attribute * dasd_attrs[] = {
&dev_attr_readonly.attr,
&dev_attr_discipline.attr,
+ &dev_attr_status.attr,
&dev_attr_alias.attr,
&dev_attr_vendor.attr,
&dev_attr_uid.attr,
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 293e667b50f..c210784bdf4 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -3,7 +3,7 @@
#
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
- sclp_info.o
+ sclp_info.o sclp_config.o sclp_chp.o
obj-$(CONFIG_TN3270) += raw3270.o
obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
@@ -29,3 +29,6 @@ obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
obj-$(CONFIG_MONREADER) += monreader.o
obj-$(CONFIG_MONWRITER) += monwriter.o
+
+zcore_mod-objs := sclp_sdias.o zcore.o
+obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 9a328f14a64..6000bdee408 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -813,12 +813,6 @@ con3215_unblank(void)
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
-static int __init
-con3215_consetup(struct console *co, char *options)
-{
- return 0;
-}
-
/*
* The console structure for the 3215 console
*/
@@ -827,7 +821,6 @@ static struct console con3215 = {
.write = con3215_write,
.device = con3215_device,
.unblank = con3215_unblank,
- .setup = con3215_consetup,
.flags = CON_PRINTBUFFER,
};
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 8e7f2d7633d..fd3479119eb 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -555,12 +555,6 @@ con3270_unblank(void)
spin_unlock_irqrestore(&cp->view.lock, flags);
}
-static int __init
-con3270_consetup(struct console *co, char *options)
-{
- return 0;
-}
-
/*
* The console structure for the 3270 console
*/
@@ -569,7 +563,6 @@ static struct console con3270 = {
.write = con3270_write,
.device = con3270_device,
.unblank = con3270_unblank,
- .setup = con3270_consetup,
.flags = CON_PRINTBUFFER,
};
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index f171de3b0b1..fa62e694405 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -15,6 +15,7 @@
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
+#include <linux/init.h>
#include <asm/types.h>
#include <asm/s390_ext.h>
@@ -510,7 +511,7 @@ sclp_state_change_cb(struct evbuf_header *evbuf)
}
static struct sclp_register sclp_state_change_event = {
- .receive_mask = EvTyp_StateChange_Mask,
+ .receive_mask = EVTYP_STATECHANGE_MASK,
.receiver_fn = sclp_state_change_cb
};
@@ -930,3 +931,10 @@ sclp_init(void)
sclp_init_mask(1);
return 0;
}
+
+static __init int sclp_initcall(void)
+{
+ return sclp_init();
+}
+
+arch_initcall(sclp_initcall);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 7d29ab45a6e..87ac4a3ad49 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -19,33 +19,37 @@
#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
#define MAX_CONSOLE_PAGES 4
-#define EvTyp_OpCmd 0x01
-#define EvTyp_Msg 0x02
-#define EvTyp_StateChange 0x08
-#define EvTyp_PMsgCmd 0x09
-#define EvTyp_CntlProgOpCmd 0x20
-#define EvTyp_CntlProgIdent 0x0B
-#define EvTyp_SigQuiesce 0x1D
-#define EvTyp_VT220Msg 0x1A
-
-#define EvTyp_OpCmd_Mask 0x80000000
-#define EvTyp_Msg_Mask 0x40000000
-#define EvTyp_StateChange_Mask 0x01000000
-#define EvTyp_PMsgCmd_Mask 0x00800000
-#define EvTyp_CtlProgOpCmd_Mask 0x00000001
-#define EvTyp_CtlProgIdent_Mask 0x00200000
-#define EvTyp_SigQuiesce_Mask 0x00000008
-#define EvTyp_VT220Msg_Mask 0x00000040
-
-#define GnrlMsgFlgs_DOM 0x8000
-#define GnrlMsgFlgs_SndAlrm 0x4000
-#define GnrlMsgFlgs_HoldMsg 0x2000
-
-#define LnTpFlgs_CntlText 0x8000
-#define LnTpFlgs_LabelText 0x4000
-#define LnTpFlgs_DataText 0x2000
-#define LnTpFlgs_EndText 0x1000
-#define LnTpFlgs_PromptText 0x0800
+#define EVTYP_OPCMD 0x01
+#define EVTYP_MSG 0x02
+#define EVTYP_STATECHANGE 0x08
+#define EVTYP_PMSGCMD 0x09
+#define EVTYP_CNTLPROGOPCMD 0x20
+#define EVTYP_CNTLPROGIDENT 0x0B
+#define EVTYP_SIGQUIESCE 0x1D
+#define EVTYP_VT220MSG 0x1A
+#define EVTYP_CONFMGMDATA 0x04
+#define EVTYP_SDIAS 0x1C
+
+#define EVTYP_OPCMD_MASK 0x80000000
+#define EVTYP_MSG_MASK 0x40000000
+#define EVTYP_STATECHANGE_MASK 0x01000000
+#define EVTYP_PMSGCMD_MASK 0x00800000
+#define EVTYP_CTLPROGOPCMD_MASK 0x00000001
+#define EVTYP_CTLPROGIDENT_MASK 0x00200000
+#define EVTYP_SIGQUIESCE_MASK 0x00000008
+#define EVTYP_VT220MSG_MASK 0x00000040
+#define EVTYP_CONFMGMDATA_MASK 0x10000000
+#define EVTYP_SDIAS_MASK 0x00000010
+
+#define GNRLMSGFLGS_DOM 0x8000
+#define GNRLMSGFLGS_SNDALRM 0x4000
+#define GNRLMSGFLGS_HOLDMSG 0x2000
+
+#define LNTPFLGS_CNTLTEXT 0x8000
+#define LNTPFLGS_LABELTEXT 0x4000
+#define LNTPFLGS_DATATEXT 0x2000
+#define LNTPFLGS_ENDTEXT 0x1000
+#define LNTPFLGS_PROMPTTEXT 0x0800
typedef unsigned int sclp_cmdw_t;
@@ -56,15 +60,15 @@ typedef unsigned int sclp_cmdw_t;
#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
#define GDS_ID_MDSMU 0x1310
-#define GDS_ID_MDSRouteInfo 0x1311
-#define GDS_ID_AgUnWrkCorr 0x1549
-#define GDS_ID_SNACondReport 0x1532
+#define GDS_ID_MDSROUTEINFO 0x1311
+#define GDS_ID_AGUNWRKCORR 0x1549
+#define GDS_ID_SNACONDREPORT 0x1532
#define GDS_ID_CPMSU 0x1212
-#define GDS_ID_RoutTargInstr 0x154D
-#define GDS_ID_OpReq 0x8070
-#define GDS_ID_TextCmd 0x1320
+#define GDS_ID_ROUTTARGINSTR 0x154D
+#define GDS_ID_OPREQ 0x8070
+#define GDS_ID_TEXTCMD 0x1320
-#define GDS_KEY_SelfDefTextMsg 0x31
+#define GDS_KEY_SELFDEFTEXTMSG 0x31
typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */
diff --git a/drivers/s390/char/sclp_chp.c b/drivers/s390/char/sclp_chp.c
new file mode 100644
index 00000000000..a66b914519b
--- /dev/null
+++ b/drivers/s390/char/sclp_chp.c
@@ -0,0 +1,196 @@
+/*
+ * drivers/s390/char/sclp_chp.c
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/errno.h>
+#include <linux/completion.h>
+#include <asm/sclp.h>
+#include <asm/chpid.h>
+
+#include "sclp.h"
+
+#define TAG "sclp_chp: "
+
+#define SCLP_CMDW_CONFIGURE_CHANNEL_PATH 0x000f0001
+#define SCLP_CMDW_DECONFIGURE_CHANNEL_PATH 0x000e0001
+#define SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION 0x00030001
+
+static inline sclp_cmdw_t get_configure_cmdw(struct chp_id chpid)
+{
+ return SCLP_CMDW_CONFIGURE_CHANNEL_PATH | chpid.id << 8;
+}
+
+static inline sclp_cmdw_t get_deconfigure_cmdw(struct chp_id chpid)
+{
+ return SCLP_CMDW_DECONFIGURE_CHANNEL_PATH | chpid.id << 8;
+}
+
+static void chp_callback(struct sclp_req *req, void *data)
+{
+ struct completion *completion = data;
+
+ complete(completion);
+}
+
+struct chp_cfg_sccb {
+ struct sccb_header header;
+ u8 ccm;
+ u8 reserved[6];
+ u8 cssid;
+} __attribute__((packed));
+
+struct chp_cfg_data {
+ struct chp_cfg_sccb sccb;
+ struct sclp_req req;
+ struct completion completion;
+} __attribute__((packed));
+
+static int do_configure(sclp_cmdw_t cmd)
+{
+ struct chp_cfg_data *data;
+ int rc;
+
+ /* Prepare sccb. */
+ data = (struct chp_cfg_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!data)
+ return -ENOMEM;
+ data->sccb.header.length = sizeof(struct chp_cfg_sccb);
+ data->req.command = cmd;
+ data->req.sccb = &(data->sccb);
+ data->req.status = SCLP_REQ_FILLED;
+ data->req.callback = chp_callback;
+ data->req.callback_data = &(data->completion);
+ init_completion(&data->completion);
+
+ /* Perform sclp request. */
+ rc = sclp_add_request(&(data->req));
+ if (rc)
+ goto out;
+ wait_for_completion(&data->completion);
+
+ /* Check response. */
+ if (data->req.status != SCLP_REQ_DONE) {
+ printk(KERN_WARNING TAG "configure channel-path request failed "
+ "(status=0x%02x)\n", data->req.status);
+ rc = -EIO;
+ goto out;
+ }
+ switch (data->sccb.header.response_code) {
+ case 0x0020:
+ case 0x0120:
+ case 0x0440:
+ case 0x0450:
+ break;
+ default:
+ printk(KERN_WARNING TAG "configure channel-path failed "
+ "(cmd=0x%08x, response=0x%04x)\n", cmd,
+ data->sccb.header.response_code);
+ rc = -EIO;
+ break;
+ }
+out:
+ free_page((unsigned long) data);
+
+ return rc;
+}
+
+/**
+ * sclp_chp_configure - perform configure channel-path sclp command
+ * @chpid: channel-path ID
+ *
+ * Perform the configure channel-path sclp command for the specified chpid.
+ * Return 0 if the command finished successfully, non-zero otherwise.
+ */
+int sclp_chp_configure(struct chp_id chpid)
+{
+ return do_configure(get_configure_cmdw(chpid));
+}
+
+/**
+ * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
+ * @chpid: channel-path ID
+ *
+ * Perform the deconfigure channel-path sclp command for the specified chpid
+ * and wait for completion. Return 0 on success, non-zero otherwise.
+ */
+int sclp_chp_deconfigure(struct chp_id chpid)
+{
+ return do_configure(get_deconfigure_cmdw(chpid));
+}
+
+struct chp_info_sccb {
+ struct sccb_header header;
+ u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
+ u8 standby[SCLP_CHP_INFO_MASK_SIZE];
+ u8 configured[SCLP_CHP_INFO_MASK_SIZE];
+ u8 ccm;
+ u8 reserved[6];
+ u8 cssid;
+} __attribute__((packed));
+
+struct chp_info_data {
+ struct chp_info_sccb sccb;
+ struct sclp_req req;
+ struct completion completion;
+} __attribute__((packed));
+
+/**
+ * sclp_chp_read_info - perform read channel-path information sclp command
+ * @info: resulting channel-path information data
+ *
+ * Perform read channel-path information sclp command and wait for completion.
+ * On success, store channel-path information in @info and return 0. Return
+ * non-zero otherwise.
+ */
+int sclp_chp_read_info(struct sclp_chp_info *info)
+{
+ struct chp_info_data *data;
+ int rc;
+
+ /* Prepare sccb. */
+ data = (struct chp_info_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!data)
+ return -ENOMEM;
+ data->sccb.header.length = sizeof(struct chp_info_sccb);
+ data->req.command = SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION;
+ data->req.sccb = &(data->sccb);
+ data->req.status = SCLP_REQ_FILLED;
+ data->req.callback = chp_callback;
+ data->req.callback_data = &(data->completion);
+ init_completion(&data->completion);
+
+ /* Perform sclp request. */
+ rc = sclp_add_request(&(data->req));
+ if (rc)
+ goto out;
+ wait_for_completion(&data->completion);
+
+ /* Check response. */
+ if (data->req.status != SCLP_REQ_DONE) {
+ printk(KERN_WARNING TAG "read channel-path info request failed "
+ "(status=0x%02x)\n", data->req.status);
+ rc = -EIO;
+ goto out;
+ }
+ if (data->sccb.header.response_code != 0x0010) {
+ printk(KERN_WARNING TAG "read channel-path info failed "
+ "(response=0x%04x)\n", data->sccb.header.response_code);
+ rc = -EIO;
+ goto out;
+ }
+ memcpy(info->recognized, data->sccb.recognized,
+ SCLP_CHP_INFO_MASK_SIZE);
+ memcpy(info->standby, data->sccb.standby,
+ SCLP_CHP_INFO_MASK_SIZE);
+ memcpy(info->configured, data->sccb.configured,
+ SCLP_CHP_INFO_MASK_SIZE);
+out:
+ free_page((unsigned long) data);
+
+ return rc;
+}
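
The new sclp_chp.c above exports three synchronous helpers, each built on a single SCLP request: sclp_chp_configure(), sclp_chp_deconfigure() and sclp_chp_read_info(). A rough sketch of a kernel caller, for instance from the channel-path handling in drivers/s390/cio; the chp_id field assignments and the indexing into the configured mask are assumptions for illustration, not taken from this patch:

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/sclp.h>
#include <asm/chpid.h>

/* Sketch: vary a channel path on and report its configured-mask byte. */
static int example_configure_chpid(u8 cssid, u8 id)
{
	struct sclp_chp_info info;
	struct chp_id chpid;
	int rc;

	memset(&chpid, 0, sizeof(chpid));
	chpid.cssid = cssid;	/* assumed struct chp_id layout */
	chpid.id = id;

	rc = sclp_chp_configure(chpid);
	if (rc)
		return rc;
	rc = sclp_chp_read_info(&info);
	if (rc)
		return rc;
	printk(KERN_INFO "chpid %x.%02x: configured mask byte 0x%02x\n",
	       cssid, id, info.configured[id / 8]);
	return 0;
}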
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
new file mode 100644
index 00000000000..5322e5e54a9
--- /dev/null
+++ b/drivers/s390/char/sclp_config.c
@@ -0,0 +1,75 @@
+/*
+ * drivers/s390/char/sclp_config.c
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/cpu.h>
+#include <linux/sysdev.h>
+#include <linux/workqueue.h>
+#include "sclp.h"
+
+#define TAG "sclp_config: "
+
+struct conf_mgm_data {
+ u8 reserved;
+ u8 ev_qualifier;
+} __attribute__((packed));
+
+#define EV_QUAL_CAP_CHANGE 3
+
+static struct work_struct sclp_cpu_capability_work;
+
+static void sclp_cpu_capability_notify(struct work_struct *work)
+{
+ int cpu;
+ struct sys_device *sysdev;
+
+ printk(KERN_WARNING TAG "cpu capability changed.\n");
+ lock_cpu_hotplug();
+ for_each_online_cpu(cpu) {
+ sysdev = get_cpu_sysdev(cpu);
+ kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+ }
+ unlock_cpu_hotplug();
+}
+
+static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
+{
+ struct conf_mgm_data *cdata;
+
+ cdata = (struct conf_mgm_data *)(evbuf + 1);
+ if (cdata->ev_qualifier == EV_QUAL_CAP_CHANGE)
+ schedule_work(&sclp_cpu_capability_work);
+}
+
+static struct sclp_register sclp_conf_register =
+{
+ .receive_mask = EVTYP_CONFMGMDATA_MASK,
+ .receiver_fn = sclp_conf_receiver_fn,
+};
+
+static int __init sclp_conf_init(void)
+{
+ int rc;
+
+ INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
+
+ rc = sclp_register(&sclp_conf_register);
+ if (rc) {
+ printk(KERN_ERR TAG "failed to register (%d).\n", rc);
+ return rc;
+ }
+
+ if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK)) {
+ printk(KERN_WARNING TAG "no configuration management.\n");
+ sclp_unregister(&sclp_conf_register);
+ rc = -ENOSYS;
+ }
+ return rc;
+}
+
+__initcall(sclp_conf_init);
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 65aa2c85737..29fe2a5ec2f 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -46,7 +46,7 @@ struct cpi_sccb {
/* Event type structure for write message and write priority message */
static struct sclp_register sclp_cpi_event =
{
- .send_mask = EvTyp_CtlProgIdent_Mask
+ .send_mask = EVTYP_CTLPROGIDENT_MASK
};
MODULE_LICENSE("GPL");
@@ -201,7 +201,7 @@ cpi_module_init(void)
"console.\n");
return -EINVAL;
}
- if (!(sclp_cpi_event.sclp_send_mask & EvTyp_CtlProgIdent_Mask)) {
+ if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) {
printk(KERN_WARNING "cpi: no control program identification "
"support\n");
sclp_unregister(&sclp_cpi_event);
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index baa8fe669ed..45ff25e787c 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -43,7 +43,7 @@ sclp_quiesce_handler(struct evbuf_header *evbuf)
}
static struct sclp_register sclp_quiesce_event = {
- .receive_mask = EvTyp_SigQuiesce_Mask,
+ .receive_mask = EVTYP_SIGQUIESCE_MASK,
.receiver_fn = sclp_quiesce_handler
};
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 2486783ea58..bbd5b8b66f4 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -30,7 +30,7 @@
/* Event type structure for write message and write priority message */
static struct sclp_register sclp_rw_event = {
- .send_mask = EvTyp_Msg_Mask | EvTyp_PMsgCmd_Mask
+ .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK
};
/*
@@ -64,7 +64,7 @@ sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
memset(sccb, 0, sizeof(struct write_sccb));
sccb->header.length = sizeof(struct write_sccb);
sccb->msg_buf.header.length = sizeof(struct msg_buf);
- sccb->msg_buf.header.type = EvTyp_Msg;
+ sccb->msg_buf.header.type = EVTYP_MSG;
sccb->msg_buf.mdb.header.length = sizeof(struct mdb);
sccb->msg_buf.mdb.header.type = 1;
sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */
@@ -114,7 +114,7 @@ sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
memset(mto, 0, sizeof(struct mto));
mto->length = sizeof(struct mto);
mto->type = 4; /* message text object */
- mto->line_type_flags = LnTpFlgs_EndText; /* end text */
+ mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */
/* set pointer to first byte after struct mto. */
buffer->current_line = (char *) (mto + 1);
@@ -215,7 +215,7 @@ sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
case '\a': /* bell, one for several times */
/* set SCLP sound alarm bit in General Object */
buffer->sccb->msg_buf.mdb.go.general_msg_flags |=
- GnrlMsgFlgs_SndAlrm;
+ GNRLMSGFLGS_SNDALRM;
break;
case '\t': /* horizontal tabulator */
/* check if new mto needs to be created */
@@ -452,12 +452,12 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
return -EIO;
sccb = buffer->sccb;
- if (sclp_rw_event.sclp_send_mask & EvTyp_Msg_Mask)
+ if (sclp_rw_event.sclp_send_mask & EVTYP_MSG_MASK)
/* Use normal write message */
- sccb->msg_buf.header.type = EvTyp_Msg;
- else if (sclp_rw_event.sclp_send_mask & EvTyp_PMsgCmd_Mask)
+ sccb->msg_buf.header.type = EVTYP_MSG;
+ else if (sclp_rw_event.sclp_send_mask & EVTYP_PMSGCMD_MASK)
/* Use write priority message */
- sccb->msg_buf.header.type = EvTyp_PMsgCmd;
+ sccb->msg_buf.header.type = EVTYP_PMSGCMD;
else
return -ENOSYS;
buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
new file mode 100644
index 00000000000..52283daddae
--- /dev/null
+++ b/drivers/s390/char/sclp_sdias.c
@@ -0,0 +1,255 @@
+/*
+ * Sclp "store data in absolut storage"
+ *
+ * Copyright IBM Corp. 2003,2007
+ * Author(s): Michael Holzheu
+ */
+
+#include <linux/sched.h>
+#include <asm/sclp.h>
+#include <asm/debug.h>
+#include <asm/ipl.h>
+#include "sclp.h"
+#include "sclp_rw.h"
+
+#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x)
+#define ERROR_MSG(x...) printk(KERN_ALERT "SDIAS: " x)
+
+#define SDIAS_RETRIES 300
+#define SDIAS_SLEEP_TICKS 50
+
+#define EQ_STORE_DATA 0x0
+#define EQ_SIZE 0x1
+#define DI_FCP_DUMP 0x0
+#define ASA_SIZE_32 0x0
+#define ASA_SIZE_64 0x1
+#define EVSTATE_ALL_STORED 0x0
+#define EVSTATE_NO_DATA 0x3
+#define EVSTATE_PART_STORED 0x10
+
+static struct debug_info *sdias_dbf;
+
+static struct sclp_register sclp_sdias_register = {
+ .send_mask = EVTYP_SDIAS_MASK,
+};
+
+struct sdias_evbuf {
+ struct evbuf_header hdr;
+ u8 event_qual;
+ u8 data_id;
+ u64 reserved2;
+ u32 event_id;
+ u16 reserved3;
+ u8 asa_size;
+ u8 event_status;
+ u32 reserved4;
+ u32 blk_cnt;
+ u64 asa;
+ u32 reserved5;
+ u32 fbn;
+ u32 reserved6;
+ u32 lbn;
+ u16 reserved7;
+ u16 dbs;
+} __attribute__((packed));
+
+struct sdias_sccb {
+ struct sccb_header hdr;
+ struct sdias_evbuf evbuf;
+} __attribute__((packed));
+
+static struct sdias_sccb sccb __attribute__((aligned(4096)));
+
+static int sclp_req_done;
+static wait_queue_head_t sdias_wq;
+static DEFINE_MUTEX(sdias_mutex);
+
+static void sdias_callback(struct sclp_req *request, void *data)
+{
+ struct sdias_sccb *sccb;
+
+ sccb = (struct sdias_sccb *) request->sccb;
+ sclp_req_done = 1;
+ wake_up(&sdias_wq); /* Inform caller, that request is complete */
+ TRACE("callback done\n");
+}
+
+static int sdias_sclp_send(struct sclp_req *req)
+{
+ int retries;
+ int rc;
+
+ for (retries = SDIAS_RETRIES; retries; retries--) {
+ sclp_req_done = 0;
+ TRACE("add request\n");
+ rc = sclp_add_request(req);
+ if (rc) {
+ /* not initiated, wait some time and retry */
+ set_current_state(TASK_INTERRUPTIBLE);
+ TRACE("add request failed: rc = %i\n",rc);
+ schedule_timeout(SDIAS_SLEEP_TICKS);
+ continue;
+ }
+ /* initiated, wait for completion of service call */
+ wait_event(sdias_wq, (sclp_req_done == 1));
+ if (req->status == SCLP_REQ_FAILED) {
+ TRACE("sclp request failed\n");
+ rc = -EIO;
+ continue;
+ }
+ TRACE("request done\n");
+ break;
+ }
+ return rc;
+}
+
+/*
+ * Get number of blocks (4K) available in the HSA
+ */
+int sclp_sdias_blk_count(void)
+{
+ struct sclp_req request;
+ int rc;
+
+ mutex_lock(&sdias_mutex);
+
+ memset(&sccb, 0, sizeof(sccb));
+ memset(&request, 0, sizeof(request));
+
+ sccb.hdr.length = sizeof(sccb);
+ sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
+ sccb.evbuf.hdr.type = EVTYP_SDIAS;
+ sccb.evbuf.event_qual = EQ_SIZE;
+ sccb.evbuf.data_id = DI_FCP_DUMP;
+ sccb.evbuf.event_id = 4712;
+ sccb.evbuf.dbs = 1;
+
+ request.sccb = &sccb;
+ request.command = SCLP_CMDW_WRITE_EVENT_DATA;
+ request.status = SCLP_REQ_FILLED;
+ request.callback = sdias_callback;
+
+ rc = sdias_sclp_send(&request);
+ if (rc) {
+ ERROR_MSG("sclp_send failed for get_nr_blocks\n");
+ goto out;
+ }
+ if (sccb.hdr.response_code != 0x0020) {
+ TRACE("send failed: %x\n", sccb.hdr.response_code);
+ rc = -EIO;
+ goto out;
+ }
+
+ switch (sccb.evbuf.event_status) {
+ case 0:
+ rc = sccb.evbuf.blk_cnt;
+ break;
+ default:
+ ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status);
+ rc = -EIO;
+ goto out;
+ }
+ TRACE("%i blocks\n", rc);
+out:
+ mutex_unlock(&sdias_mutex);
+ return rc;
+}
+
+/*
+ * Copy from HSA to absolute storage (not reentrant):
+ *
+ * @dest : Address of buffer where data should be copied
+ * @start_blk: Start Block (beginning with 1)
+ * @nr_blks : Number of 4K blocks to copy
+ *
+ * Return Value: 0 : requested number of blocks of data copied
+ * <0: error
+ */
+int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
+{
+ struct sclp_req request;
+ int rc;
+
+ mutex_lock(&sdias_mutex);
+
+ memset(&sccb, 0, sizeof(sccb));
+ memset(&request, 0, sizeof(request));
+
+ sccb.hdr.length = sizeof(sccb);
+ sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
+ sccb.evbuf.hdr.type = EVTYP_SDIAS;
+ sccb.evbuf.hdr.flags = 0;
+ sccb.evbuf.event_qual = EQ_STORE_DATA;
+ sccb.evbuf.data_id = DI_FCP_DUMP;
+ sccb.evbuf.event_id = 4712;
+#ifdef __s390x__
+ sccb.evbuf.asa_size = ASA_SIZE_64;
+#else
+ sccb.evbuf.asa_size = ASA_SIZE_32;
+#endif
+ sccb.evbuf.event_status = 0;
+ sccb.evbuf.blk_cnt = nr_blks;
+ sccb.evbuf.asa = (unsigned long)dest;
+ sccb.evbuf.fbn = start_blk;
+ sccb.evbuf.lbn = 0;
+ sccb.evbuf.dbs = 1;
+
+ request.sccb = &sccb;
+ request.command = SCLP_CMDW_WRITE_EVENT_DATA;
+ request.status = SCLP_REQ_FILLED;
+ request.callback = sdias_callback;
+
+ rc = sdias_sclp_send(&request);
+ if (rc) {
+ ERROR_MSG("sclp_send failed: %x\n", rc);
+ goto out;
+ }
+ if (sccb.hdr.response_code != 0x0020) {
+ TRACE("copy failed: %x\n", sccb.hdr.response_code);
+ rc = -EIO;
+ goto out;
+ }
+
+ switch (sccb.evbuf.event_status) {
+ case EVSTATE_ALL_STORED:
+ TRACE("all stored\n");
+ case EVSTATE_PART_STORED:
+ TRACE("part stored: %i\n", sccb.evbuf.blk_cnt);
+ break;
+ case EVSTATE_NO_DATA:
+ TRACE("no data\n");
+ default:
+ ERROR_MSG("Error from SCLP while copying hsa. "
+ "Event status = %x\n",
+ sccb.evbuf.event_status);
+ rc = -EIO;
+ }
+out:
+ mutex_unlock(&sdias_mutex);
+ return rc;
+}
+
+int __init sdias_init(void)
+{
+ int rc;
+
+ if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+ return 0;
+ sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
+ debug_register_view(sdias_dbf, &debug_sprintf_view);
+ debug_set_level(sdias_dbf, 6);
+ rc = sclp_register(&sclp_sdias_register);
+ if (rc) {
+ ERROR_MSG("sclp register failed\n");
+ return rc;
+ }
+ init_waitqueue_head(&sdias_wq);
+ TRACE("init done\n");
+ return 0;
+}
+
+void __exit sdias_exit(void)
+{
+ debug_unregister(sdias_dbf);
+ sclp_unregister(&sclp_sdias_register);
+}
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 076816b9d52..e3b3d390b4a 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -648,7 +648,7 @@ sclp_eval_textcmd(struct gds_subvector *start,
subvec = start;
while (subvec < end) {
subvec = find_gds_subvector(subvec, end,
- GDS_KEY_SelfDefTextMsg);
+ GDS_KEY_SELFDEFTEXTMSG);
if (!subvec)
break;
sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1),
@@ -664,7 +664,7 @@ sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end)
vec = start;
while (vec < end) {
- vec = find_gds_vector(vec, end, GDS_ID_TextCmd);
+ vec = find_gds_vector(vec, end, GDS_ID_TEXTCMD);
if (!vec)
break;
sclp_eval_textcmd((struct gds_subvector *)(vec + 1),
@@ -703,7 +703,7 @@ sclp_tty_state_change(struct sclp_register *reg)
static struct sclp_register sclp_input_event =
{
- .receive_mask = EvTyp_OpCmd_Mask | EvTyp_PMsgCmd_Mask,
+ .receive_mask = EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK,
.state_change_fn = sclp_tty_state_change,
.receiver_fn = sclp_tty_receiver
};
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index f77dc33b5f8..726334757bb 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -99,8 +99,8 @@ static void sclp_vt220_emit_current(void);
/* Registration structure for our interest in SCLP event buffers */
static struct sclp_register sclp_vt220_register = {
- .send_mask = EvTyp_VT220Msg_Mask,
- .receive_mask = EvTyp_VT220Msg_Mask,
+ .send_mask = EVTYP_VT220MSG_MASK,
+ .receive_mask = EVTYP_VT220MSG_MASK,
.state_change_fn = NULL,
.receiver_fn = sclp_vt220_receiver_fn
};
@@ -202,7 +202,7 @@ sclp_vt220_callback(struct sclp_req *request, void *data)
static int
__sclp_vt220_emit(struct sclp_vt220_request *request)
{
- if (!(sclp_vt220_register.sclp_send_mask & EvTyp_VT220Msg_Mask)) {
+ if (!(sclp_vt220_register.sclp_send_mask & EVTYP_VT220MSG_MASK)) {
request->sclp_req.status = SCLP_REQ_FAILED;
return -EIO;
}
@@ -284,7 +284,7 @@ sclp_vt220_initialize_page(void *page)
sccb->header.length = sizeof(struct sclp_vt220_sccb);
sccb->header.function_code = SCLP_NORMAL_WRITE;
sccb->header.response_code = 0x0000;
- sccb->evbuf.type = EvTyp_VT220Msg;
+ sccb->evbuf.type = EVTYP_VT220MSG;
sccb->evbuf.length = sizeof(struct evbuf_header);
return request;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index b87d3b01993..a5a00e9ae4d 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -125,7 +125,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
.recording_name = "EREP",
.minor_num = 0,
.buffer_free = 1,
- .priv_lock = SPIN_LOCK_UNLOCKED,
+ .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
.autorecording = 1,
.autopurge = 1,
},
@@ -134,7 +134,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
.recording_name = "ACCOUNT",
.minor_num = 1,
.buffer_free = 1,
- .priv_lock = SPIN_LOCK_UNLOCKED,
+ .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
.autorecording = 1,
.autopurge = 1,
},
@@ -143,7 +143,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
.recording_name = "SYMPTOM",
.minor_num = 2,
.buffer_free = 1,
- .priv_lock = SPIN_LOCK_UNLOCKED,
+ .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
.autorecording = 1,
.autopurge = 1,
}
@@ -385,6 +385,9 @@ static int vmlogrdr_release (struct inode *inode, struct file *filp)
struct vmlogrdr_priv_t * logptr = filp->private_data;
+ iucv_path_sever(logptr->path, NULL);
+ kfree(logptr->path);
+ logptr->path = NULL;
if (logptr->autorecording) {
ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
if (ret)
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
new file mode 100644
index 00000000000..89d439316a5
--- /dev/null
+++ b/drivers/s390/char/zcore.c
@@ -0,0 +1,651 @@
+/*
+ * zcore module to export memory content and register sets for creating system
+ * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
+ * dump format as s390 standalone dumps.
+ *
+ * For more information please refer to Documentation/s390/zfcpdump.txt
+ *
+ * Copyright IBM Corp. 2003,2007
+ * Author(s): Michael Holzheu
+ */
+
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/utsname.h>
+#include <linux/debugfs.h>
+#include <asm/ipl.h>
+#include <asm/sclp.h>
+#include <asm/setup.h>
+#include <asm/sigp.h>
+#include <asm/uaccess.h>
+#include <asm/debug.h>
+#include <asm/processor.h>
+#include <asm/irqflags.h>
+
+#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
+#define MSG(x...) printk( KERN_ALERT x )
+#define ERROR_MSG(x...) printk ( KERN_ALERT "DUMP: " x )
+
+#define TO_USER 0
+#define TO_KERNEL 1
+
+enum arch_id {
+ ARCH_S390 = 0,
+ ARCH_S390X = 1,
+};
+
+/* dump system info */
+
+struct sys_info {
+ enum arch_id arch;
+ unsigned long sa_base;
+ u32 sa_size;
+ int cpu_map[NR_CPUS];
+ unsigned long mem_size;
+ union save_area lc_mask;
+};
+
+static struct sys_info sys_info;
+static struct debug_info *zcore_dbf;
+static int hsa_available;
+static struct dentry *zcore_dir;
+static struct dentry *zcore_file;
+
+/*
+ * Copy memory from HSA to kernel or user memory (not reentrant):
+ *
+ * @dest: Kernel or user buffer where memory should be copied to
+ * @src: Start address within HSA where data should be copied
+ * @count: Size of buffer, which should be copied
+ * @mode: Either TO_KERNEL or TO_USER
+ */
+static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
+{
+ int offs, blk_num;
+ static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
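+	/* buf is a page-sized bounce buffer; sclp_sdias_copy() only
+	 * transfers whole 4K blocks. */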
+
+ if (count == 0)
+ return 0;
+
+ /* copy first block */
+ offs = 0;
+ if ((src % PAGE_SIZE) != 0) {
+ blk_num = src / PAGE_SIZE + 2;
+ if (sclp_sdias_copy(buf, blk_num, 1)) {
+ TRACE("sclp_sdias_copy() failed\n");
+ return -EIO;
+ }
+ offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
+ if (mode == TO_USER) {
+ if (copy_to_user((__force __user void*) dest,
+ buf + (src % PAGE_SIZE), offs))
+ return -EFAULT;
+ } else
+ memcpy(dest, buf + (src % PAGE_SIZE), offs);
+ }
+ if (offs == count)
+ goto out;
+
+ /* copy middle */
+ for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
+ blk_num = (src + offs) / PAGE_SIZE + 2;
+ if (sclp_sdias_copy(buf, blk_num, 1)) {
+ TRACE("sclp_sdias_copy() failed\n");
+ return -EIO;
+ }
+ if (mode == TO_USER) {
+ if (copy_to_user((__force __user void*) dest + offs,
+ buf, PAGE_SIZE))
+ return -EFAULT;
+ } else
+ memcpy(dest + offs, buf, PAGE_SIZE);
+ }
+ if (offs == count)
+ goto out;
+
+ /* copy last block */
+ blk_num = (src + offs) / PAGE_SIZE + 2;
+ if (sclp_sdias_copy(buf, blk_num, 1)) {
+ TRACE("sclp_sdias_copy() failed\n");
+ return -EIO;
+ }
+ if (mode == TO_USER) {
+		if (copy_to_user((__force __user void*) dest + offs, buf,
+		    count - offs))
+ return -EFAULT;
+ } else
+ memcpy(dest + offs, buf, count - offs);
+out:
+ return 0;
+}
+
+static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
+{
+ return memcpy_hsa((void __force *) dest, src, count, TO_USER);
+}
+
+static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
+{
+ return memcpy_hsa(dest, src, count, TO_KERNEL);
+}
+
+static int memcpy_real(void *dest, unsigned long src, size_t count)
+{
+ unsigned long flags;
+ int rc = -EFAULT;
+ register unsigned long _dest asm("2") = (unsigned long) dest;
+ register unsigned long _len1 asm("3") = (unsigned long) count;
+ register unsigned long _src asm("4") = src;
+ register unsigned long _len2 asm("5") = (unsigned long) count;
+
+ if (count == 0)
+ return 0;
+ flags = __raw_local_irq_stnsm(0xf8); /* switch to real mode */
+ asm volatile (
+ "0: mvcle %1,%2,0x0\n"
+ "1: jo 0b\n"
+ " lhi %0,0x0\n"
+ "2:\n"
+ EX_TABLE(1b,2b)
+ : "+d" (rc)
+ : "d" (_dest), "d" (_src), "d" (_len1), "d" (_len2)
+ : "cc", "memory");
+ __raw_local_irq_ssm(flags);
+
+ return rc;
+}
+
+static int memcpy_real_user(__user void *dest, unsigned long src, size_t count)
+{
+ static char buf[4096];
+ int offs = 0, size;
+
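+	/* Not reentrant: buf is a static bounce buffer. */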
+ while (offs < count) {
+ size = min(sizeof(buf), count - offs);
+ if (memcpy_real(buf, src + offs, size))
+ return -EFAULT;
+ if (copy_to_user(dest + offs, buf, size))
+ return -EFAULT;
+ offs += size;
+ }
+ return 0;
+}
+
+#ifdef __s390x__
+/*
+ * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info
+ */
+static void __init s390x_to_s390_regs(union save_area *out, union save_area *in,
+ int cpu)
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff;
+ out->s390.acc_regs[i] = in->s390x.acc_regs[i];
+ out->s390.ctrl_regs[i] =
+ in->s390x.ctrl_regs[i] & 0x00000000ffffffff;
+ }
+	/* lowcore for 31 bit has only space for fpregs 0, 2, 4 and 6 */
+ out->s390.fp_regs[0] = in->s390x.fp_regs[0];
+ out->s390.fp_regs[1] = in->s390x.fp_regs[2];
+ out->s390.fp_regs[2] = in->s390x.fp_regs[4];
+ out->s390.fp_regs[3] = in->s390x.fp_regs[6];
+ memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4);
+ out->s390.psw[1] |= 0x8; /* set bit 12 */
+ memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4);
+ out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */
+ out->s390.pref_reg = in->s390x.pref_reg;
+ out->s390.timer = in->s390x.timer;
+ out->s390.clk_cmp = in->s390x.clk_cmp;
+}
+
+static void __init s390x_to_s390_save_areas(void)
+{
+ int i = 1;
+ static union save_area tmp;
+
+ while (zfcpdump_save_areas[i]) {
+ s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i);
+ memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp));
+ i++;
+ }
+}
+
+#endif /* __s390x__ */
+
+static int __init init_cpu_info(enum arch_id arch)
+{
+ union save_area *sa;
+
+ /* get info for boot cpu from lowcore, stored in the HSA */
+
+ sa = kmalloc(sizeof(*sa), GFP_KERNEL);
+ if (!sa) {
+ ERROR_MSG("kmalloc failed: %s: %i\n",__FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+ if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
+ ERROR_MSG("could not copy from HSA\n");
+ kfree(sa);
+ return -EIO;
+ }
+ zfcpdump_save_areas[0] = sa;
+
+#ifdef __s390x__
+ /* convert s390x regs to s390, if we are dumping an s390 Linux */
+
+ if (arch == ARCH_S390)
+ s390x_to_s390_save_areas();
+#endif
+
+ return 0;
+}
+
+static DEFINE_MUTEX(zcore_mutex);
+
+#define DUMP_VERSION 0x3
+#define DUMP_MAGIC 0xa8190173618f23fdULL
+#define DUMP_ARCH_S390X 2
+#define DUMP_ARCH_S390 1
+#define HEADER_SIZE 4096
+
+/* dump header, laid out according to the s390 crash dump format */
+
+struct zcore_header {
+ u64 magic;
+ u32 version;
+ u32 header_size;
+ u32 dump_level;
+ u32 page_size;
+ u64 mem_size;
+ u64 mem_start;
+ u64 mem_end;
+ u32 num_pages;
+ u32 pad1;
+ u64 tod;
+ cpuid_t cpu_id;
+ u32 arch_id;
+ u32 build_arch;
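+	/* pad the header to HEADER_SIZE (4096) bytes */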
+ char pad2[4016];
+} __attribute__((packed,__aligned__(16)));
+
+static struct zcore_header zcore_header = {
+ .magic = DUMP_MAGIC,
+ .version = DUMP_VERSION,
+ .header_size = 4096,
+ .dump_level = 0,
+ .page_size = PAGE_SIZE,
+ .mem_start = 0,
+#ifdef __s390x__
+ .build_arch = DUMP_ARCH_S390X,
+#else
+ .build_arch = DUMP_ARCH_S390,
+#endif
+};
+
+/*
+ * Copy lowcore info to buffer. Use map in order to copy only register parts.
+ *
+ * @buf: User buffer
+ * @sa: Pointer to save area
+ * @sa_off: Offset in save area to copy
+ * @len: Number of bytes to copy
+ */
+static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
+{
+ int i;
+ char *lc_mask = (char*)&sys_info.lc_mask;
+
+ for (i = 0; i < len; i++) {
+ if (!lc_mask[i + sa_off])
+ continue;
+ if (copy_to_user(buf + i, sa + sa_off + i, 1))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/*
+ * Copy lowcore info to buffer, if necessary
+ *
+ * @buf:   User buffer
+ * @start: Start address of buffer in dump memory
+ * @count: Size of buffer
+ */
+static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
+{
+ unsigned long end;
+ int i = 0;
+
+ if (count == 0)
+ return 0;
+
+ end = start + count;
+ while (zfcpdump_save_areas[i]) {
+ unsigned long cp_start, cp_end; /* copy range */
+ unsigned long sa_start, sa_end; /* save area range */
+ unsigned long prefix;
+ unsigned long sa_off, len, buf_off;
+
+ if (sys_info.arch == ARCH_S390)
+ prefix = zfcpdump_save_areas[i]->s390.pref_reg;
+ else
+ prefix = zfcpdump_save_areas[i]->s390x.pref_reg;
+
+ sa_start = prefix + sys_info.sa_base;
+ sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
+
+ if ((end < sa_start) || (start > sa_end))
+ goto next;
+ cp_start = max(start, sa_start);
+ cp_end = min(end, sa_end);
+
+ buf_off = cp_start - start;
+ sa_off = cp_start - sa_start;
+ len = cp_end - cp_start;
+
+ TRACE("copy_lc for: %lx\n", start);
+ if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
+ return -EFAULT;
+next:
+ i++;
+ }
+ return 0;
+}
+
+/*
+ * Read routine for the zcore debugfs file:
+ * First 4K: dump header
+ * Next 32MB: HSA memory
+ * Rest: read from absolute memory
+ */
+static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned long mem_start; /* Start address in memory */
+ size_t mem_offs; /* Offset in dump memory */
+ size_t hdr_count; /* Size of header part of output buffer */
+ size_t size;
+ int rc;
+
+ mutex_lock(&zcore_mutex);
+
+ if (*ppos > (sys_info.mem_size + HEADER_SIZE)) {
+ rc = -EINVAL;
+ goto fail;
+ }
+
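+	/* Clamp the request to the remaining dump size (header + memory) */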
+ count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos));
+
+ /* Copy dump header */
+ if (*ppos < HEADER_SIZE) {
+ size = min(count, (size_t) (HEADER_SIZE - *ppos));
+		if (copy_to_user(buf, (char *) &zcore_header + *ppos, size)) {
+ rc = -EFAULT;
+ goto fail;
+ }
+ hdr_count = size;
+ mem_start = 0;
+ } else {
+ hdr_count = 0;
+ mem_start = *ppos - HEADER_SIZE;
+ }
+
+ mem_offs = 0;
+
+ /* Copy from HSA data */
+ if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) {
+ size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE
+ - mem_start));
+ rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
+ if (rc)
+ goto fail;
+
+ mem_offs += size;
+ }
+
+ /* Copy from real mem */
+ size = count - mem_offs - hdr_count;
+ rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs,
+ size);
+ if (rc)
+ goto fail;
+
+ /*
+ * Since s390 dump analysis tools like lcrash or crash
+ * expect register sets in the prefix pages of the cpus,
+ * we copy them into the read buffer, if necessary.
+ * buf + hdr_count: Start of memory part of output buffer
+ * mem_start: Start memory address to copy from
+ * count - hdr_count: Size of memory area to copy
+ */
+ if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) {
+ rc = -EFAULT;
+ goto fail;
+ }
+ *ppos += count;
+fail:
+ mutex_unlock(&zcore_mutex);
+ return (rc < 0) ? rc : count;
+}
+
+static int zcore_open(struct inode *inode, struct file *filp)
+{
+ if (!hsa_available)
+ return -ENODATA;
+ else
+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+}
+
+static int zcore_release(struct inode *inode, struct file *filep)
+{
+ diag308(DIAG308_REL_HSA, NULL);
+ hsa_available = 0;
+ return 0;
+}
+
+static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
+{
+ loff_t rc;
+
+ mutex_lock(&zcore_mutex);
+ switch (orig) {
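+	/* orig: 0 = SEEK_SET, 1 = SEEK_CUR; SEEK_END is not supported */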
+ case 0:
+ file->f_pos = offset;
+ rc = file->f_pos;
+ break;
+ case 1:
+ file->f_pos += offset;
+ rc = file->f_pos;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+ mutex_unlock(&zcore_mutex);
+ return rc;
+}
+
+static struct file_operations zcore_fops = {
+ .owner = THIS_MODULE,
+ .llseek = zcore_lseek,
+ .read = zcore_read,
+ .open = zcore_open,
+ .release = zcore_release,
+};
+
+static void __init set_s390_lc_mask(union save_area *map)
+{
+ memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save));
+ memset(&map->s390.timer, 0xff, sizeof(map->s390.timer));
+ memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp));
+ memset(&map->s390.psw, 0xff, sizeof(map->s390.psw));
+ memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg));
+ memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs));
+ memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs));
+ memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs));
+ memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs));
+}
+
+static void __init set_s390x_lc_mask(union save_area *map)
+{
+ memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs));
+ memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs));
+ memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw));
+ memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg));
+ memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg));
+ memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg));
+ memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer));
+ memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp));
+ memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs));
+ memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs));
+}
+
+/*
+ * Initialize dump globals for a given architecture
+ */
+static int __init sys_info_init(enum arch_id arch)
+{
+ switch (arch) {
+ case ARCH_S390X:
+ MSG("DETECTED 'S390X (64 bit) OS'\n");
+ sys_info.sa_base = SAVE_AREA_BASE_S390X;
+ sys_info.sa_size = sizeof(struct save_area_s390x);
+ set_s390x_lc_mask(&sys_info.lc_mask);
+ break;
+ case ARCH_S390:
+ MSG("DETECTED 'S390 (32 bit) OS'\n");
+ sys_info.sa_base = SAVE_AREA_BASE_S390;
+ sys_info.sa_size = sizeof(struct save_area_s390);
+ set_s390_lc_mask(&sys_info.lc_mask);
+ break;
+ default:
+		ERROR_MSG("unknown architecture 0x%x.\n", arch);
+ return -EINVAL;
+ }
+ sys_info.arch = arch;
+ if (init_cpu_info(arch)) {
+ ERROR_MSG("get cpu info failed\n");
+ return -ENOMEM;
+ }
+ sys_info.mem_size = real_memory_size;
+
+ return 0;
+}
+
+static int __init check_sdias(void)
+{
+ int rc, act_hsa_size;
+
+ rc = sclp_sdias_blk_count();
+ if (rc < 0) {
+ ERROR_MSG("Could not determine HSA size\n");
+ return rc;
+ }
+ act_hsa_size = (rc - 1) * PAGE_SIZE;
+ if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
+ ERROR_MSG("HSA size too small: %i\n", act_hsa_size);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void __init zcore_header_init(int arch, struct zcore_header *hdr)
+{
+ if (arch == ARCH_S390X)
+ hdr->arch_id = DUMP_ARCH_S390X;
+ else
+ hdr->arch_id = DUMP_ARCH_S390;
+ hdr->mem_size = sys_info.mem_size;
+ hdr->mem_end = sys_info.mem_size;
+ hdr->num_pages = sys_info.mem_size / PAGE_SIZE;
+ hdr->tod = get_clock();
+ get_cpu_id(&hdr->cpu_id);
+}
+
+extern int sdias_init(void);
+
+static int __init zcore_init(void)
+{
+ unsigned char arch;
+ int rc;
+
+ if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+ return -ENODATA;
+
+ zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
+ debug_register_view(zcore_dbf, &debug_sprintf_view);
+ debug_set_level(zcore_dbf, 6);
+
+ TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno);
+ TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
+ TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
+
+ rc = sdias_init();
+ if (rc)
+ goto fail;
+
+ rc = check_sdias();
+ if (rc) {
+ ERROR_MSG("Dump initialization failed\n");
+ goto fail;
+ }
+
+ rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
+ if (rc) {
+		ERROR_MSG("sdias memcpy for arch id failed\n");
+ goto fail;
+ }
+
+#ifndef __s390x__
+ if (arch == ARCH_S390X) {
+ ERROR_MSG("32 bit dumper can't dump 64 bit system!\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+#endif
+
+ rc = sys_info_init(arch);
+ if (rc) {
+ ERROR_MSG("arch init failed\n");
+ goto fail;
+ }
+
+ zcore_header_init(arch, &zcore_header);
+
+ zcore_dir = debugfs_create_dir("zcore" , NULL);
+ if (!zcore_dir) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
+ &zcore_fops);
+ if (!zcore_file) {
+ debugfs_remove(zcore_dir);
+ rc = -ENOMEM;
+ goto fail;
+ }
+ hsa_available = 1;
+ return 0;
+
+fail:
+ diag308(DIAG308_REL_HSA, NULL);
+ return rc;
+}
+
+extern void sdias_exit(void);
+
+static void __exit zcore_exit(void)
+{
+ debug_unregister(zcore_dbf);
+ sdias_exit();
+ diag308(DIAG308_REL_HSA, NULL);
+}
+
+MODULE_AUTHOR("Copyright IBM Corp. 2003,2007");
+MODULE_DESCRIPTION("zcore module for zfcpdump support");
+MODULE_LICENSE("GPL");
+
+subsys_initcall(zcore_init);
+module_exit(zcore_exit);
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index c490c2a1c2f..cfaf77b320f 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,7 +2,7 @@
# Makefile for the S/390 common i/o drivers
#
-obj-y += airq.o blacklist.o chsc.o cio.o css.o
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o
ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o
obj-y += ccw_device.o cmf.o
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 5aeb68e732b..e5ccda63e88 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -75,8 +75,10 @@ static void ccwgroup_ungroup_callback(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ mutex_lock(&gdev->reg_mutex);
__ccwgroup_remove_symlinks(gdev);
device_unregister(dev);
+ mutex_unlock(&gdev->reg_mutex);
}
static ssize_t
@@ -173,7 +175,8 @@ ccwgroup_create(struct device *root,
return -ENOMEM;
atomic_set(&gdev->onoff, 0);
-
+ mutex_init(&gdev->reg_mutex);
+ mutex_lock(&gdev->reg_mutex);
for (i = 0; i < argc; i++) {
gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]);
@@ -183,12 +186,12 @@ ccwgroup_create(struct device *root,
|| gdev->cdev[i]->id.driver_info !=
gdev->cdev[0]->id.driver_info) {
rc = -EINVAL;
- goto free_dev;
+ goto error;
}
/* Don't allow a device to belong to more than one group. */
if (gdev->cdev[i]->dev.driver_data) {
rc = -EINVAL;
- goto free_dev;
+ goto error;
}
gdev->cdev[i]->dev.driver_data = gdev;
}
@@ -203,9 +206,8 @@ ccwgroup_create(struct device *root,
gdev->cdev[0]->dev.bus_id);
rc = device_register(&gdev->dev);
-
if (rc)
- goto free_dev;
+ goto error;
get_device(&gdev->dev);
rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
@@ -216,6 +218,7 @@ ccwgroup_create(struct device *root,
rc = __ccwgroup_create_symlinks(gdev);
if (!rc) {
+ mutex_unlock(&gdev->reg_mutex);
put_device(&gdev->dev);
return 0;
}
@@ -224,19 +227,12 @@ ccwgroup_create(struct device *root,
error:
for (i = 0; i < argc; i++)
if (gdev->cdev[i]) {
- put_device(&gdev->cdev[i]->dev);
- gdev->cdev[i]->dev.driver_data = NULL;
- }
- put_device(&gdev->dev);
- return rc;
-free_dev:
- for (i = 0; i < argc; i++)
- if (gdev->cdev[i]) {
if (gdev->cdev[i]->dev.driver_data == gdev)
gdev->cdev[i]->dev.driver_data = NULL;
put_device(&gdev->cdev[i]->dev);
}
- kfree(gdev);
+ mutex_unlock(&gdev->reg_mutex);
+ put_device(&gdev->dev);
return rc;
}
@@ -422,8 +418,12 @@ ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
get_driver(&cdriver->driver);
while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
__ccwgroup_match_all))) {
- __ccwgroup_remove_symlinks(to_ccwgroupdev(dev));
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+
+ mutex_lock(&gdev->reg_mutex);
+ __ccwgroup_remove_symlinks(gdev);
device_unregister(dev);
+ mutex_unlock(&gdev->reg_mutex);
put_device(dev);
}
put_driver(&cdriver->driver);
@@ -444,8 +444,10 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
if (cdev->dev.driver_data) {
gdev = (struct ccwgroup_device *)cdev->dev.driver_data;
if (get_device(&gdev->dev)) {
+ mutex_lock(&gdev->reg_mutex);
if (device_is_registered(&gdev->dev))
return gdev;
+ mutex_unlock(&gdev->reg_mutex);
put_device(&gdev->dev);
}
return NULL;
@@ -465,6 +467,7 @@ ccwgroup_remove_ccwdev(struct ccw_device *cdev)
if (gdev) {
__ccwgroup_remove_symlinks(gdev);
device_unregister(&gdev->dev);
+ mutex_unlock(&gdev->reg_mutex);
put_device(&gdev->dev);
}
}
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
new file mode 100644
index 00000000000..ac289e6eadf
--- /dev/null
+++ b/drivers/s390/cio/chp.c
@@ -0,0 +1,683 @@
+/*
+ * drivers/s390/cio/chp.c
+ *
+ * Copyright IBM Corp. 1999,2007
+ * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Arnd Bergmann (arndb@de.ibm.com)
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <asm/errno.h>
+#include <asm/chpid.h>
+#include <asm/sclp.h>
+
+#include "cio.h"
+#include "css.h"
+#include "ioasm.h"
+#include "cio_debug.h"
+#include "chp.h"
+
+#define to_channelpath(device) container_of(device, struct channel_path, dev)
+#define CHP_INFO_UPDATE_INTERVAL 1*HZ
+
+enum cfg_task_t {
+ cfg_none,
+ cfg_configure,
+ cfg_deconfigure
+};
+
+/* Map for pending configure tasks. */
+static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
+static DEFINE_MUTEX(cfg_lock);
+static int cfg_busy;
+
+/* Map for channel-path status. */
+static struct sclp_chp_info chp_info;
+static DEFINE_MUTEX(info_lock);
+
+/* Time after which channel-path status may be outdated. */
+static unsigned long chp_info_expires;
+
+/* Workqueue to perform pending configure tasks. */
+static struct workqueue_struct *chp_wq;
+static struct work_struct cfg_work;
+
+/* Wait queue for configure completion events. */
+static wait_queue_head_t cfg_wait_queue;
+
+/* Return channel_path struct for given chpid. */
+static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
+{
+ return css[chpid.cssid]->chps[chpid.id];
+}
+
+/* Set vary state for given chpid. */
+static void set_chp_logically_online(struct chp_id chpid, int onoff)
+{
+ chpid_to_chp(chpid)->state = onoff;
+}
+
+/* On success return 0 if the channel-path is varied offline, 1 if it is
+ * varied online. Return -ENODEV if the channel-path is not registered. */
+int chp_get_status(struct chp_id chpid)
+{
+ return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
+}
+
+/**
+ * chp_get_sch_opm - return opm for subchannel
+ * @sch: subchannel
+ *
+ * Calculate and return the operational path mask (opm) based on the chpids
+ * used by the subchannel and the status of the associated channel-paths.
+ */
+u8 chp_get_sch_opm(struct subchannel *sch)
+{
+ struct chp_id chpid;
+ int opm;
+ int i;
+
+ opm = 0;
+ chp_id_init(&chpid);
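+	/* opm is built MSB first: channel path i maps to bit 0x80 >> i */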
+ for (i=0; i < 8; i++) {
+ opm <<= 1;
+ chpid.id = sch->schib.pmcw.chpid[i];
+ if (chp_get_status(chpid) != 0)
+ opm |= 1;
+ }
+ return opm;
+}
+
+/**
+ * chp_is_registered - check if a channel-path is registered
+ * @chpid: channel-path ID
+ *
+ * Return non-zero if a channel-path with the given chpid is registered,
+ * zero otherwise.
+ */
+int chp_is_registered(struct chp_id chpid)
+{
+ return chpid_to_chp(chpid) != NULL;
+}
+
+/*
+ * Function: s390_vary_chpid
+ * Varies the specified chpid online or offline
+ */
+static int s390_vary_chpid(struct chp_id chpid, int on)
+{
+ char dbf_text[15];
+ int status;
+
+ sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
+ chpid.id);
+ CIO_TRACE_EVENT( 2, dbf_text);
+
+ status = chp_get_status(chpid);
+ if (status < 0) {
+ printk(KERN_ERR "Can't vary unknown chpid %x.%02x\n",
+ chpid.cssid, chpid.id);
+ return -EINVAL;
+ }
+
+ if (!on && !status) {
+ printk(KERN_ERR "chpid %x.%02x is already offline\n",
+ chpid.cssid, chpid.id);
+ return -EINVAL;
+ }
+
+ set_chp_logically_online(chpid, on);
+ chsc_chp_vary(chpid, on);
+ return 0;
+}
+
+/*
+ * Channel measurement related functions
+ */
+static ssize_t chp_measurement_chars_read(struct kobject *kobj, char *buf,
+ loff_t off, size_t count)
+{
+ struct channel_path *chp;
+ unsigned int size;
+
+ chp = to_channelpath(container_of(kobj, struct device, kobj));
+ if (!chp->cmg_chars)
+ return 0;
+
+ size = sizeof(struct cmg_chars);
+
+ if (off > size)
+ return 0;
+ if (off + count > size)
+ count = size - off;
+ memcpy(buf, chp->cmg_chars + off, count);
+ return count;
+}
+
+static struct bin_attribute chp_measurement_chars_attr = {
+ .attr = {
+ .name = "measurement_chars",
+ .mode = S_IRUSR,
+ .owner = THIS_MODULE,
+ },
+ .size = sizeof(struct cmg_chars),
+ .read = chp_measurement_chars_read,
+};
+
+static void chp_measurement_copy_block(struct cmg_entry *buf,
+ struct channel_subsystem *css,
+ struct chp_id chpid)
+{
+ void *area;
+ struct cmg_entry *entry, reference_buf;
+ int idx;
+
+ if (chpid.id < 128) {
+ area = css->cub_addr1;
+ idx = chpid.id;
+ } else {
+ area = css->cub_addr2;
+ idx = chpid.id - 128;
+ }
+ entry = area + (idx * sizeof(struct cmg_entry));
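+	/* Copy the entry until two consecutive reads return the same first
+	 * value, i.e. a consistent snapshot was taken. */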
+ do {
+ memcpy(buf, entry, sizeof(*entry));
+ memcpy(&reference_buf, entry, sizeof(*entry));
+ } while (reference_buf.values[0] != buf->values[0]);
+}
+
+static ssize_t chp_measurement_read(struct kobject *kobj, char *buf,
+ loff_t off, size_t count)
+{
+ struct channel_path *chp;
+ struct channel_subsystem *css;
+ unsigned int size;
+
+ chp = to_channelpath(container_of(kobj, struct device, kobj));
+ css = to_css(chp->dev.parent);
+
+ size = sizeof(struct cmg_entry);
+
+ /* Only allow single reads. */
+ if (off || count < size)
+ return 0;
+ chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
+ count = size;
+ return count;
+}
+
+static struct bin_attribute chp_measurement_attr = {
+ .attr = {
+ .name = "measurement",
+ .mode = S_IRUSR,
+ .owner = THIS_MODULE,
+ },
+ .size = sizeof(struct cmg_entry),
+ .read = chp_measurement_read,
+};
+
+void chp_remove_cmg_attr(struct channel_path *chp)
+{
+ device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
+ device_remove_bin_file(&chp->dev, &chp_measurement_attr);
+}
+
+int chp_add_cmg_attr(struct channel_path *chp)
+{
+ int ret;
+
+ ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
+ if (ret)
+ return ret;
+ ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
+ if (ret)
+ device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
+ return ret;
+}
+
+/*
+ * Files for the channel path entries.
+ */
+static ssize_t chp_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_path *chp = container_of(dev, struct channel_path, dev);
+
+ if (!chp)
+ return 0;
+ return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") :
+ sprintf(buf, "offline\n"));
+}
+
+static ssize_t chp_status_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct channel_path *cp = container_of(dev, struct channel_path, dev);
+ char cmd[10];
+ int num_args;
+ int error;
+
+ num_args = sscanf(buf, "%5s", cmd);
+ if (!num_args)
+ return count;
+
+ if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1"))
+ error = s390_vary_chpid(cp->chpid, 1);
+ else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0"))
+ error = s390_vary_chpid(cp->chpid, 0);
+ else
+ error = -EINVAL;
+
+ return error < 0 ? error : count;
+}
+
+static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
+
+static ssize_t chp_configure_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_path *cp;
+ int status;
+
+ cp = container_of(dev, struct channel_path, dev);
+ status = chp_info_get_status(cp->chpid);
+ if (status < 0)
+ return status;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", status);
+}
+
+static int cfg_wait_idle(void);
+
+static ssize_t chp_configure_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct channel_path *cp;
+ int val;
+ char delim;
+
+ if (sscanf(buf, "%d %c", &val, &delim) != 1)
+ return -EINVAL;
+ if (val != 0 && val != 1)
+ return -EINVAL;
+ cp = container_of(dev, struct channel_path, dev);
+ chp_cfg_schedule(cp->chpid, val);
+ cfg_wait_idle();
+
+ return count;
+}
+
+static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
+
+static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct channel_path *chp = container_of(dev, struct channel_path, dev);
+
+ if (!chp)
+ return 0;
+ return sprintf(buf, "%x\n", chp->desc.desc);
+}
+
+static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
+
+static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+
+ if (!chp)
+ return 0;
+ if (chp->cmg == -1) /* channel measurements not available */
+ return sprintf(buf, "unknown\n");
+ return sprintf(buf, "%x\n", chp->cmg);
+}
+
+static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
+
+static ssize_t chp_shared_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+
+ if (!chp)
+ return 0;
+ if (chp->shared == -1) /* channel measurements not available */
+ return sprintf(buf, "unknown\n");
+ return sprintf(buf, "%x\n", chp->shared);
+}
+
+static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
+
+static struct attribute * chp_attrs[] = {
+ &dev_attr_status.attr,
+ &dev_attr_configure.attr,
+ &dev_attr_type.attr,
+ &dev_attr_cmg.attr,
+ &dev_attr_shared.attr,
+ NULL,
+};
+
+static struct attribute_group chp_attr_group = {
+ .attrs = chp_attrs,
+};
+
+static void chp_release(struct device *dev)
+{
+ struct channel_path *cp;
+
+ cp = container_of(dev, struct channel_path, dev);
+ kfree(cp);
+}
+
+/**
+ * chp_new - register a new channel-path
+ * @chpid: channel-path ID
+ *
+ * Create and register data structure representing new channel-path. Return
+ * zero on success, non-zero otherwise.
+ */
+int chp_new(struct chp_id chpid)
+{
+ struct channel_path *chp;
+ int ret;
+
+ if (chp_is_registered(chpid))
+ return 0;
+ chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
+ if (!chp)
+ return -ENOMEM;
+
+ /* fill in status, etc. */
+ chp->chpid = chpid;
+ chp->state = 1;
+ chp->dev.parent = &css[chpid.cssid]->device;
+ chp->dev.release = chp_release;
+ snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid,
+ chpid.id);
+
+ /* Obtain channel path description and fill it in. */
+ ret = chsc_determine_channel_path_description(chpid, &chp->desc);
+ if (ret)
+ goto out_free;
+ if ((chp->desc.flags & 0x80) == 0) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+ /* Get channel-measurement characteristics. */
+ if (css_characteristics_avail && css_chsc_characteristics.scmc
+ && css_chsc_characteristics.secm) {
+ ret = chsc_get_channel_measurement_chars(chp);
+ if (ret)
+ goto out_free;
+ } else {
+ static int msg_done;
+
+ if (!msg_done) {
+ printk(KERN_WARNING "cio: Channel measurements not "
+ "available, continuing.\n");
+ msg_done = 1;
+ }
+ chp->cmg = -1;
+ }
+
+ /* make it known to the system */
+ ret = device_register(&chp->dev);
+ if (ret) {
+ printk(KERN_WARNING "%s: could not register %x.%02x\n",
+ __func__, chpid.cssid, chpid.id);
+ goto out_free;
+ }
+ ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
+ if (ret) {
+ device_unregister(&chp->dev);
+ goto out_free;
+ }
+ mutex_lock(&css[chpid.cssid]->mutex);
+ if (css[chpid.cssid]->cm_enabled) {
+ ret = chp_add_cmg_attr(chp);
+ if (ret) {
+ sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
+ device_unregister(&chp->dev);
+ mutex_unlock(&css[chpid.cssid]->mutex);
+ goto out_free;
+ }
+ }
+ css[chpid.cssid]->chps[chpid.id] = chp;
+ mutex_unlock(&css[chpid.cssid]->mutex);
+ return ret;
+out_free:
+ kfree(chp);
+ return ret;
+}
+
+/**
+ * chp_get_chp_desc - return newly allocated channel-path description
+ * @chpid: channel-path ID
+ *
+ * On success return a newly allocated copy of the channel-path description
+ * data associated with the given channel-path ID. Return %NULL on error.
+ */
+void *chp_get_chp_desc(struct chp_id chpid)
+{
+ struct channel_path *chp;
+ struct channel_path_desc *desc;
+
+ chp = chpid_to_chp(chpid);
+ if (!chp)
+ return NULL;
+ desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+ memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
+ return desc;
+}
+
+/**
+ * chp_process_crw - process channel-path status change
+ * @id: channel-path ID number
+ * @status: non-zero if channel-path has become available, zero otherwise
+ *
+ * Handle channel-report-words indicating that the status of a channel-path
+ * has changed.
+ */
+void chp_process_crw(int id, int status)
+{
+ struct chp_id chpid;
+
+ chp_id_init(&chpid);
+ chpid.id = id;
+ if (status) {
+ if (!chp_is_registered(chpid))
+ chp_new(chpid);
+ chsc_chp_online(chpid);
+ } else
+ chsc_chp_offline(chpid);
+}
+
+static inline int info_bit_num(struct chp_id id)
+{
+ return id.id + id.cssid * (__MAX_CHPID + 1);
+}
+
+/* Force a chp_info refresh on the next call to info_update(). */
+static void info_expire(void)
+{
+ mutex_lock(&info_lock);
+ chp_info_expires = jiffies - 1;
+ mutex_unlock(&info_lock);
+}
+
+/* Ensure that chp_info is up-to-date. */
+static int info_update(void)
+{
+ int rc;
+
+ mutex_lock(&info_lock);
+ rc = 0;
+ if (time_after(jiffies, chp_info_expires)) {
+ /* Data is too old, update. */
+ rc = sclp_chp_read_info(&chp_info);
+		chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
+ }
+ mutex_unlock(&info_lock);
+
+ return rc;
+}
+
+/**
+ * chp_info_get_status - retrieve configure status of a channel-path
+ * @chpid: channel-path ID
+ *
+ * On success, return 0 for standby, 1 for configured, 2 for reserved,
+ * 3 for not recognized. Return negative error code on error.
+ */
+int chp_info_get_status(struct chp_id chpid)
+{
+ int rc;
+ int bit;
+
+ rc = info_update();
+ if (rc)
+ return rc;
+
+ bit = info_bit_num(chpid);
+ mutex_lock(&info_lock);
+ if (!chp_test_bit(chp_info.recognized, bit))
+ rc = CHP_STATUS_NOT_RECOGNIZED;
+ else if (chp_test_bit(chp_info.configured, bit))
+ rc = CHP_STATUS_CONFIGURED;
+ else if (chp_test_bit(chp_info.standby, bit))
+ rc = CHP_STATUS_STANDBY;
+ else
+ rc = CHP_STATUS_RESERVED;
+ mutex_unlock(&info_lock);
+
+ return rc;
+}
+
+/* Return configure task for chpid. */
+static enum cfg_task_t cfg_get_task(struct chp_id chpid)
+{
+ return chp_cfg_task[chpid.cssid][chpid.id];
+}
+
+/* Set configure task for chpid. */
+static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
+{
+ chp_cfg_task[chpid.cssid][chpid.id] = cfg;
+}
+
+/* Perform one configure/deconfigure request. Reschedule the work function
+ * until no request is left. */
+static void cfg_func(struct work_struct *work)
+{
+ struct chp_id chpid;
+ enum cfg_task_t t;
+
+ mutex_lock(&cfg_lock);
+ t = cfg_none;
+ chp_id_for_each(&chpid) {
+ t = cfg_get_task(chpid);
+ if (t != cfg_none) {
+ cfg_set_task(chpid, cfg_none);
+ break;
+ }
+ }
+ mutex_unlock(&cfg_lock);
+
+ switch (t) {
+ case cfg_configure:
+ sclp_chp_configure(chpid);
+ info_expire();
+ chsc_chp_online(chpid);
+ break;
+ case cfg_deconfigure:
+ sclp_chp_deconfigure(chpid);
+ info_expire();
+ chsc_chp_offline(chpid);
+ break;
+ case cfg_none:
+ /* Get updated information after last change. */
+ info_update();
+ mutex_lock(&cfg_lock);
+ cfg_busy = 0;
+ mutex_unlock(&cfg_lock);
+ wake_up_interruptible(&cfg_wait_queue);
+ return;
+ }
+ queue_work(chp_wq, &cfg_work);
+}
+
+/**
+ * chp_cfg_schedule - schedule chpid configuration request
+ * @chpid: channel-path ID
+ * @configure: Non-zero for configure, zero for deconfigure
+ *
+ * Schedule a channel-path configuration/deconfiguration request.
+ */
+void chp_cfg_schedule(struct chp_id chpid, int configure)
+{
+ CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
+ configure);
+ mutex_lock(&cfg_lock);
+ cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
+ cfg_busy = 1;
+ mutex_unlock(&cfg_lock);
+ queue_work(chp_wq, &cfg_work);
+}
+
+/**
+ * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
+ * @chpid: channel-path ID
+ *
+ * Cancel an active channel-path deconfiguration request if it has not yet
+ * been performed.
+ */
+void chp_cfg_cancel_deconfigure(struct chp_id chpid)
+{
+ CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
+ mutex_lock(&cfg_lock);
+ if (cfg_get_task(chpid) == cfg_deconfigure)
+ cfg_set_task(chpid, cfg_none);
+ mutex_unlock(&cfg_lock);
+}
+
+static int cfg_wait_idle(void)
+{
+ if (wait_event_interruptible(cfg_wait_queue, !cfg_busy))
+ return -ERESTARTSYS;
+ return 0;
+}
+
+static int __init chp_init(void)
+{
+ struct chp_id chpid;
+
+ chp_wq = create_singlethread_workqueue("cio_chp");
+ if (!chp_wq)
+ return -ENOMEM;
+ INIT_WORK(&cfg_work, cfg_func);
+ init_waitqueue_head(&cfg_wait_queue);
+ if (info_update())
+ return 0;
+ /* Register available channel-paths. */
+ chp_id_for_each(&chpid) {
+ if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
+ chp_new(chpid);
+ }
+
+ return 0;
+}
+
+subsys_initcall(chp_init);
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
new file mode 100644
index 00000000000..65286563c59
--- /dev/null
+++ b/drivers/s390/cio/chp.h
@@ -0,0 +1,53 @@
+/*
+ * drivers/s390/cio/chp.h
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef S390_CHP_H
+#define S390_CHP_H S390_CHP_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <asm/chpid.h>
+#include "chsc.h"
+
+#define CHP_STATUS_STANDBY 0
+#define CHP_STATUS_CONFIGURED 1
+#define CHP_STATUS_RESERVED 2
+#define CHP_STATUS_NOT_RECOGNIZED 3
+
+static inline int chp_test_bit(u8 *bitmap, int num)
+{
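+	/* Bit 0 is the most significant bit of the first byte. */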
+ int byte = num >> 3;
+ int mask = 128 >> (num & 7);
+
+ return (bitmap[byte] & mask) ? 1 : 0;
+}
+
+struct channel_path {
+ struct chp_id chpid;
+ int state;
+ struct channel_path_desc desc;
+ /* Channel-measurement related stuff: */
+ int cmg;
+ int shared;
+ void *cmg_chars;
+ struct device dev;
+};
+
+int chp_get_status(struct chp_id chpid);
+u8 chp_get_sch_opm(struct subchannel *sch);
+int chp_is_registered(struct chp_id chpid);
+void *chp_get_chp_desc(struct chp_id chpid);
+void chp_process_crw(int id, int available);
+void chp_remove_cmg_attr(struct channel_path *chp);
+int chp_add_cmg_attr(struct channel_path *chp);
+int chp_new(struct chp_id chpid);
+void chp_cfg_schedule(struct chp_id chpid, int configure);
+void chp_cfg_cancel_deconfigure(struct chp_id chpid);
+int chp_info_get_status(struct chp_id chpid);
+
+#endif /* S390_CHP_H */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6f05a44e381..ea92ac4d657 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -15,202 +15,124 @@
#include <linux/device.h>
#include <asm/cio.h>
+#include <asm/chpid.h>
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
+#include "chp.h"
#include "chsc.h"
static void *sei_page;
-static int new_channel_path(int chpid);
-
-static inline void
-set_chp_logically_online(int chp, int onoff)
-{
- css[0]->chps[chp]->state = onoff;
-}
-
-static int
-get_chp_status(int chp)
-{
- return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
-}
-
-void
-chsc_validate_chpids(struct subchannel *sch)
-{
- int mask, chp;
-
- for (chp = 0; chp <= 7; chp++) {
- mask = 0x80 >> chp;
- if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
- /* disable using this path */
- sch->opm &= ~mask;
- }
-}
-
-void
-chpid_is_actually_online(int chp)
-{
- int state;
-
- state = get_chp_status(chp);
- if (state < 0) {
- need_rescan = 1;
- queue_work(slow_path_wq, &slow_path_work);
- } else
- WARN_ON(!state);
-}
+struct chsc_ssd_area {
+ struct chsc_header request;
+ u16 :10;
+ u16 ssid:2;
+ u16 :4;
+ u16 f_sch; /* first subchannel */
+ u16 :16;
+ u16 l_sch; /* last subchannel */
+ u32 :32;
+ struct chsc_header response;
+ u32 :32;
+ u8 sch_valid : 1;
+ u8 dev_valid : 1;
+ u8 st : 3; /* subchannel type */
+ u8 zeroes : 3;
+ u8 unit_addr; /* unit address */
+ u16 devno; /* device number */
+ u8 path_mask;
+ u8 fla_valid_mask;
+ u16 sch; /* subchannel */
+ u8 chpid[8]; /* chpids 0-7 */
+ u16 fla[8]; /* full link addresses 0-7 */
+} __attribute__ ((packed));
-/* FIXME: this is _always_ called for every subchannel. shouldn't we
- * process more than one at a time? */
-static int
-chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
+int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
- int ccode, j;
-
- struct {
- struct chsc_header request;
- u16 reserved1a:10;
- u16 ssid:2;
- u16 reserved1b:4;
- u16 f_sch; /* first subchannel */
- u16 reserved2;
- u16 l_sch; /* last subchannel */
- u32 reserved3;
- struct chsc_header response;
- u32 reserved4;
- u8 sch_valid : 1;
- u8 dev_valid : 1;
- u8 st : 3; /* subchannel type */
- u8 zeroes : 3;
- u8 unit_addr; /* unit address */
- u16 devno; /* device number */
- u8 path_mask;
- u8 fla_valid_mask;
- u16 sch; /* subchannel */
- u8 chpid[8]; /* chpids 0-7 */
- u16 fla[8]; /* full link addresses 0-7 */
- } __attribute__ ((packed)) *ssd_area;
-
- ssd_area = page;
+ unsigned long page;
+ struct chsc_ssd_area *ssd_area;
+ int ccode;
+ int ret;
+ int i;
+ int mask;
+ page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!page)
+ return -ENOMEM;
+ ssd_area = (struct chsc_ssd_area *) page;
ssd_area->request.length = 0x0010;
ssd_area->request.code = 0x0004;
-
- ssd_area->ssid = sch->schid.ssid;
- ssd_area->f_sch = sch->schid.sch_no;
- ssd_area->l_sch = sch->schid.sch_no;
+ ssd_area->ssid = schid.ssid;
+ ssd_area->f_sch = schid.sch_no;
+ ssd_area->l_sch = schid.sch_no;
ccode = chsc(ssd_area);
+ /* Check response. */
if (ccode > 0) {
- pr_debug("chsc returned with ccode = %d\n", ccode);
- return (ccode == 3) ? -ENODEV : -EBUSY;
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out_free;
}
-
- switch (ssd_area->response.code) {
- case 0x0001: /* everything ok */
- break;
- case 0x0002:
- CIO_CRW_EVENT(2, "Invalid command!\n");
- return -EINVAL;
- case 0x0003:
- CIO_CRW_EVENT(2, "Error in chsc request block!\n");
- return -EINVAL;
- case 0x0004:
- CIO_CRW_EVENT(2, "Model does not provide ssd\n");
- return -EOPNOTSUPP;
- default:
- CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
+ if (ssd_area->response.code != 0x0001) {
+ CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
+ schid.ssid, schid.sch_no,
ssd_area->response.code);
- return -EIO;
+ ret = -EIO;
+ goto out_free;
}
-
- /*
- * ssd_area->st stores the type of the detected
- * subchannel, with the following definitions:
- *
- * 0: I/O subchannel: All fields have meaning
- * 1: CHSC subchannel: Only sch_val, st and sch
- * have meaning
- * 2: Message subchannel: All fields except unit_addr
- * have meaning
- * 3: ADM subchannel: Only sch_val, st and sch
- * have meaning
- *
- * Other types are currently undefined.
- */
- if (ssd_area->st > 3) { /* uhm, that looks strange... */
- CIO_CRW_EVENT(0, "Strange subchannel type %d"
- " for sch 0.%x.%04x\n", ssd_area->st,
- sch->schid.ssid, sch->schid.sch_no);
- /*
- * There may have been a new subchannel type defined in the
- * time since this code was written; since we don't know which
- * fields have meaning and what to do with it we just jump out
- */
- return 0;
- } else {
- const char *type[4] = {"I/O", "chsc", "message", "ADM"};
- CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
- sch->schid.ssid, sch->schid.sch_no,
- type[ssd_area->st]);
-
- sch->ssd_info.valid = 1;
- sch->ssd_info.type = ssd_area->st;
+ if (!ssd_area->sch_valid) {
+ ret = -ENODEV;
+ goto out_free;
}
-
- if (ssd_area->st == 0 || ssd_area->st == 2) {
- for (j = 0; j < 8; j++) {
- if (!((0x80 >> j) & ssd_area->path_mask &
- ssd_area->fla_valid_mask))
- continue;
- sch->ssd_info.chpid[j] = ssd_area->chpid[j];
- sch->ssd_info.fla[j] = ssd_area->fla[j];
+ /* Copy data */
+ ret = 0;
+ memset(ssd, 0, sizeof(struct chsc_ssd_info));
+ if ((ssd_area->st != 0) && (ssd_area->st != 2))
+ goto out_free;
+ ssd->path_mask = ssd_area->path_mask;
+ ssd->fla_valid_mask = ssd_area->fla_valid_mask;
+ for (i = 0; i < 8; i++) {
+ mask = 0x80 >> i;
+ if (ssd_area->path_mask & mask) {
+ chp_id_init(&ssd->chpid[i]);
+ ssd->chpid[i].id = ssd_area->chpid[i];
}
+ if (ssd_area->fla_valid_mask & mask)
+ ssd->fla[i] = ssd_area->fla[i];
}
- return 0;
+out_free:
+ free_page(page);
+ return ret;
}
-int
-css_get_ssd_info(struct subchannel *sch)
+static int check_for_io_on_path(struct subchannel *sch, int mask)
{
- int ret;
- void *page;
+ int cc;
- page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!page)
- return -ENOMEM;
- spin_lock_irq(sch->lock);
- ret = chsc_get_sch_desc_irq(sch, page);
- if (ret) {
- static int cio_chsc_err_msg;
-
- if (!cio_chsc_err_msg) {
- printk(KERN_ERR
- "chsc_get_sch_descriptions:"
- " Error %d while doing chsc; "
- "processing some machine checks may "
- "not work\n", ret);
- cio_chsc_err_msg = 1;
- }
- }
- spin_unlock_irq(sch->lock);
- free_page((unsigned long)page);
- if (!ret) {
- int j, chpid, mask;
- /* Allocate channel path structures, if needed. */
- for (j = 0; j < 8; j++) {
- mask = 0x80 >> j;
- chpid = sch->ssd_info.chpid[j];
- if ((sch->schib.pmcw.pim & mask) &&
- (get_chp_status(chpid) < 0))
- new_channel_path(chpid);
- }
+ cc = stsch(sch->schid, &sch->schib);
+ if (cc)
+ return 0;
+ if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
+ return 1;
+ return 0;
+}
+
+static void terminate_internal_io(struct subchannel *sch)
+{
+ if (cio_clear(sch)) {
+ /* Recheck device in case clear failed. */
+ sch->lpm = 0;
+ if (device_trigger_verify(sch) != 0)
+ css_schedule_eval(sch->schid);
+ return;
}
- return ret;
+ /* Request retry of internal operation. */
+ device_set_intretry(sch);
+ /* Call handler. */
+ if (sch->driver && sch->driver->termination)
+ sch->driver->termination(&sch->dev);
}
static int
@@ -219,7 +141,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
int j;
int mask;
struct subchannel *sch;
- struct channel_path *chpid;
+ struct chp_id *chpid;
struct schib schib;
sch = to_subchannel(dev);
@@ -243,106 +165,50 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
if (sch->schib.pmcw.pim == 0x80)
goto out_unreg;
- if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
- (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
- (sch->schib.pmcw.lpum == mask)) {
- int cc;
-
- cc = cio_clear(sch);
- if (cc == -ENODEV)
+ if (check_for_io_on_path(sch, mask)) {
+ if (device_is_online(sch))
+ device_kill_io(sch);
+ else {
+ terminate_internal_io(sch);
+ /* Re-start path verification. */
+ if (sch->driver && sch->driver->verify)
+ sch->driver->verify(&sch->dev);
+ }
+ } else {
+ /* trigger path verification. */
+ if (sch->driver && sch->driver->verify)
+ sch->driver->verify(&sch->dev);
+ else if (sch->lpm == mask)
goto out_unreg;
- /* Request retry of internal operation. */
- device_set_intretry(sch);
- /* Call handler. */
- if (sch->driver && sch->driver->termination)
- sch->driver->termination(&sch->dev);
- goto out_unlock;
}
- /* trigger path verification. */
- if (sch->driver && sch->driver->verify)
- sch->driver->verify(&sch->dev);
- else if (sch->lpm == mask)
- goto out_unreg;
-out_unlock:
spin_unlock_irq(sch->lock);
return 0;
+
out_unreg:
- spin_unlock_irq(sch->lock);
sch->lpm = 0;
- if (css_enqueue_subchannel_slow(sch->schid)) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- }
+ spin_unlock_irq(sch->lock);
+ css_schedule_eval(sch->schid);
return 0;
}
-static void
-s390_set_chpid_offline( __u8 chpid)
+void chsc_chp_offline(struct chp_id chpid)
{
char dbf_txt[15];
- struct device *dev;
- sprintf(dbf_txt, "chpr%x", chpid);
+ sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
- if (get_chp_status(chpid) <= 0)
+ if (chp_get_status(chpid) <= 0)
return;
- dev = get_device(&css[0]->chps[chpid]->dev);
- bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
+ bus_for_each_dev(&css_bus_type, NULL, &chpid,
s390_subchannel_remove_chpid);
-
- if (need_rescan || css_slow_subchannels_exist())
- queue_work(slow_path_wq, &slow_path_work);
- put_device(dev);
-}
-
-struct res_acc_data {
- struct channel_path *chp;
- u32 fla_mask;
- u16 fla;
-};
-
-static int
-s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
-{
- int found;
- int chp;
- int ccode;
-
- found = 0;
- for (chp = 0; chp <= 7; chp++)
- /*
- * check if chpid is in information updated by ssd
- */
- if (sch->ssd_info.valid &&
- sch->ssd_info.chpid[chp] == res_data->chp->id &&
- (sch->ssd_info.fla[chp] & res_data->fla_mask)
- == res_data->fla) {
- found = 1;
- break;
- }
-
- if (found == 0)
- return 0;
-
- /*
- * Do a stsch to update our subchannel structure with the
- * new path information and eventually check for logically
- * offline chpids.
- */
- ccode = stsch(sch->schid, &sch->schib);
- if (ccode > 0)
- return 0;
-
- return 0x80 >> chp;
}
static int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
struct schib schib;
- int ret;
/*
* We don't know the device yet, but since a path
* may be available now to the device we'll have
@@ -353,14 +219,35 @@ s390_process_res_acc_new_sch(struct subchannel_id schid)
*/
if (stsch_err(schid, &schib))
/* We're through */
- return need_rescan ? -EAGAIN : -ENXIO;
+ return -ENXIO;
/* Put it on the slow path. */
- ret = css_enqueue_subchannel_slow(schid);
- if (ret) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- return -EAGAIN;
+ css_schedule_eval(schid);
+ return 0;
+}
+
+struct res_acc_data {
+ struct chp_id chpid;
+ u32 fla_mask;
+ u16 fla;
+};
+
+static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
+ struct res_acc_data *data)
+{
+ int i;
+ int mask;
+
+ for (i = 0; i < 8; i++) {
+ mask = 0x80 >> i;
+ if (!(ssd->path_mask & mask))
+ continue;
+ if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
+ continue;
+ if ((ssd->fla_valid_mask & mask) &&
+ ((ssd->fla[i] & data->fla_mask) != data->fla))
+ continue;
+ return mask;
}
return 0;
}
@@ -379,14 +266,11 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
return s390_process_res_acc_new_sch(schid);
spin_lock_irq(sch->lock);
-
- chp_mask = s390_process_res_acc_sch(res_data, sch);
-
- if (chp_mask == 0) {
- spin_unlock_irq(sch->lock);
- put_device(&sch->dev);
- return 0;
- }
+ chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
+ if (chp_mask == 0)
+ goto out;
+ if (stsch(sch->schid, &sch->schib))
+ goto out;
old_lpm = sch->lpm;
sch->lpm = ((sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
@@ -396,20 +280,18 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
device_trigger_reprobe(sch);
else if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
-
+out:
spin_unlock_irq(sch->lock);
put_device(&sch->dev);
return 0;
}
-
-static int
-s390_process_res_acc (struct res_acc_data *res_data)
+static void s390_process_res_acc (struct res_acc_data *res_data)
{
- int rc;
char dbf_txt[15];
- sprintf(dbf_txt, "accpr%x", res_data->chp->id);
+ sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
+ res_data->chpid.id);
CIO_TRACE_EVENT( 2, dbf_txt);
if (res_data->fla != 0) {
sprintf(dbf_txt, "fla%x", res_data->fla);
@@ -423,12 +305,7 @@ s390_process_res_acc (struct res_acc_data *res_data)
* The more information we have (info), the less scanning
* will we have to do.
*/
- rc = for_each_subchannel(__s390_process_res_acc, res_data);
- if (css_slow_subchannels_exist())
- rc = -EAGAIN;
- else if (rc != -EAGAIN)
- rc = 0;
- return rc;
+ for_each_subchannel(__s390_process_res_acc, res_data);
}
static int
@@ -480,43 +357,45 @@ struct chsc_sei_area {
/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
-static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
- int chpid;
+ struct chp_id chpid;
+ int id;
CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
sei_area->rs, sei_area->rsid);
if (sei_area->rs != 4)
- return 0;
- chpid = __get_chpid_from_lir(sei_area->ccdf);
- if (chpid < 0)
+ return;
+ id = __get_chpid_from_lir(sei_area->ccdf);
+ if (id < 0)
CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
- else
- s390_set_chpid_offline(chpid);
-
- return 0;
+ else {
+ chp_id_init(&chpid);
+ chpid.id = id;
+ chsc_chp_offline(chpid);
+ }
}
-static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
struct res_acc_data res_data;
- struct device *dev;
+ struct chp_id chpid;
int status;
- int rc;
CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
"rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
if (sei_area->rs != 4)
- return 0;
+ return;
+ chp_id_init(&chpid);
+ chpid.id = sei_area->rsid;
/* allocate a new channel path structure, if needed */
- status = get_chp_status(sei_area->rsid);
+ status = chp_get_status(chpid);
if (status < 0)
- new_channel_path(sei_area->rsid);
+ chp_new(chpid);
else if (!status)
- return 0;
- dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
+ return;
memset(&res_data, 0, sizeof(struct res_acc_data));
- res_data.chp = to_channelpath(dev);
+ res_data.chpid = chpid;
if ((sei_area->vf & 0xc0) != 0) {
res_data.fla = sei_area->fla;
if ((sei_area->vf & 0xc0) == 0xc0)
@@ -526,51 +405,82 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
/* link address */
res_data.fla_mask = 0xff00;
}
- rc = s390_process_res_acc(&res_data);
- put_device(dev);
-
- return rc;
+ s390_process_res_acc(&res_data);
}
-static int chsc_process_sei(struct chsc_sei_area *sei_area)
+struct chp_config_data {
+ u8 map[32];
+ u8 op;
+ u8 pc;
+};
+
+static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
- int rc;
+ struct chp_config_data *data;
+ struct chp_id chpid;
+ int num;
+
+ CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
+ if (sei_area->rs != 0)
+ return;
+ data = (struct chp_config_data *) &(sei_area->ccdf);
+ chp_id_init(&chpid);
+ for (num = 0; num <= __MAX_CHPID; num++) {
+ if (!chp_test_bit(data->map, num))
+ continue;
+ chpid.id = num;
+ printk(KERN_WARNING "cio: processing configure event %d for "
+ "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
+ switch (data->op) {
+ case 0:
+ chp_cfg_schedule(chpid, 1);
+ break;
+ case 1:
+ chp_cfg_schedule(chpid, 0);
+ break;
+ case 2:
+ chp_cfg_cancel_deconfigure(chpid);
+ break;
+ }
+ }
+}
+static void chsc_process_sei(struct chsc_sei_area *sei_area)
+{
/* Check if we might have lost some information. */
- if (sei_area->flags & 0x40)
+ if (sei_area->flags & 0x40) {
CIO_CRW_EVENT(2, "chsc: event overflow\n");
+ css_schedule_eval_all();
+ }
/* which kind of information was stored? */
- rc = 0;
switch (sei_area->cc) {
case 1: /* link incident*/
- rc = chsc_process_sei_link_incident(sei_area);
+ chsc_process_sei_link_incident(sei_area);
break;
	case 2: /* i/o resource accessibility */
- rc = chsc_process_sei_res_acc(sei_area);
+ chsc_process_sei_res_acc(sei_area);
+ break;
+ case 8: /* channel-path-configuration notification */
+ chsc_process_sei_chp_config(sei_area);
break;
default: /* other stuff */
CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
sei_area->cc);
break;
}
-
- return rc;
}
-int chsc_process_crw(void)
+void chsc_process_crw(void)
{
struct chsc_sei_area *sei_area;
- int ret;
- int rc;
if (!sei_page)
- return 0;
+ return;
/* Access to sei_page is serialized through machine check handler
* thread, so no need for locking. */
sei_area = sei_page;
CIO_TRACE_EVENT( 2, "prcss");
- ret = 0;
do {
memset(sei_area, 0, sizeof(*sei_area));
sei_area->request.length = 0x0010;
@@ -580,37 +490,26 @@ int chsc_process_crw(void)
if (sei_area->response.code == 0x0001) {
CIO_CRW_EVENT(4, "chsc: sei successful\n");
- rc = chsc_process_sei(sei_area);
- if (rc)
- ret = rc;
+ chsc_process_sei(sei_area);
} else {
CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
sei_area->response.code);
- ret = 0;
break;
}
} while (sei_area->flags & 0x80);
-
- return ret;
}
static int
__chp_add_new_sch(struct subchannel_id schid)
{
struct schib schib;
- int ret;
if (stsch_err(schid, &schib))
/* We're through */
- return need_rescan ? -EAGAIN : -ENXIO;
+ return -ENXIO;
/* Put it on the slow path. */
- ret = css_enqueue_subchannel_slow(schid);
- if (ret) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- return -EAGAIN;
- }
+ css_schedule_eval(schid);
return 0;
}
@@ -619,10 +518,10 @@ static int
__chp_add(struct subchannel_id schid, void *data)
{
int i, mask;
- struct channel_path *chp;
+ struct chp_id *chpid;
struct subchannel *sch;
- chp = data;
+ chpid = data;
sch = get_subchannel_by_schid(schid);
if (!sch)
/* Check if the subchannel is now available. */
@@ -631,7 +530,7 @@ __chp_add(struct subchannel_id schid, void *data)
for (i=0; i<8; i++) {
mask = 0x80 >> i;
if ((sch->schib.pmcw.pim & mask) &&
- (sch->schib.pmcw.chpid[i] == chp->id)) {
+ (sch->schib.pmcw.chpid[i] == chpid->id)) {
if (stsch(sch->schid, &sch->schib) != 0) {
/* Endgame. */
spin_unlock_irq(sch->lock);
@@ -657,122 +556,58 @@ __chp_add(struct subchannel_id schid, void *data)
return 0;
}
-static int
-chp_add(int chpid)
+void chsc_chp_online(struct chp_id chpid)
{
- int rc;
char dbf_txt[15];
- struct device *dev;
- if (!get_chp_status(chpid))
- return 0; /* no need to do the rest */
-
- sprintf(dbf_txt, "cadd%x", chpid);
+ sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
- dev = get_device(&css[0]->chps[chpid]->dev);
- rc = for_each_subchannel(__chp_add, to_channelpath(dev));
- if (css_slow_subchannels_exist())
- rc = -EAGAIN;
- if (rc != -EAGAIN)
- rc = 0;
- put_device(dev);
- return rc;
+ if (chp_get_status(chpid) != 0)
+ for_each_subchannel(__chp_add, &chpid);
}
-/*
- * Handling of crw machine checks with channel path source.
- */
-int
-chp_process_crw(int chpid, int on)
-{
- if (on == 0) {
- /* Path has gone. We use the link incident routine.*/
- s390_set_chpid_offline(chpid);
- return 0; /* De-register is async anyway. */
- }
- /*
- * Path has come. Allocate a new channel path structure,
- * if needed.
- */
- if (get_chp_status(chpid) < 0)
- new_channel_path(chpid);
- /* Avoid the extra overhead in process_rec_acc. */
- return chp_add(chpid);
-}
-
-static int check_for_io_on_path(struct subchannel *sch, int index)
-{
- int cc;
-
- cc = stsch(sch->schid, &sch->schib);
- if (cc)
- return 0;
- if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
- return 1;
- return 0;
-}
-
-static void terminate_internal_io(struct subchannel *sch)
-{
- if (cio_clear(sch)) {
- /* Recheck device in case clear failed. */
- sch->lpm = 0;
- if (device_trigger_verify(sch) != 0) {
- if(css_enqueue_subchannel_slow(sch->schid)) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- }
- }
- return;
- }
- /* Request retry of internal operation. */
- device_set_intretry(sch);
- /* Call handler. */
- if (sch->driver && sch->driver->termination)
- sch->driver->termination(&sch->dev);
-}
-
-static void
-__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
+static void __s390_subchannel_vary_chpid(struct subchannel *sch,
+ struct chp_id chpid, int on)
{
int chp, old_lpm;
+ int mask;
unsigned long flags;
- if (!sch->ssd_info.valid)
- return;
-
spin_lock_irqsave(sch->lock, flags);
old_lpm = sch->lpm;
for (chp = 0; chp < 8; chp++) {
- if (sch->ssd_info.chpid[chp] != chpid)
+ mask = 0x80 >> chp;
+ if (!(sch->ssd_info.path_mask & mask))
+ continue;
+ if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
continue;
if (on) {
- sch->opm |= (0x80 >> chp);
- sch->lpm |= (0x80 >> chp);
+ sch->opm |= mask;
+ sch->lpm |= mask;
if (!old_lpm)
device_trigger_reprobe(sch);
else if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
break;
}
- sch->opm &= ~(0x80 >> chp);
- sch->lpm &= ~(0x80 >> chp);
- if (check_for_io_on_path(sch, chp)) {
+ sch->opm &= ~mask;
+ sch->lpm &= ~mask;
+ if (check_for_io_on_path(sch, mask)) {
if (device_is_online(sch))
/* Path verification is done after killing. */
device_kill_io(sch);
- else
+ else {
/* Kill and retry internal I/O. */
terminate_internal_io(sch);
- } else if (!sch->lpm) {
- if (device_trigger_verify(sch) != 0) {
- if (css_enqueue_subchannel_slow(sch->schid)) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- }
+ /* Re-start path verification. */
+ if (sch->driver && sch->driver->verify)
+ sch->driver->verify(&sch->dev);
}
+ } else if (!sch->lpm) {
+ if (device_trigger_verify(sch) != 0)
+ css_schedule_eval(sch->schid);
} else if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
break;
@@ -780,11 +615,10 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
spin_unlock_irqrestore(sch->lock, flags);
}
-static int
-s390_subchannel_vary_chpid_off(struct device *dev, void *data)
+static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
struct subchannel *sch;
- __u8 *chpid;
+ struct chp_id *chpid;
sch = to_subchannel(dev);
chpid = data;
@@ -793,11 +627,10 @@ s390_subchannel_vary_chpid_off(struct device *dev, void *data)
return 0;
}
-static int
-s390_subchannel_vary_chpid_on(struct device *dev, void *data)
+static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
struct subchannel *sch;
- __u8 *chpid;
+ struct chp_id *chpid;
sch = to_subchannel(dev);
chpid = data;
@@ -821,40 +654,17 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
/* We're through */
return -ENXIO;
/* Put it on the slow path. */
- if (css_enqueue_subchannel_slow(schid)) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- return -EAGAIN;
- }
+ css_schedule_eval(schid);
return 0;
}
-/*
- * Function: s390_vary_chpid
- * Varies the specified chpid online or offline
+/**
+ * chsc_chp_vary - propagate channel-path vary operation to subchannels
+ * @chpid: channel-path ID
+ * @on: non-zero for vary online, zero for vary offline
*/
-static int
-s390_vary_chpid( __u8 chpid, int on)
+int chsc_chp_vary(struct chp_id chpid, int on)
{
- char dbf_text[15];
- int status;
-
- sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
- CIO_TRACE_EVENT( 2, dbf_text);
-
- status = get_chp_status(chpid);
- if (status < 0) {
- printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
- return -EINVAL;
- }
-
- if (!on && !status) {
- printk(KERN_ERR "chpid %x is already offline\n", chpid);
- return -EINVAL;
- }
-
- set_chp_logically_online(chpid, on);
-
/*
* Redo PathVerification on the devices the chpid connects to
*/
@@ -865,118 +675,9 @@ s390_vary_chpid( __u8 chpid, int on)
if (on)
/* Scan for new devices on varied on path. */
for_each_subchannel(__s390_vary_chpid_on, NULL);
- if (need_rescan || css_slow_subchannels_exist())
- queue_work(slow_path_wq, &slow_path_work);
return 0;
}
-/*
- * Channel measurement related functions
- */
-static ssize_t
-chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
-{
- struct channel_path *chp;
- unsigned int size;
-
- chp = to_channelpath(container_of(kobj, struct device, kobj));
- if (!chp->cmg_chars)
- return 0;
-
- size = sizeof(struct cmg_chars);
-
- if (off > size)
- return 0;
- if (off + count > size)
- count = size - off;
- memcpy(buf, chp->cmg_chars + off, count);
- return count;
-}
-
-static struct bin_attribute chp_measurement_chars_attr = {
- .attr = {
- .name = "measurement_chars",
- .mode = S_IRUSR,
- .owner = THIS_MODULE,
- },
- .size = sizeof(struct cmg_chars),
- .read = chp_measurement_chars_read,
-};
-
-static void
-chp_measurement_copy_block(struct cmg_entry *buf,
- struct channel_subsystem *css, int chpid)
-{
- void *area;
- struct cmg_entry *entry, reference_buf;
- int idx;
-
- if (chpid < 128) {
- area = css->cub_addr1;
- idx = chpid;
- } else {
- area = css->cub_addr2;
- idx = chpid - 128;
- }
- entry = area + (idx * sizeof(struct cmg_entry));
- do {
- memcpy(buf, entry, sizeof(*entry));
- memcpy(&reference_buf, entry, sizeof(*entry));
- } while (reference_buf.values[0] != buf->values[0]);
-}
-
-static ssize_t
-chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
-{
- struct channel_path *chp;
- struct channel_subsystem *css;
- unsigned int size;
-
- chp = to_channelpath(container_of(kobj, struct device, kobj));
- css = to_css(chp->dev.parent);
-
- size = sizeof(struct cmg_entry);
-
- /* Only allow single reads. */
- if (off || count < size)
- return 0;
- chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
- count = size;
- return count;
-}
-
-static struct bin_attribute chp_measurement_attr = {
- .attr = {
- .name = "measurement",
- .mode = S_IRUSR,
- .owner = THIS_MODULE,
- },
- .size = sizeof(struct cmg_entry),
- .read = chp_measurement_read,
-};
-
-static void
-chsc_remove_chp_cmg_attr(struct channel_path *chp)
-{
- device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
- device_remove_bin_file(&chp->dev, &chp_measurement_attr);
-}
-
-static int
-chsc_add_chp_cmg_attr(struct channel_path *chp)
-{
- int ret;
-
- ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
- if (ret)
- return ret;
- ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
- if (ret)
- device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
- return ret;
-}
-
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
@@ -985,7 +686,7 @@ chsc_remove_cmg_attr(struct channel_subsystem *css)
for (i = 0; i <= __MAX_CHPID; i++) {
if (!css->chps[i])
continue;
- chsc_remove_chp_cmg_attr(css->chps[i]);
+ chp_remove_cmg_attr(css->chps[i]);
}
}
@@ -998,7 +699,7 @@ chsc_add_cmg_attr(struct channel_subsystem *css)
for (i = 0; i <= __MAX_CHPID; i++) {
if (!css->chps[i])
continue;
- ret = chsc_add_chp_cmg_attr(css->chps[i]);
+ ret = chp_add_cmg_attr(css->chps[i]);
if (ret)
goto cleanup;
}
@@ -1007,12 +708,11 @@ cleanup:
for (--i; i >= 0; i--) {
if (!css->chps[i])
continue;
- chsc_remove_chp_cmg_attr(css->chps[i]);
+ chp_remove_cmg_attr(css->chps[i]);
}
return ret;
}
-
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
@@ -1118,7 +818,7 @@ chsc_secm(struct channel_subsystem *css, int enable)
} else
chsc_remove_cmg_attr(css);
}
- if (enable && !css->cm_enabled) {
+ if (!css->cm_enabled) {
free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2);
}
@@ -1127,109 +827,8 @@ chsc_secm(struct channel_subsystem *css, int enable)
return ret;
}
-/*
- * Files for the channel path entries.
- */
-static ssize_t
-chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct channel_path *chp = container_of(dev, struct channel_path, dev);
-
- if (!chp)
- return 0;
- return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
- sprintf(buf, "offline\n"));
-}
-
-static ssize_t
-chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- struct channel_path *cp = container_of(dev, struct channel_path, dev);
- char cmd[10];
- int num_args;
- int error;
-
- num_args = sscanf(buf, "%5s", cmd);
- if (!num_args)
- return count;
-
- if (!strnicmp(cmd, "on", 2))
- error = s390_vary_chpid(cp->id, 1);
- else if (!strnicmp(cmd, "off", 3))
- error = s390_vary_chpid(cp->id, 0);
- else
- error = -EINVAL;
-
- return error < 0 ? error : count;
-
-}
-
-static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
-
-static ssize_t
-chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct channel_path *chp = container_of(dev, struct channel_path, dev);
-
- if (!chp)
- return 0;
- return sprintf(buf, "%x\n", chp->desc.desc);
-}
-
-static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
-
-static ssize_t
-chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct channel_path *chp = to_channelpath(dev);
-
- if (!chp)
- return 0;
- if (chp->cmg == -1) /* channel measurements not available */
- return sprintf(buf, "unknown\n");
- return sprintf(buf, "%x\n", chp->cmg);
-}
-
-static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
-
-static ssize_t
-chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct channel_path *chp = to_channelpath(dev);
-
- if (!chp)
- return 0;
- if (chp->shared == -1) /* channel measurements not available */
- return sprintf(buf, "unknown\n");
- return sprintf(buf, "%x\n", chp->shared);
-}
-
-static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
-
-static struct attribute * chp_attrs[] = {
- &dev_attr_status.attr,
- &dev_attr_type.attr,
- &dev_attr_cmg.attr,
- &dev_attr_shared.attr,
- NULL,
-};
-
-static struct attribute_group chp_attr_group = {
- .attrs = chp_attrs,
-};
-
-static void
-chp_release(struct device *dev)
-{
- struct channel_path *cp;
-
- cp = container_of(dev, struct channel_path, dev);
- kfree(cp);
-}
-
-static int
-chsc_determine_channel_path_description(int chpid,
- struct channel_path_desc *desc)
+int chsc_determine_channel_path_description(struct chp_id chpid,
+ struct channel_path_desc *desc)
{
int ccode, ret;
@@ -1252,8 +851,8 @@ chsc_determine_channel_path_description(int chpid,
scpd_area->request.length = 0x0010;
scpd_area->request.code = 0x0002;
- scpd_area->first_chpid = chpid;
- scpd_area->last_chpid = chpid;
+ scpd_area->first_chpid = chpid.id;
+ scpd_area->last_chpid = chpid.id;
ccode = chsc(scpd_area);
if (ccode > 0) {
@@ -1316,8 +915,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
}
}
-static int
-chsc_get_channel_measurement_chars(struct channel_path *chp)
+int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
int ccode, ret;
@@ -1349,8 +947,8 @@ chsc_get_channel_measurement_chars(struct channel_path *chp)
scmc_area->request.length = 0x0010;
scmc_area->request.code = 0x0022;
- scmc_area->first_chpid = chp->id;
- scmc_area->last_chpid = chp->id;
+ scmc_area->first_chpid = chp->chpid.id;
+ scmc_area->last_chpid = chp->chpid.id;
ccode = chsc(scmc_area);
if (ccode > 0) {
@@ -1392,94 +990,6 @@ out:
return ret;
}
-/*
- * Entries for chpids on the system bus.
- * This replaces /proc/chpids.
- */
-static int
-new_channel_path(int chpid)
-{
- struct channel_path *chp;
- int ret;
-
- chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
- if (!chp)
- return -ENOMEM;
-
- /* fill in status, etc. */
- chp->id = chpid;
- chp->state = 1;
- chp->dev.parent = &css[0]->device;
- chp->dev.release = chp_release;
- snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
-
- /* Obtain channel path description and fill it in. */
- ret = chsc_determine_channel_path_description(chpid, &chp->desc);
- if (ret)
- goto out_free;
- /* Get channel-measurement characteristics. */
- if (css_characteristics_avail && css_chsc_characteristics.scmc
- && css_chsc_characteristics.secm) {
- ret = chsc_get_channel_measurement_chars(chp);
- if (ret)
- goto out_free;
- } else {
- static int msg_done;
-
- if (!msg_done) {
- printk(KERN_WARNING "cio: Channel measurements not "
- "available, continuing.\n");
- msg_done = 1;
- }
- chp->cmg = -1;
- }
-
- /* make it known to the system */
- ret = device_register(&chp->dev);
- if (ret) {
- printk(KERN_WARNING "%s: could not register %02x\n",
- __func__, chpid);
- goto out_free;
- }
- ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
- if (ret) {
- device_unregister(&chp->dev);
- goto out_free;
- }
- mutex_lock(&css[0]->mutex);
- if (css[0]->cm_enabled) {
- ret = chsc_add_chp_cmg_attr(chp);
- if (ret) {
- sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
- device_unregister(&chp->dev);
- mutex_unlock(&css[0]->mutex);
- goto out_free;
- }
- }
- css[0]->chps[chpid] = chp;
- mutex_unlock(&css[0]->mutex);
- return ret;
-out_free:
- kfree(chp);
- return ret;
-}
-
-void *
-chsc_get_chp_desc(struct subchannel *sch, int chp_no)
-{
- struct channel_path *chp;
- struct channel_path_desc *desc;
-
- chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
- if (!chp)
- return NULL;
- desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
- if (!desc)
- return NULL;
- memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
- return desc;
-}
-
static int __init
chsc_alloc_sei_area(void)
{
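
Note: the chsc_process_crw() loop above keeps re-issuing the store-event-information request and dispatching on the content code until the event-pending flag (bit 0x80) clears. A minimal userspace sketch of that drain-and-dispatch shape (the event source and codes below are made up for illustration, not part of the patch):

    #include <stdio.h>

    struct sei { unsigned char flags; unsigned char cc; };

    /* stand-in for the hardware: returns a fixed sequence of events */
    static int fetch_event(struct sei *ev)
    {
        static const struct sei queue[] = {
            { 0x80, 1 },  /* link incident, more pending */
            { 0x80, 2 },  /* resource accessibility, more pending */
            { 0x00, 8 },  /* channel-path configuration, last one */
        };
        static unsigned int next;

        if (next >= sizeof(queue) / sizeof(queue[0]))
            return -1;
        *ev = queue[next++];
        return 0;
    }

    int main(void)
    {
        struct sei ev;

        do {
            if (fetch_event(&ev))
                break;
            switch (ev.cc) {            /* dispatch on content code */
            case 1: printf("link incident\n"); break;
            case 2: printf("resource accessibility\n"); break;
            case 8: printf("channel-path configuration\n"); break;
            default: printf("unhandled content code %d\n", ev.cc); break;
            }
        } while (ev.flags & 0x80);      /* keep draining while more is queued */
        return 0;
    }
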
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 0fb2b024208..2ad81d11cf7 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -1,9 +1,10 @@
#ifndef S390_CHSC_H
#define S390_CHSC_H
-#define CHSC_SEI_ACC_CHPID 1
-#define CHSC_SEI_ACC_LINKADDR 2
-#define CHSC_SEI_ACC_FULLLINKADDR 3
+#include <linux/types.h>
+#include <linux/device.h>
+#include <asm/chpid.h>
+#include "schid.h"
#define CHSC_SDA_OC_MSS 0x2
@@ -33,23 +34,9 @@ struct channel_path_desc {
u8 chpp;
} __attribute__ ((packed));
-struct channel_path {
- int id;
- int state;
- struct channel_path_desc desc;
- /* Channel-measurement related stuff: */
- int cmg;
- int shared;
- void *cmg_chars;
- struct device dev;
-};
+struct channel_path;
-extern void s390_process_css( void );
-extern void chsc_validate_chpids(struct subchannel *);
-extern void chpid_is_actually_online(int);
-extern int css_get_ssd_info(struct subchannel *);
-extern int chsc_process_crw(void);
-extern int chp_process_crw(int, int);
+extern void chsc_process_crw(void);
struct css_general_char {
u64 : 41;
@@ -82,15 +69,26 @@ struct css_chsc_char {
extern struct css_general_char css_general_characteristics;
extern struct css_chsc_char css_chsc_characteristics;
+struct chsc_ssd_info {
+ u8 path_mask;
+ u8 fla_valid_mask;
+ struct chp_id chpid[8];
+ u16 fla[8];
+};
+extern int chsc_get_ssd_info(struct subchannel_id schid,
+ struct chsc_ssd_info *ssd);
extern int chsc_determine_css_characteristics(void);
extern int css_characteristics_avail;
-extern void *chsc_get_chp_desc(struct subchannel*, int);
-
extern int chsc_enable_facility(int);
struct channel_subsystem;
extern int chsc_secm(struct channel_subsystem *, int);
-#define to_channelpath(device) container_of(device, struct channel_path, dev)
+int chsc_chp_vary(struct chp_id chpid, int on);
+int chsc_determine_channel_path_description(struct chp_id chpid,
+ struct channel_path_desc *desc);
+void chsc_chp_online(struct chp_id chpid);
+void chsc_chp_offline(struct chp_id chpid);
+int chsc_get_channel_measurement_chars(struct channel_path *chp);
#endif
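
Note: the header change above replaces the public definition of struct channel_path with a bare forward declaration, the usual opaque-type pattern: consumers keep working with pointers while the layout moves behind accessor functions. A small two-file sketch of the same pattern, with illustrative names rather than the patch's own:

    /* thing.h - consumers see only an opaque type and accessors */
    #ifndef THING_H
    #define THING_H
    struct thing;
    struct thing *thing_new(int id);
    int thing_id(const struct thing *t);
    void thing_free(struct thing *t);
    #endif

    /* thing.c - the only file that knows the layout */
    #include <stdlib.h>
    #include "thing.h"

    struct thing { int id; };

    struct thing *thing_new(int id)
    {
        struct thing *t = calloc(1, sizeof(*t));
        if (t)
            t->id = id;
        return t;
    }

    int thing_id(const struct thing *t) { return t->id; }
    void thing_free(struct thing *t) { free(t); }

Callers include only the header and hold pointers, so the structure layout can change without touching them; here that is what lets the channel-path definition move out of chsc.h into the channel-path code that owns it.
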
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 9cb129ab5be..ea1defba569 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -22,6 +22,7 @@
#include <asm/setup.h>
#include <asm/reset.h>
#include <asm/ipl.h>
+#include <asm/chpid.h>
#include "airq.h"
#include "cio.h"
#include "css.h"
@@ -29,6 +30,7 @@
#include "ioasm.h"
#include "blacklist.h"
#include "cio_debug.h"
+#include "chp.h"
#include "../s390mach.h"
debug_info_t *cio_debug_msg_id;
@@ -592,9 +594,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
err = -ENODEV;
goto out;
}
- sch->opm = 0xff;
- if (!cio_is_console(sch->schid))
- chsc_validate_chpids(sch);
+ if (cio_is_console(sch->schid))
+ sch->opm = 0xff;
+ else
+ sch->opm = chp_get_sch_opm(sch);
sch->lpm = sch->schib.pmcw.pam & sch->opm;
CIO_DEBUG(KERN_INFO, 0,
@@ -954,6 +957,7 @@ static void css_reset(void)
{
int i, ret;
unsigned long long timeout;
+ struct chp_id chpid;
/* Reset subchannels. */
for_each_subchannel(__shutdown_subchannel_easy, NULL);
@@ -963,8 +967,10 @@ static void css_reset(void)
__ctl_set_bit(14, 28);
/* Temporarily reenable machine checks. */
local_mcck_enable();
+ chp_id_init(&chpid);
for (i = 0; i <= __MAX_CHPID; i++) {
- ret = rchp(i);
+ chpid.id = i;
+ ret = rchp(chpid);
if ((ret == 0) || (ret == 2))
/*
* rchp either succeeded, or another rchp is already
@@ -1048,37 +1054,19 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
do_reipl_asm(*((__u32*)&schid));
}
-static struct schib __initdata ipl_schib;
-
-/*
- * ipl_save_parameters gets called very early. It is not allowed to access
- * anything in the bss section at all. The bss section is not cleared yet,
- * but may contain some ipl parameters written by the firmware.
- * These parameters (if present) are copied to 0x2000.
- * To avoid corruption of the ipl parameters, all variables used by this
- * function must reside on the stack or in the data section.
- */
-void ipl_save_parameters(void)
+int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
{
struct subchannel_id schid;
- unsigned int *ipl_ptr;
- void *src, *dst;
+ struct schib schib;
schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
if (!schid.one)
- return;
- if (stsch(schid, &ipl_schib))
- return;
- if (!ipl_schib.pmcw.dnv)
- return;
- ipl_devno = ipl_schib.pmcw.dev;
- ipl_flags |= IPL_DEVNO_VALID;
- if (!ipl_schib.pmcw.qf)
- return;
- ipl_flags |= IPL_PARMBLOCK_VALID;
- ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR;
- src = (void *)(unsigned long)*ipl_ptr;
- dst = (void *)IPL_PARMBLOCK_ORIGIN;
- memmove(dst, src, PAGE_SIZE);
- *ipl_ptr = IPL_PARMBLOCK_ORIGIN;
+ return -ENODEV;
+ if (stsch(schid, &schib))
+ return -ENODEV;
+ if (!schib.pmcw.dnv)
+ return -ENODEV;
+ iplinfo->devno = schib.pmcw.dev;
+ iplinfo->is_qdio = schib.pmcw.qf;
+ return 0;
}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 35154a21035..7446c39951a 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -1,18 +1,11 @@
#ifndef S390_CIO_H
#define S390_CIO_H
-#include "schid.h"
#include <linux/mutex.h>
-
-/*
- * where we put the ssd info
- */
-struct ssd_info {
- __u8 valid:1;
- __u8 type:7; /* subchannel type */
- __u8 chpid[8]; /* chpids */
- __u16 fla[8]; /* full link addresses */
-} __attribute__ ((packed));
+#include <linux/device.h>
+#include <asm/chpid.h>
+#include "chsc.h"
+#include "schid.h"
/*
* path management control word
@@ -108,7 +101,7 @@ struct subchannel {
struct schib schib; /* subchannel information block */
struct orb orb; /* operation request block */
struct ccw1 sense_ccw; /* static ccw for sense command */
- struct ssd_info ssd_info; /* subchannel description */
+ struct chsc_ssd_info ssd_info; /* subchannel description */
struct device dev; /* entry in device tree */
struct css_driver *driver;
} __attribute__ ((aligned(8)));
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 90b22faabbf..28abd697be1 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -476,7 +476,7 @@ struct cmb_area {
};
static struct cmb_area cmb_area = {
- .lock = SPIN_LOCK_UNLOCKED,
+ .lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
.list = LIST_HEAD_INIT(cmb_area.list),
.num_channels = 1024,
};
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fe0ace7aece..27c6d9e55b2 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -20,8 +20,9 @@
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
+#include "idset.h"
+#include "chp.h"
-int need_rescan = 0;
int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;
@@ -125,8 +126,52 @@ void css_sch_device_unregister(struct subchannel *sch)
mutex_unlock(&sch->reg_mutex);
}
-static int
-css_register_subchannel(struct subchannel *sch)
+static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
+{
+ int i;
+ int mask;
+
+ memset(ssd, 0, sizeof(struct chsc_ssd_info));
+ ssd->path_mask = pmcw->pim;
+ for (i = 0; i < 8; i++) {
+ mask = 0x80 >> i;
+ if (pmcw->pim & mask) {
+ chp_id_init(&ssd->chpid[i]);
+ ssd->chpid[i].id = pmcw->chpid[i];
+ }
+ }
+}
+
+static void ssd_register_chpids(struct chsc_ssd_info *ssd)
+{
+ int i;
+ int mask;
+
+ for (i = 0; i < 8; i++) {
+ mask = 0x80 >> i;
+ if (ssd->path_mask & mask)
+ if (!chp_is_registered(ssd->chpid[i]))
+ chp_new(ssd->chpid[i]);
+ }
+}
+
+void css_update_ssd_info(struct subchannel *sch)
+{
+ int ret;
+
+ if (cio_is_console(sch->schid)) {
+ /* Console is initialized too early for functions requiring
+ * memory allocation. */
+ ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
+ } else {
+ ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
+ if (ret)
+ ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
+ ssd_register_chpids(&sch->ssd_info);
+ }
+}
+
+static int css_register_subchannel(struct subchannel *sch)
{
int ret;
@@ -135,9 +180,7 @@ css_register_subchannel(struct subchannel *sch)
sch->dev.bus = &css_bus_type;
sch->dev.release = &css_subchannel_release;
sch->dev.groups = subch_attr_groups;
-
- css_get_ssd_info(sch);
-
+ css_update_ssd_info(sch);
/* make it known to the system */
ret = css_sch_device_register(sch);
if (ret) {
@@ -306,7 +349,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
return css_probe_device(schid);
}
-static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
+static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
struct subchannel *sch;
int ret;
@@ -317,53 +360,66 @@ static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
put_device(&sch->dev);
} else
ret = css_evaluate_new_subchannel(schid, slow);
-
- return ret;
+ if (ret == -EAGAIN)
+ css_schedule_eval(schid);
}
-static int
-css_rescan_devices(struct subchannel_id schid, void *data)
+static struct idset *slow_subchannel_set;
+static spinlock_t slow_subchannel_lock;
+
+static int __init slow_subchannel_init(void)
{
- return css_evaluate_subchannel(schid, 1);
+ spin_lock_init(&slow_subchannel_lock);
+ slow_subchannel_set = idset_sch_new();
+ if (!slow_subchannel_set) {
+ printk(KERN_WARNING "cio: could not allocate slow subchannel "
+ "set\n");
+ return -ENOMEM;
+ }
+ return 0;
}
-struct slow_subchannel {
- struct list_head slow_list;
- struct subchannel_id schid;
-};
-
-static LIST_HEAD(slow_subchannels_head);
-static DEFINE_SPINLOCK(slow_subchannel_lock);
+subsys_initcall(slow_subchannel_init);
-static void
-css_trigger_slow_path(struct work_struct *unused)
+static void css_slow_path_func(struct work_struct *unused)
{
- CIO_TRACE_EVENT(4, "slowpath");
-
- if (need_rescan) {
- need_rescan = 0;
- for_each_subchannel(css_rescan_devices, NULL);
- return;
- }
+ struct subchannel_id schid;
+ CIO_TRACE_EVENT(4, "slowpath");
spin_lock_irq(&slow_subchannel_lock);
- while (!list_empty(&slow_subchannels_head)) {
- struct slow_subchannel *slow_sch =
- list_entry(slow_subchannels_head.next,
- struct slow_subchannel, slow_list);
-
- list_del_init(slow_subchannels_head.next);
+ init_subchannel_id(&schid);
+ while (idset_sch_get_first(slow_subchannel_set, &schid)) {
+ idset_sch_del(slow_subchannel_set, schid);
spin_unlock_irq(&slow_subchannel_lock);
- css_evaluate_subchannel(slow_sch->schid, 1);
+ css_evaluate_subchannel(schid, 1);
spin_lock_irq(&slow_subchannel_lock);
- kfree(slow_sch);
}
spin_unlock_irq(&slow_subchannel_lock);
}
-DECLARE_WORK(slow_path_work, css_trigger_slow_path);
+static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;
+void css_schedule_eval(struct subchannel_id schid)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&slow_subchannel_lock, flags);
+ idset_sch_add(slow_subchannel_set, schid);
+ queue_work(slow_path_wq, &slow_path_work);
+ spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+void css_schedule_eval_all(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&slow_subchannel_lock, flags);
+ idset_fill(slow_subchannel_set);
+ queue_work(slow_path_wq, &slow_path_work);
+ spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
@@ -426,33 +482,14 @@ void css_schedule_reprobe(void)
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
/*
- * Rescan for new devices. FIXME: This is slow.
- * This function is called when we have lost CRWs due to overflows and we have
- * to do subchannel housekeeping.
- */
-void
-css_reiterate_subchannels(void)
-{
- css_clear_subchannel_slow_list();
- need_rescan = 1;
-}
-
-/*
* Called from the machine check handler for subchannel report words.
*/
-int
-css_process_crw(int rsid1, int rsid2)
+void css_process_crw(int rsid1, int rsid2)
{
- int ret;
struct subchannel_id mchk_schid;
CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
rsid1, rsid2);
-
- if (need_rescan)
- /* We need to iterate all subchannels anyway. */
- return -EAGAIN;
-
init_subchannel_id(&mchk_schid);
mchk_schid.sch_no = rsid1;
if (rsid2 != 0)
@@ -463,14 +500,7 @@ css_process_crw(int rsid1, int rsid2)
* use stsch() to find out if the subchannel in question has come
* or gone.
*/
- ret = css_evaluate_subchannel(mchk_schid, 0);
- if (ret == -EAGAIN) {
- if (css_enqueue_subchannel_slow(mchk_schid)) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- }
- }
- return ret;
+ css_evaluate_subchannel(mchk_schid, 0);
}
static int __init
@@ -745,47 +775,6 @@ struct bus_type css_bus_type = {
subsys_initcall(init_channel_subsystem);
-int
-css_enqueue_subchannel_slow(struct subchannel_id schid)
-{
- struct slow_subchannel *new_slow_sch;
- unsigned long flags;
-
- new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
- if (!new_slow_sch)
- return -ENOMEM;
- new_slow_sch->schid = schid;
- spin_lock_irqsave(&slow_subchannel_lock, flags);
- list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
- spin_unlock_irqrestore(&slow_subchannel_lock, flags);
- return 0;
-}
-
-void
-css_clear_subchannel_slow_list(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&slow_subchannel_lock, flags);
- while (!list_empty(&slow_subchannels_head)) {
- struct slow_subchannel *slow_sch =
- list_entry(slow_subchannels_head.next,
- struct slow_subchannel, slow_list);
-
- list_del_init(slow_subchannels_head.next);
- kfree(slow_sch);
- }
- spin_unlock_irqrestore(&slow_subchannel_lock, flags);
-}
-
-
-
-int
-css_slow_subchannels_exist(void)
-{
- return (!list_empty(&slow_subchannels_head));
-}
-
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
EXPORT_SYMBOL_GPL(css_characteristics_avail);
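
Note: the css.c rework above replaces the kmalloc'd slow-path list with an idset bitmap plus css_schedule_eval()/css_schedule_eval_all(), and the worker drops the lock around each evaluation so an evaluation may itself queue further subchannels. A compact userspace sketch of that scheduling discipline (a 64-bit mask stands in for the idset; names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t set_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long long pending;          /* one bit per id, like the idset */

    static void schedule_eval(int id)           /* cf. css_schedule_eval() */
    {
        pthread_mutex_lock(&set_lock);
        pending |= 1ULL << id;
        pthread_mutex_unlock(&set_lock);
        /* the real code also queues slow_path_work on slow_path_wq here */
    }

    static void evaluate(int id)
    {
        printf("evaluating %d\n", id);          /* may call schedule_eval() again */
    }

    static void slow_path_worker(void)          /* cf. css_slow_path_func() */
    {
        pthread_mutex_lock(&set_lock);
        while (pending) {
            int id = __builtin_ctzll(pending);  /* "get first set bit" */
            pending &= ~(1ULL << id);           /* remove it from the set */
            pthread_mutex_unlock(&set_lock);    /* drop the lock while evaluating */
            evaluate(id);
            pthread_mutex_lock(&set_lock);
        }
        pthread_mutex_unlock(&set_lock);
    }

    int main(void)
    {
        schedule_eval(3);
        schedule_eval(17);
        slow_path_worker();
        return 0;
    }
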
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index ca2bab932a8..71fcfdc4280 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -4,8 +4,11 @@
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/types.h>
#include <asm/cio.h>
+#include <asm/chpid.h>
#include "schid.h"
@@ -143,13 +146,12 @@ extern void css_sch_device_unregister(struct subchannel *);
extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
-extern int css_process_crw(int, int);
+extern void css_process_crw(int, int);
extern void css_reiterate_subchannels(void);
+void css_update_ssd_info(struct subchannel *sch);
#define __MAX_SUBCHANNEL 65535
#define __MAX_SSID 3
-#define __MAX_CHPID 255
-#define __MAX_CSSID 0
struct channel_subsystem {
u8 cssid;
@@ -185,16 +187,12 @@ int device_trigger_verify(struct subchannel *sch);
void device_kill_pending_timer(struct subchannel *);
/* Helper functions to build lists for the slow path. */
-extern int css_enqueue_subchannel_slow(struct subchannel_id schid);
-void css_walk_subchannel_slow_list(void (*fn)(unsigned long));
-void css_clear_subchannel_slow_list(void);
-int css_slow_subchannels_exist(void);
-extern int need_rescan;
+void css_schedule_eval(struct subchannel_id schid);
+void css_schedule_eval_all(void);
int sch_is_pseudo_sch(struct subchannel *);
extern struct workqueue_struct *slow_path_wq;
-extern struct work_struct slow_path_work;
int subchannel_add_files (struct device *);
extern struct attribute_group *subch_attr_groups[];
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e322111fb36..03355902c58 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -56,13 +56,12 @@ ccw_bus_match (struct device * dev, struct device_driver * drv)
/* Store modalias string delimited by prefix/suffix string into buffer with
* specified size. Return length of resulting string (excluding trailing '\0')
* even if string doesn't fit buffer (snprintf semantics). */
-static int snprint_alias(char *buf, size_t size, const char *prefix,
+static int snprint_alias(char *buf, size_t size,
struct ccw_device_id *id, const char *suffix)
{
int len;
- len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type,
- id->cu_model);
+ len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
if (len > size)
return len;
buf += len;
@@ -85,53 +84,40 @@ static int ccw_uevent(struct device *dev, char **envp, int num_envp,
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_device_id *id = &(cdev->id);
int i = 0;
- int len;
+ int len = 0;
+ int ret;
+ char modalias_buf[30];
/* CU_TYPE= */
- len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1;
- if (len > buffer_size || i >= num_envp)
- return -ENOMEM;
- envp[i++] = buffer;
- buffer += len;
- buffer_size -= len;
+ ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
+ "CU_TYPE=%04X", id->cu_type);
+ if (ret)
+ return ret;
/* CU_MODEL= */
- len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1;
- if (len > buffer_size || i >= num_envp)
- return -ENOMEM;
- envp[i++] = buffer;
- buffer += len;
- buffer_size -= len;
+ ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
+ "CU_MODEL=%02X", id->cu_model);
+ if (ret)
+ return ret;
/* The next two can be zero, that's ok for us */
/* DEV_TYPE= */
- len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1;
- if (len > buffer_size || i >= num_envp)
- return -ENOMEM;
- envp[i++] = buffer;
- buffer += len;
- buffer_size -= len;
+ ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
+ "DEV_TYPE=%04X", id->dev_type);
+ if (ret)
+ return ret;
/* DEV_MODEL= */
- len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X",
- (unsigned char) id->dev_model) + 1;
- if (len > buffer_size || i >= num_envp)
- return -ENOMEM;
- envp[i++] = buffer;
- buffer += len;
- buffer_size -= len;
+ ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
+ "DEV_MODEL=%02X", id->dev_model);
+ if (ret)
+ return ret;
/* MODALIAS= */
- len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1;
- if (len > buffer_size || i >= num_envp)
- return -ENOMEM;
- envp[i++] = buffer;
- buffer += len;
- buffer_size -= len;
-
- envp[i] = NULL;
-
- return 0;
+ snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
+ ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
+ "MODALIAS=%s", modalias_buf);
+ return ret;
}
struct bus_type ccw_bus_type;
@@ -230,12 +216,18 @@ static ssize_t
chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
{
struct subchannel *sch = to_subchannel(dev);
- struct ssd_info *ssd = &sch->ssd_info;
+ struct chsc_ssd_info *ssd = &sch->ssd_info;
ssize_t ret = 0;
int chp;
+ int mask;
- for (chp = 0; chp < 8; chp++)
- ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
+ for (chp = 0; chp < 8; chp++) {
+ mask = 0x80 >> chp;
+ if (ssd->path_mask & mask)
+ ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
+ else
+ ret += sprintf(buf + ret, "00 ");
+ }
ret += sprintf (buf+ret, "\n");
return min((ssize_t)PAGE_SIZE, ret);
}
@@ -280,7 +272,7 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
struct ccw_device_id *id = &(cdev->id);
int len;
- len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1;
+ len = snprint_alias(buf, PAGE_SIZE, id, "\n") + 1;
return len > PAGE_SIZE ? PAGE_SIZE : len;
}
@@ -298,16 +290,10 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}
-static void ccw_device_unregister(struct work_struct *work)
+static void ccw_device_unregister(struct ccw_device *cdev)
{
- struct ccw_device_private *priv;
- struct ccw_device *cdev;
-
- priv = container_of(work, struct ccw_device_private, kick_work);
- cdev = priv->cdev;
if (test_and_clear_bit(1, &cdev->private->registered))
- device_unregister(&cdev->dev);
- put_device(&cdev->dev);
+ device_del(&cdev->dev);
}
static void
@@ -324,11 +310,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
spin_lock_irqsave(cdev->ccwlock, flags);
cdev->private->state = DEV_STATE_NOT_OPER;
spin_unlock_irqrestore(cdev->ccwlock, flags);
- if (get_device(&cdev->dev)) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_unregister);
- queue_work(ccw_device_work, &cdev->private->kick_work);
- }
+ ccw_device_unregister(cdev);
+ put_device(&cdev->dev);
return ;
}
sch = to_subchannel(cdev->dev.parent);
@@ -413,11 +396,60 @@ ccw_device_set_online(struct ccw_device *cdev)
return (ret == 0) ? -ENODEV : ret;
}
-static ssize_t
-online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static void online_store_handle_offline(struct ccw_device *cdev)
+{
+ if (cdev->private->state == DEV_STATE_DISCONNECTED)
+ ccw_device_remove_disconnected(cdev);
+ else if (cdev->drv && cdev->drv->set_offline)
+ ccw_device_set_offline(cdev);
+}
+
+static int online_store_recog_and_online(struct ccw_device *cdev)
+{
+ int ret;
+
+ /* Do device recognition, if needed. */
+ if (cdev->id.cu_type == 0) {
+ ret = ccw_device_recognition(cdev);
+ if (ret) {
+ printk(KERN_WARNING"Couldn't start recognition "
+ "for device %s (ret=%d)\n",
+ cdev->dev.bus_id, ret);
+ return ret;
+ }
+ wait_event(cdev->private->wait_q,
+ cdev->private->flags.recog_done);
+ }
+ if (cdev->drv && cdev->drv->set_online)
+ ccw_device_set_online(cdev);
+ return 0;
+}
+static void online_store_handle_online(struct ccw_device *cdev, int force)
+{
+ int ret;
+
+ ret = online_store_recog_and_online(cdev);
+ if (ret)
+ return;
+ if (force && cdev->private->state == DEV_STATE_BOXED) {
+ ret = ccw_device_stlck(cdev);
+ if (ret) {
+ printk(KERN_WARNING"ccw_device_stlck for device %s "
+ "returned %d!\n", cdev->dev.bus_id, ret);
+ return;
+ }
+ if (cdev->id.cu_type == 0)
+ cdev->private->state = DEV_STATE_NOT_OPER;
+ online_store_recog_and_online(cdev);
+ }
+
+}
+
+static ssize_t online_store (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct ccw_device *cdev = to_ccwdev(dev);
- int i, force, ret;
+ int i, force;
char *tmp;
if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
@@ -434,51 +466,17 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf
force = 0;
i = simple_strtoul(buf, &tmp, 16);
}
- if (i == 1) {
- /* Do device recognition, if needed. */
- if (cdev->id.cu_type == 0) {
- ret = ccw_device_recognition(cdev);
- if (ret) {
- printk(KERN_WARNING"Couldn't start recognition "
- "for device %s (ret=%d)\n",
- cdev->dev.bus_id, ret);
- goto out;
- }
- wait_event(cdev->private->wait_q,
- cdev->private->flags.recog_done);
- }
- if (cdev->drv && cdev->drv->set_online)
- ccw_device_set_online(cdev);
- } else if (i == 0) {
- if (cdev->private->state == DEV_STATE_DISCONNECTED)
- ccw_device_remove_disconnected(cdev);
- else if (cdev->drv && cdev->drv->set_offline)
- ccw_device_set_offline(cdev);
- }
- if (force && cdev->private->state == DEV_STATE_BOXED) {
- ret = ccw_device_stlck(cdev);
- if (ret) {
- printk(KERN_WARNING"ccw_device_stlck for device %s "
- "returned %d!\n", cdev->dev.bus_id, ret);
- goto out;
- }
- /* Do device recognition, if needed. */
- if (cdev->id.cu_type == 0) {
- cdev->private->state = DEV_STATE_NOT_OPER;
- ret = ccw_device_recognition(cdev);
- if (ret) {
- printk(KERN_WARNING"Couldn't start recognition "
- "for device %s (ret=%d)\n",
- cdev->dev.bus_id, ret);
- goto out;
- }
- wait_event(cdev->private->wait_q,
- cdev->private->flags.recog_done);
- }
- if (cdev->drv && cdev->drv->set_online)
- ccw_device_set_online(cdev);
+
+ switch (i) {
+ case 0:
+ online_store_handle_offline(cdev);
+ break;
+ case 1:
+ online_store_handle_online(cdev, force);
+ break;
+ default:
+ count = -EINVAL;
}
- out:
if (cdev->drv)
module_put(cdev->drv->owner);
atomic_set(&cdev->private->onoff, 0);
@@ -548,17 +546,10 @@ static struct attribute_group ccwdev_attr_group = {
.attrs = ccwdev_attrs,
};
-static int
-device_add_files (struct device *dev)
-{
- return sysfs_create_group(&dev->kobj, &ccwdev_attr_group);
-}
-
-static void
-device_remove_files(struct device *dev)
-{
- sysfs_remove_group(&dev->kobj, &ccwdev_attr_group);
-}
+struct attribute_group *ccwdev_attr_groups[] = {
+ &ccwdev_attr_group,
+ NULL,
+};
/* this is a simple abstraction for device_register that sets the
* correct bus type and adds the bus specific files */
@@ -573,10 +564,6 @@ static int ccw_device_register(struct ccw_device *cdev)
return ret;
set_bit(1, &cdev->private->registered);
- if ((ret = device_add_files(dev))) {
- if (test_and_clear_bit(1, &cdev->private->registered))
- device_del(dev);
- }
return ret;
}
@@ -648,10 +635,6 @@ ccw_device_add_changed(struct work_struct *work)
return;
}
set_bit(1, &cdev->private->registered);
- if (device_add_files(&cdev->dev)) {
- if (test_and_clear_bit(1, &cdev->private->registered))
- device_unregister(&cdev->dev);
- }
}
void ccw_device_do_unreg_rereg(struct work_struct *work)
@@ -664,9 +647,7 @@ void ccw_device_do_unreg_rereg(struct work_struct *work)
cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
- device_remove_files(&cdev->dev);
- if (test_and_clear_bit(1, &cdev->private->registered))
- device_del(&cdev->dev);
+ ccw_device_unregister(cdev);
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_add_changed);
queue_work(ccw_device_work, &cdev->private->kick_work);
@@ -705,6 +686,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
cdev->dev.parent = &sch->dev;
cdev->dev.release = ccw_device_release;
INIT_LIST_HEAD(&cdev->private->kick_work.entry);
+ cdev->dev.groups = ccwdev_attr_groups;
/* Do first half of device_register. */
device_initialize(&cdev->dev);
if (!get_device(&sch->dev)) {
@@ -736,6 +718,7 @@ static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
static void sch_attach_device(struct subchannel *sch,
struct ccw_device *cdev)
{
+ css_update_ssd_info(sch);
spin_lock_irq(sch->lock);
sch->dev.driver_data = cdev;
cdev->private->schid = sch->schid;
@@ -871,7 +854,7 @@ io_subchannel_register(struct work_struct *work)
priv = container_of(work, struct ccw_device_private, kick_work);
cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
-
+ css_update_ssd_info(sch);
/*
* io_subchannel_register() will also be called after device
* recognition has been done for a boxed device (which will already
@@ -1133,15 +1116,8 @@ io_subchannel_remove (struct subchannel *sch)
sch->dev.driver_data = NULL;
cdev->private->state = DEV_STATE_NOT_OPER;
spin_unlock_irqrestore(cdev->ccwlock, flags);
- /*
- * Put unregistration on workqueue to avoid livelocks on the css bus
- * semaphore.
- */
- if (get_device(&cdev->dev)) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_unregister);
- queue_work(ccw_device_work, &cdev->private->kick_work);
- }
+ ccw_device_unregister(cdev);
+ put_device(&cdev->dev);
return 0;
}
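
Note: the uevent rework above moves the repetitive buffer/index/length bookkeeping into add_uevent_var() calls. A self-contained userspace sketch of such a helper, assuming nothing beyond the C library (the function name and fields are illustrative, not the kernel API):

    #include <stdarg.h>
    #include <stdio.h>

    /* Append one "KEY=value" string to envp[], storing it in buf.  Mirrors the
     * bookkeeping the helper does for the caller: index, remaining buffer
     * space and the trailing NUL are all handled in one place. */
    static int add_env_var(char **envp, int num_envp, int *idx,
                           char *buf, int buf_size, int *len,
                           const char *fmt, ...)
    {
        va_list args;
        int n;

        if (*idx >= num_envp - 1)       /* keep room for the NULL terminator */
            return -1;
        va_start(args, fmt);
        n = vsnprintf(buf + *len, buf_size - *len, fmt, args);
        va_end(args);
        if (n >= buf_size - *len)
            return -1;                  /* would overflow the shared buffer */
        envp[(*idx)++] = buf + *len;
        *len += n + 1;                  /* keep the '\0' between entries */
        envp[*idx] = NULL;
        return 0;
    }

    int main(void)
    {
        char buf[128];
        char *envp[8];
        int i = 0, len = 0;

        add_env_var(envp, 8, &i, buf, sizeof(buf), &len, "CU_TYPE=%04X", 0x3990);
        add_env_var(envp, 8, &i, buf, sizeof(buf), &len, "CU_MODEL=%02X", 0xe9);
        for (int k = 0; envp[k]; k++)
            puts(envp[k]);
        return 0;
    }

The gain of the refactor is that the bounds checks and the terminating NULL entry live in one helper instead of being repeated after every snprintf().
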
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 089a3ddd626..898ec3b2beb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -15,6 +15,7 @@
#include <asm/ccwdev.h>
#include <asm/cio.h>
+#include <asm/chpid.h>
#include "cio.h"
#include "cio_debug.h"
@@ -22,6 +23,7 @@
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
+#include "chp.h"
int
device_is_online(struct subchannel *sch)
@@ -210,14 +212,18 @@ static void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
int mask, i;
+ struct chp_id chpid;
+ chp_id_init(&chpid);
for (i = 0; i<8; i++) {
mask = 0x80 >> i;
if (!(sch->lpm & mask))
continue;
if (old_lpm & mask)
continue;
- chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
+ chpid.id = sch->schib.pmcw.chpid[i];
+ if (!chp_is_registered(chpid))
+ css_schedule_eval_all();
}
}
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 7c7775aae38..16f59fcb66b 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -16,12 +16,14 @@
#include <asm/ccwdev.h>
#include <asm/idals.h>
+#include <asm/chpid.h>
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"
+#include "chp.h"
int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
{
@@ -606,9 +608,12 @@ void *
ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
{
struct subchannel *sch;
+ struct chp_id chpid;
sch = to_subchannel(cdev->dev.parent);
- return chsc_get_chp_desc(sch, chp_no);
+ chp_id_init(&chpid);
+ chpid.id = sch->schib.pmcw.chpid[chp_no];
+ return chp_get_chp_desc(chpid);
}
// FIXME: these have to go:
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
new file mode 100644
index 00000000000..16ea828e99f
--- /dev/null
+++ b/drivers/s390/cio/idset.c
@@ -0,0 +1,112 @@
+/*
+ * drivers/s390/cio/idset.c
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <asm/bitops.h>
+#include "idset.h"
+#include "css.h"
+
+struct idset {
+ int num_ssid;
+ int num_id;
+ unsigned long bitmap[0];
+};
+
+static inline unsigned long bitmap_size(int num_ssid, int num_id)
+{
+ return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long);
+}
+
+static struct idset *idset_new(int num_ssid, int num_id)
+{
+ struct idset *set;
+
+ set = kzalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id),
+ GFP_KERNEL);
+ if (set) {
+ set->num_ssid = num_ssid;
+ set->num_id = num_id;
+ }
+ return set;
+}
+
+void idset_free(struct idset *set)
+{
+ kfree(set);
+}
+
+void idset_clear(struct idset *set)
+{
+ memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id));
+}
+
+void idset_fill(struct idset *set)
+{
+ memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
+}
+
+static inline void idset_add(struct idset *set, int ssid, int id)
+{
+ set_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline void idset_del(struct idset *set, int ssid, int id)
+{
+ clear_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline int idset_contains(struct idset *set, int ssid, int id)
+{
+ return test_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline int idset_get_first(struct idset *set, int *ssid, int *id)
+{
+ int bitnum;
+
+ bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
+ if (bitnum >= set->num_ssid * set->num_id)
+ return 0;
+ *ssid = bitnum / set->num_id;
+ *id = bitnum % set->num_id;
+ return 1;
+}
+
+struct idset *idset_sch_new(void)
+{
+ return idset_new(__MAX_SSID + 1, __MAX_SUBCHANNEL + 1);
+}
+
+void idset_sch_add(struct idset *set, struct subchannel_id schid)
+{
+ idset_add(set, schid.ssid, schid.sch_no);
+}
+
+void idset_sch_del(struct idset *set, struct subchannel_id schid)
+{
+ idset_del(set, schid.ssid, schid.sch_no);
+}
+
+int idset_sch_contains(struct idset *set, struct subchannel_id schid)
+{
+ return idset_contains(set, schid.ssid, schid.sch_no);
+}
+
+int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
+{
+ int ssid = 0;
+ int id = 0;
+ int rc;
+
+ rc = idset_get_first(set, &ssid, &id);
+ if (rc) {
+ init_subchannel_id(schid);
+ schid->ssid = ssid;
+ schid->sch_no = id;
+ }
+ return rc;
+}
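
Note: the new idset maps an (ssid, id) pair to a single bit at position ssid * num_id + id in a flat bitmap, so membership tests and "find first" are cheap and the allocation happens once. A userspace rendering of the same layout (standard C in place of the kernel's bitops; names kept close to the original for readability):

    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    struct idset {
        int num_ssid;
        int num_id;
        unsigned long bitmap[];
    };

    static size_t bitmap_longs(int num_ssid, int num_id)
    {
        size_t bits = (size_t)num_ssid * num_id;
        return (bits + sizeof(unsigned long) * CHAR_BIT - 1) /
               (sizeof(unsigned long) * CHAR_BIT);
    }

    static struct idset *idset_new(int num_ssid, int num_id)
    {
        size_t n = bitmap_longs(num_ssid, num_id);
        struct idset *set = calloc(1, sizeof(*set) + n * sizeof(unsigned long));

        if (set) {
            set->num_ssid = num_ssid;
            set->num_id = num_id;
        }
        return set;
    }

    static void idset_add(struct idset *set, int ssid, int id)
    {
        size_t bit = (size_t)ssid * set->num_id + id;   /* flat bit index */
        set->bitmap[bit / (sizeof(unsigned long) * CHAR_BIT)] |=
            1UL << (bit % (sizeof(unsigned long) * CHAR_BIT));
    }

    static int idset_contains(struct idset *set, int ssid, int id)
    {
        size_t bit = (size_t)ssid * set->num_id + id;
        return !!(set->bitmap[bit / (sizeof(unsigned long) * CHAR_BIT)] &
                  (1UL << (bit % (sizeof(unsigned long) * CHAR_BIT))));
    }

    int main(void)
    {
        struct idset *set = idset_new(4, 65536);  /* __MAX_SSID+1, __MAX_SUBCHANNEL+1 */

        idset_add(set, 1, 42);
        printf("contains(1,42)=%d contains(0,42)=%d\n",
               idset_contains(set, 1, 42), idset_contains(set, 0, 42));
        free(set);
        return 0;
    }
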
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
new file mode 100644
index 00000000000..144466ab8c1
--- /dev/null
+++ b/drivers/s390/cio/idset.h
@@ -0,0 +1,25 @@
+/*
+ * drivers/s390/cio/idset.h
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef S390_IDSET_H
+#define S390_IDSET_H S390_IDSET_H
+
+#include "schid.h"
+
+struct idset;
+
+void idset_free(struct idset *set);
+void idset_clear(struct idset *set);
+void idset_fill(struct idset *set);
+
+struct idset *idset_sch_new(void);
+void idset_sch_add(struct idset *set, struct subchannel_id id);
+void idset_sch_del(struct idset *set, struct subchannel_id id);
+int idset_sch_contains(struct idset *set, struct subchannel_id id);
+int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
+
+#endif /* S390_IDSET_H */
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index ad6d8294006..7153dd95908 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -1,6 +1,7 @@
#ifndef S390_CIO_IOASM_H
#define S390_CIO_IOASM_H
+#include <asm/chpid.h>
#include "schid.h"
/*
@@ -189,9 +190,9 @@ static inline int chsc(void *chsc_area)
return cc;
}
-static inline int rchp(int chpid)
+static inline int rchp(struct chp_id chpid)
{
- register unsigned int reg1 asm ("1") = chpid;
+ register struct chp_id reg1 asm ("1") = chpid;
int ccode;
asm volatile(
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 0d6d5fcc128..570a960bfb5 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -1638,21 +1638,19 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
struct channel *ch;
DBF_TEXT(trace, 2, __FUNCTION__);
- if ((ch =
- (struct channel *) kmalloc(sizeof (struct channel),
- GFP_KERNEL)) == NULL) {
+ ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
+ if (!ch) {
ctc_pr_warn("ctc: Out of memory in add_channel\n");
return -1;
}
- memset(ch, 0, sizeof (struct channel));
- if ((ch->ccw = kmalloc(8*sizeof(struct ccw1),
- GFP_KERNEL | GFP_DMA)) == NULL) {
+ /* assure all flags and counters are reset */
+ ch->ccw = kzalloc(8 * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+ if (!ch->ccw) {
kfree(ch);
ctc_pr_warn("ctc: Out of memory in add_channel\n");
return -1;
}
- memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // assure all flags and counters are reset
/**
* "static" ccws are used in the following way:
@@ -1692,15 +1690,14 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
return -1;
}
fsm_newstate(ch->fsm, CH_STATE_IDLE);
- if ((ch->irb = kmalloc(sizeof (struct irb),
- GFP_KERNEL)) == NULL) {
+ ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
+ if (!ch->irb) {
ctc_pr_warn("ctc: Out of memory in add_channel\n");
kfree_fsm(ch->fsm);
kfree(ch->ccw);
kfree(ch);
return -1;
}
- memset(ch->irb, 0, sizeof (struct irb));
while (*c && less_than((*c)->id, ch->id))
c = &(*c)->next;
if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
@@ -2745,14 +2742,13 @@ ctc_probe_device(struct ccwgroup_device *cgdev)
if (!get_device(&cgdev->dev))
return -ENODEV;
- priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(struct ctc_priv), GFP_KERNEL);
if (!priv) {
ctc_pr_err("%s: Out of memory\n", __func__);
put_device(&cgdev->dev);
return -ENOMEM;
}
- memset(priv, 0, sizeof (struct ctc_priv));
rc = ctc_add_files(&cgdev->dev);
if (rc) {
kfree(priv);
@@ -2793,10 +2789,9 @@ ctc_init_netdevice(struct net_device * dev, int alloc_device,
DBF_TEXT(setup, 3, __FUNCTION__);
if (alloc_device) {
- dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
if (!dev)
return NULL;
- memset(dev, 0, sizeof (struct net_device));
}
dev->priv = privptr;
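
Note: the ctcmain.c hunks are a mechanical kmalloc()+memset() to kzalloc() conversion. The userspace equivalent, shown only for comparison (the struct is illustrative):

    #include <stdlib.h>
    #include <string.h>

    struct channel_like { int counters[8]; void *ccw; };

    int main(void)
    {
        /* old style: allocate, then zero by hand */
        struct channel_like *a = malloc(sizeof(*a));
        if (a)
            memset(a, 0, sizeof(*a));

        /* new style: one call returns already-zeroed memory */
        struct channel_like *b = calloc(1, sizeof(*b));

        free(a);
        free(b);
        return 0;
    }
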
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 806bb1a921e..644a06eba82 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -21,6 +21,7 @@
#include "cio/cio.h"
#include "cio/chsc.h"
#include "cio/css.h"
+#include "cio/chp.h"
#include "s390mach.h"
static struct semaphore m_sem;
@@ -44,14 +45,13 @@ static int
s390_collect_crw_info(void *param)
{
struct crw crw[2];
- int ccode, ret, slow;
+ int ccode;
struct semaphore *sem;
unsigned int chain;
sem = (struct semaphore *)param;
repeat:
down_interruptible(sem);
- slow = 0;
chain = 0;
while (1) {
if (unlikely(chain > 1)) {
@@ -84,9 +84,8 @@ repeat:
/* Check for overflows. */
if (crw[chain].oflw) {
pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
- css_reiterate_subchannels();
+ css_schedule_eval_all();
chain = 0;
- slow = 1;
continue;
}
switch (crw[chain].rsc) {
@@ -94,10 +93,7 @@ repeat:
if (crw[0].chn && !chain)
break;
pr_debug("source is subchannel %04X\n", crw[0].rsid);
- ret = css_process_crw (crw[0].rsid,
- chain ? crw[1].rsid : 0);
- if (ret == -EAGAIN)
- slow = 1;
+ css_process_crw(crw[0].rsid, chain ? crw[1].rsid : 0);
break;
case CRW_RSC_MONITOR:
pr_debug("source is monitoring facility\n");
@@ -116,28 +112,23 @@ repeat:
}
switch (crw[0].erc) {
case CRW_ERC_IPARM: /* Path has come. */
- ret = chp_process_crw(crw[0].rsid, 1);
+ chp_process_crw(crw[0].rsid, 1);
break;
case CRW_ERC_PERRI: /* Path has gone. */
case CRW_ERC_PERRN:
- ret = chp_process_crw(crw[0].rsid, 0);
+ chp_process_crw(crw[0].rsid, 0);
break;
default:
pr_debug("Don't know how to handle erc=%x\n",
crw[0].erc);
- ret = 0;
}
- if (ret == -EAGAIN)
- slow = 1;
break;
case CRW_RSC_CONFIG:
pr_debug("source is configuration-alert facility\n");
break;
case CRW_RSC_CSS:
pr_debug("source is channel subsystem\n");
- ret = chsc_process_crw();
- if (ret == -EAGAIN)
- slow = 1;
+ chsc_process_crw();
break;
default:
pr_debug("unknown source\n");
@@ -146,8 +137,6 @@ repeat:
/* chain is always 0 or 1 here. */
chain = crw[chain].chn ? chain + 1 : 0;
}
- if (slow)
- queue_work(slow_path_wq, &slow_path_work);
goto repeat;
return 0;
}
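
Note: in the CRW loop above, a set chaining bit means the next word belongs to the same event (subchannel number plus subsystem id), so the handler collects up to two words before acting. A small userspace sketch of that chaining logic, with a made-up record log:

    #include <stdio.h>

    struct crw_like { unsigned int chn; unsigned int rsid; };

    int main(void)
    {
        /* two chained records (subchannel + subsystem id), then a single one */
        struct crw_like log[] = { {1, 0x0004}, {0, 0x0001}, {0, 0x0012} };
        unsigned int chain = 0;
        struct crw_like crw[2];

        for (unsigned int i = 0; i < sizeof(log) / sizeof(log[0]); i++) {
            if (chain > 1) {            /* tolerate at most two chained words */
                chain = 0;
                continue;
            }
            crw[chain] = log[i];
            if (crw[chain].chn) {       /* a chained word follows: keep collecting */
                chain++;
                continue;
            }
            if (chain)
                printf("event: rsid %04x / %04x\n", crw[0].rsid, crw[1].rsid);
            else
                printf("event: rsid %04x\n", crw[0].rsid);
            chain = 0;                  /* next record starts a new event */
        }
        return 0;
    }
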
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 090743d2f91..19343f9675c 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -357,6 +357,24 @@ static __init int create_proc_sysinfo(void)
__initcall(create_proc_sysinfo);
+int get_cpu_capability(unsigned int *capability)
+{
+ struct sysinfo_1_2_2 *info;
+ int rc;
+
+ info = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ rc = stsi(info, 1, 2, 2);
+ if (rc == -ENOSYS)
+ goto out;
+ rc = 0;
+ *capability = info->capability;
+out:
+ free_page((unsigned long) info);
+ return rc;
+}
+
/*
* CPU capability might have changed. Therefore recalculate loops_per_jiffy.
*/
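
Note: get_cpu_capability() above follows the common pattern of a zeroed temporary page, a single query, and one exit label that frees the buffer on every path. A userspace sketch of the same shape (query_info() is only a stand-in for stsi(); names are illustrative):

    #include <stdlib.h>
    #include <string.h>

    static int query_info(char *page)
    {
        strcpy(page, "capability=42");   /* pretend the hardware filled the page */
        return 0;
    }

    static int get_capability(char *out, size_t outlen)
    {
        char *page;
        int rc;

        page = calloc(1, 4096);          /* cf. get_zeroed_page() */
        if (!page)
            return -1;
        rc = query_info(page);
        if (rc)
            goto out;                    /* single exit path frees the buffer */
        strncpy(out, page, outlen - 1);
        out[outlen - 1] = '\0';
    out:
        free(page);                      /* cf. free_page() */
        return rc;
    }

    int main(void)
    {
        char buf[64];
        return get_capability(buf, sizeof(buf)) ? 1 : 0;
    }
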
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 6d7e279b149..dc8f99ee305 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -139,8 +139,15 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
#define pte_same(A,B) (pte_val(A) == pte_val(B))
#endif
-#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
-#define page_test_and_clear_dirty(page) (0)
+#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
+#define page_test_dirty(page) (0)
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
+#define page_clear_dirty(page) do { } while (0)
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte) pte_dirty(pte)
#else
#define pte_maybe_dirty(pte) (1)
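
Note: the pgtable.h change splits the combined page_test_and_clear_dirty() hook into separate test and clear hooks, so the two steps can be provided and used independently. A toy illustration of the difference between the two API shapes (these are not the kernel interfaces themselves):

    #include <stdbool.h>
    #include <stdio.h>

    static bool dirty = true;

    /* combined form: the clear always happens, even if the caller only
     * needed the answer to the test */
    static bool test_and_clear_dirty(void) { bool d = dirty; dirty = false; return d; }

    /* split form: the caller decides whether the clear is needed */
    static bool test_dirty(void)  { return dirty; }
    static void clear_dirty(void) { dirty = false; }

    int main(void)
    {
        if (test_dirty()) {          /* only clear when the test says so */
            clear_dirty();
            printf("page was dirty\n");
        }
        (void)test_and_clear_dirty();
        return 0;
    }
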
diff --git a/include/asm-s390/bug.h b/include/asm-s390/bug.h
index 87689836394..838684dc6d3 100644
--- a/include/asm-s390/bug.h
+++ b/include/asm-s390/bug.h
@@ -1,27 +1,70 @@
-#ifndef _S390_BUG_H
-#define _S390_BUG_H
+#ifndef _ASM_S390_BUG_H
+#define _ASM_S390_BUG_H
#include <linux/kernel.h>
#ifdef CONFIG_BUG
-static inline __attribute__((noreturn)) void __do_illegal_op(void)
-{
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
- __builtin_trap();
+#ifdef CONFIG_64BIT
+#define S390_LONG ".quad"
#else
- asm volatile(".long 0");
+#define S390_LONG ".long"
#endif
-}
-#define BUG() do { \
- printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
- __do_illegal_op(); \
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#define __EMIT_BUG(x) do { \
+ asm volatile( \
+ "0: j 0b+2\n" \
+ "1:\n" \
+ ".section .rodata.str,\"aMS\",@progbits,1\n" \
+ "2: .asciz \""__FILE__"\"\n" \
+ ".previous\n" \
+ ".section __bug_table,\"a\"\n" \
+ "3:\t" S390_LONG "\t1b,2b\n" \
+ " .short %0,%1\n" \
+ " .org 3b+%2\n" \
+ ".previous\n" \
+ : : "i" (__LINE__), \
+ "i" (x), \
+ "i" (sizeof(struct bug_entry))); \
} while (0)
+#else /* CONFIG_DEBUG_BUGVERBOSE */
+
+#define __EMIT_BUG(x) do { \
+ asm volatile( \
+ "0: j 0b+2\n" \
+ "1:\n" \
+ ".section __bug_table,\"a\"\n" \
+ "2:\t" S390_LONG "\t1b\n" \
+ " .short %0\n" \
+ " .org 2b+%1\n" \
+ ".previous\n" \
+ : : "i" (x), \
+ "i" (sizeof(struct bug_entry))); \
+} while (0)
+
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
+#define BUG() __EMIT_BUG(0)
+
+#define WARN_ON(x) ({ \
+ typeof(x) __ret_warn_on = (x); \
+ if (__builtin_constant_p(__ret_warn_on)) { \
+ if (__ret_warn_on) \
+ __EMIT_BUG(BUGFLAG_WARNING); \
+ } else { \
+ if (unlikely(__ret_warn_on)) \
+ __EMIT_BUG(BUGFLAG_WARNING); \
+ } \
+ unlikely(__ret_warn_on); \
+})
+
#define HAVE_ARCH_BUG
-#endif
+#define HAVE_ARCH_WARN_ON
+#endif /* CONFIG_BUG */
#include <asm-generic/bug.h>
-#endif
+#endif /* _ASM_S390_BUG_H */
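The rewritten header moves s390 to the generic bug table: __EMIT_BUG plants a trapping instruction sequence plus a struct bug_entry record (address, file and line when CONFIG_DEBUG_BUGVERBOSE is set, and the flags word) in the __bug_table section, which the trap handler resolves through report_bug(). Nothing changes for callers; a trivial hedged sketch, with example_check() hypothetical:

#include <linux/errno.h>
#include <asm/bug.h>

static int example_check(void *ptr, int count)
{
	if (WARN_ON(count < 0))		/* emits a BUGFLAG_WARNING entry, continues */
		return -EINVAL;
	BUG_ON(ptr == NULL);		/* unrecoverable: traps via __EMIT_BUG(0) */
	return 0;
}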
diff --git a/include/asm-s390/ccwgroup.h b/include/asm-s390/ccwgroup.h
index d2f9c0d53a9..925b3ddfa14 100644
--- a/include/asm-s390/ccwgroup.h
+++ b/include/asm-s390/ccwgroup.h
@@ -11,6 +11,7 @@ struct ccwgroup_device {
CCWGROUP_ONLINE,
} state;
atomic_t onoff;
+ struct mutex reg_mutex;
unsigned int count; /* number of attached slave devices */
struct device dev; /* master device */
struct ccw_device *cdev[0]; /* variable number, allocate as needed */
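The new reg_mutex member gives every ccwgroup device a per-device lock that the registration and ungroup paths in drivers/s390/cio/ccwgroup.c can hold, serializing concurrent creation, online/offline and removal of the same group device. A minimal sketch of the locking pattern only; example_ungroup() is hypothetical and the mutex is assumed to be initialized when the group device is allocated:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <asm/ccwgroup.h>

static int example_ungroup(struct ccwgroup_device *gdev)
{
	int ret = 0;

	mutex_lock(&gdev->reg_mutex);
	if (gdev->state == CCWGROUP_ONLINE)
		ret = -EBUSY;	/* refuse to ungroup while online */
	/* else: unregister the slave ccw devices and the group device */
	mutex_unlock(&gdev->reg_mutex);
	return ret;
}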
diff --git a/include/asm-s390/chpid.h b/include/asm-s390/chpid.h
new file mode 100644
index 00000000000..b203336fd89
--- /dev/null
+++ b/include/asm-s390/chpid.h
@@ -0,0 +1,53 @@
+/*
+ * drivers/s390/cio/chpid.h
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_CHPID_H
+#define _ASM_S390_CHPID_H _ASM_S390_CHPID_H
+
+#include <linux/string.h>
+#include <asm/types.h>
+#include <asm/cio.h>
+
+#define __MAX_CHPID 255
+
+struct chp_id {
+ u8 reserved1;
+ u8 cssid;
+ u8 reserved2;
+ u8 id;
+} __attribute__((packed));
+
+static inline void chp_id_init(struct chp_id *chpid)
+{
+ memset(chpid, 0, sizeof(struct chp_id));
+}
+
+static inline int chp_id_is_equal(struct chp_id *a, struct chp_id *b)
+{
+ return (a->id == b->id) && (a->cssid == b->cssid);
+}
+
+static inline void chp_id_next(struct chp_id *chpid)
+{
+ if (chpid->id < __MAX_CHPID)
+ chpid->id++;
+ else {
+ chpid->id = 0;
+ chpid->cssid++;
+ }
+}
+
+static inline int chp_id_is_valid(struct chp_id *chpid)
+{
+ return (chpid->cssid <= __MAX_CSSID);
+}
+
+
+#define chp_id_for_each(c) \
+ for (chp_id_init(c); chp_id_is_valid(c); chp_id_next(c))
+
+#endif /* _ASM_S390_CHPID_H */
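chp_id carries a channel-path identifier in the (cssid, id) form used by the channel-subsystem instructions, and chp_id_for_each() walks every possible value: ids 0..__MAX_CHPID for each cssid up to __MAX_CSSID. A small usage sketch; example_count_chpids() is hypothetical:

#include <asm/chpid.h>

static int example_count_chpids(void)
{
	struct chp_id chpid;
	int count = 0;

	chp_id_for_each(&chpid)
		count++;
	/* (__MAX_CSSID + 1) * (__MAX_CHPID + 1) possible ids */
	return count;
}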
diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h
index d9278503098..f738d282758 100644
--- a/include/asm-s390/cio.h
+++ b/include/asm-s390/cio.h
@@ -13,6 +13,7 @@
#ifdef __KERNEL__
#define LPM_ANYPATH 0xff
+#define __MAX_CSSID 0
/*
* subchannel status word
@@ -292,6 +293,13 @@ extern void css_schedule_reprobe(void);
extern void reipl_ccw_dev(struct ccw_dev_id *id);
+struct cio_iplinfo {
+ u16 devno;
+ int is_qdio;
+};
+
+extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo);
+
#endif
#endif
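cio_get_iplinfo() lets the reworked IPL code ask the common I/O layer about the boot device: the device number of the IPL subchannel and whether it was IPLed via QDIO, i.e. an FCP device. A hedged usage sketch; example_print_ipl_device() is hypothetical:

#include <linux/kernel.h>
#include <asm/cio.h>

static void example_print_ipl_device(void)
{
	struct cio_iplinfo iplinfo;

	if (cio_get_iplinfo(&iplinfo))
		return;	/* no usable IPL information */
	printk(KERN_INFO "IPL device 0.0.%04x%s\n", iplinfo.devno,
	       iplinfo.is_qdio ? " (QDIO/FCP)" : "");
}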
diff --git a/include/asm-s390/ipl.h b/include/asm-s390/ipl.h
index 0eb64083480..bdcd448d43f 100644
--- a/include/asm-s390/ipl.h
+++ b/include/asm-s390/ipl.h
@@ -8,6 +8,8 @@
#define _ASM_S390_IPL_H
#include <asm/types.h>
+#include <asm/cio.h>
+#include <asm/setup.h>
#define IPL_PARMBLOCK_ORIGIN 0x2000
@@ -74,12 +76,12 @@ struct ipl_parameter_block {
} __attribute__((packed));
/*
- * IPL validity flags and parameters as detected in head.S
+ * IPL validity flags
*/
extern u32 ipl_flags;
-extern u16 ipl_devno;
extern u32 dump_prefix_page;
+
extern void do_reipl(void);
extern void ipl_save_parameters(void);
@@ -89,6 +91,35 @@ enum {
IPL_NSS_VALID = 4,
};
+enum ipl_type {
+ IPL_TYPE_UNKNOWN = 1,
+ IPL_TYPE_CCW = 2,
+ IPL_TYPE_FCP = 4,
+ IPL_TYPE_FCP_DUMP = 8,
+ IPL_TYPE_NSS = 16,
+};
+
+struct ipl_info
+{
+ enum ipl_type type;
+ union {
+ struct {
+ struct ccw_dev_id dev_id;
+ } ccw;
+ struct {
+ struct ccw_dev_id dev_id;
+ u64 wwpn;
+ u64 lun;
+ } fcp;
+ struct {
+ char name[NSS_NAME_SIZE + 1];
+ } nss;
+ } data;
+};
+
+extern struct ipl_info ipl_info;
+extern void setup_ipl_info(void);
+
/*
* DIAG 308 support
*/
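setup_ipl_info() fills the global ipl_info structure early during boot, so later code (re-IPL, shutdown actions, zfcpdump) can simply switch on the detected IPL type instead of re-parsing the parameter block. A hedged sketch of a consumer; example_show_ipl() is hypothetical:

#include <linux/kernel.h>
#include <asm/ipl.h>

static void example_show_ipl(void)
{
	switch (ipl_info.type) {
	case IPL_TYPE_CCW:
		printk(KERN_INFO "IPL from CCW device 0.%x.%04x\n",
		       ipl_info.data.ccw.dev_id.ssid,
		       ipl_info.data.ccw.dev_id.devno);
		break;
	case IPL_TYPE_FCP:
	case IPL_TYPE_FCP_DUMP:
		printk(KERN_INFO "IPL from FCP device, wwpn 0x%016llx lun 0x%016llx\n",
		       (unsigned long long) ipl_info.data.fcp.wwpn,
		       (unsigned long long) ipl_info.data.fcp.lun);
		break;
	case IPL_TYPE_NSS:
		printk(KERN_INFO "IPL from NSS \"%s\"\n", ipl_info.data.nss.name);
		break;
	default:
		printk(KERN_INFO "IPL type unknown\n");
		break;
	}
}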
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 4a31d0a7ee8..ffc9788a21a 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -147,6 +147,52 @@ void pgm_check_handler(void);
void mcck_int_handler(void);
void io_int_handler(void);
+struct save_area_s390 {
+ u32 ext_save;
+ u64 timer;
+ u64 clk_cmp;
+ u8 pad1[24];
+ u8 psw[8];
+ u32 pref_reg;
+ u8 pad2[20];
+ u32 acc_regs[16];
+ u64 fp_regs[4];
+ u32 gp_regs[16];
+ u32 ctrl_regs[16];
+} __attribute__((packed));
+
+struct save_area_s390x {
+ u64 fp_regs[16];
+ u64 gp_regs[16];
+ u8 psw[16];
+ u8 pad1[8];
+ u32 pref_reg;
+ u32 fp_ctrl_reg;
+ u8 pad2[4];
+ u32 tod_reg;
+ u64 timer;
+ u64 clk_cmp;
+ u8 pad3[8];
+ u32 acc_regs[16];
+ u64 ctrl_regs[16];
+} __attribute__((packed));
+
+union save_area {
+ struct save_area_s390 s390;
+ struct save_area_s390x s390x;
+};
+
+#define SAVE_AREA_BASE_S390 0xd4
+#define SAVE_AREA_BASE_S390X 0x1200
+
+#ifndef __s390x__
+#define SAVE_AREA_SIZE sizeof(struct save_area_s390)
+#define SAVE_AREA_BASE SAVE_AREA_BASE_S390
+#else
+#define SAVE_AREA_SIZE sizeof(struct save_area_s390x)
+#define SAVE_AREA_BASE SAVE_AREA_BASE_S390X
+#endif
+
struct _lowcore
{
#ifndef __s390x__
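The save_area layouts describe the register save area that store-status writes at SAVE_AREA_BASE for the 31-bit and 64-bit machine states; zfcpdump collects one per CPU in zfcpdump_save_areas[] (declared in the asm-s390/smp.h hunk below). A minimal hedged sketch of reading such a snapshot; example_show_prefix() is hypothetical:

#include <linux/kernel.h>
#include <asm/lowcore.h>

static void example_show_prefix(const union save_area *sa)
{
#ifdef __s390x__
	printk(KERN_INFO "prefix register: %08x\n", sa->s390x.pref_reg);
#else
	printk(KERN_INFO "prefix register: %08x\n", sa->s390.pref_reg);
#endif
}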
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 13c16546eff..8fe8d42e64c 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -753,14 +753,14 @@ ptep_establish(struct vm_area_struct *vma,
* should therefore only be called if it is not mapped in any
* address space.
*/
-static inline int page_test_and_clear_dirty(struct page *page)
+static inline int page_test_dirty(struct page *page)
{
- unsigned long physpage = page_to_phys(page);
- int skey = page_get_storage_key(physpage);
+ return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
+}
- if (skey & _PAGE_CHANGED)
- page_set_storage_key(physpage, skey & ~_PAGE_CHANGED);
- return skey & _PAGE_CHANGED;
+static inline void page_clear_dirty(struct page *page)
+{
+ page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
}
/*
@@ -953,7 +953,8 @@ extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
-#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
+#define __HAVE_ARCH_PAGE_TEST_DIRTY
+#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#include <asm-generic/pgtable.h>
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 33b80ced4bc..e0fcea8c64c 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -57,6 +57,7 @@ struct cpuinfo_S390
extern void s390_adjust_jiffies(void);
extern void print_cpu_info(struct cpuinfo_S390 *);
+extern int get_cpu_capability(unsigned int *);
/* Lazy FPU handling on uni-processor */
extern struct task_struct *last_task_used_math;
@@ -196,6 +197,7 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
extern char *task_show_regs(struct task_struct *task, char *buffer);
extern void show_registers(struct pt_regs *regs);
+extern void show_code(struct pt_regs *regs);
extern void show_trace(struct task_struct *task, unsigned long *sp);
unsigned long get_wchan(struct task_struct *p);
diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h
index 468b9701840..21ed6477321 100644
--- a/include/asm-s390/sclp.h
+++ b/include/asm-s390/sclp.h
@@ -9,6 +9,7 @@
#define _ASM_S390_SCLP_H
#include <linux/types.h>
+#include <asm/chpid.h>
struct sccb_header {
u16 length;
@@ -33,7 +34,20 @@ struct sclp_readinfo_sccb {
u8 _reserved3[4096 - 112]; /* 112-4095 */
} __attribute__((packed, aligned(4096)));
+#define SCLP_CHP_INFO_MASK_SIZE 32
+
+struct sclp_chp_info {
+ u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
+ u8 standby[SCLP_CHP_INFO_MASK_SIZE];
+ u8 configured[SCLP_CHP_INFO_MASK_SIZE];
+};
+
extern struct sclp_readinfo_sccb s390_readinfo_sccb;
extern void sclp_readinfo_early(void);
+extern int sclp_sdias_blk_count(void);
+extern int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
+extern int sclp_chp_configure(struct chp_id chpid);
+extern int sclp_chp_deconfigure(struct chp_id chpid);
+extern int sclp_chp_read_info(struct sclp_chp_info *info);
#endif /* _ASM_S390_SCLP_H */
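The sclp_chp_* additions back the new channel-path configure/deconfigure support: sclp_chp_read_info() fills three per-CHPID bitmasks (recognized, standby, configured) and sclp_chp_configure()/sclp_chp_deconfigure() request a state change for a single chp_id. A hedged sketch of testing one mask bit; example_chp_is_standby() is hypothetical, and the bit numbering (one bit per CHPID, most significant bit of byte 0 first) is an assumption made for illustration only:

#include <asm/chpid.h>
#include <asm/sclp.h>

static int example_chp_is_standby(const struct sclp_chp_info *info,
				  struct chp_id chpid)
{
	int bit = chpid.cssid * (__MAX_CHPID + 1) + chpid.id;

	/* 32 mask bytes cover the 256 CHPIDs of css 0 */
	return (info->standby[bit >> 3] & (0x80 >> (bit & 7))) != 0;
}

A caller would typically obtain the masks with sclp_chp_read_info() first and then test the chp_id of interest.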
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index 44c7aee2bd3..a76a6b8fd88 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -40,6 +40,7 @@ struct mem_chunk {
};
extern struct mem_chunk memory_chunk[];
+extern unsigned long real_memory_size;
#ifdef CONFIG_S390_SWITCH_AMODE
extern unsigned int switch_amode;
@@ -77,6 +78,7 @@ extern unsigned long machine_flags;
#endif /* __s390x__ */
#define MACHINE_HAS_SCLP (!MACHINE_IS_P390)
+#define ZFCPDUMP_HSA_SIZE (32UL<<20)
/*
* Console mode. Override with conmode=
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index b957e4cda46..0a28e6d6ef4 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -54,9 +54,6 @@ extern int smp_call_function_on(void (*func) (void *info), void *info,
#define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
-extern int smp_get_cpu(cpumask_t cpu_map);
-extern void smp_put_cpu(int cpu);
-
static inline __u16 hard_smp_processor_id(void)
{
__u16 cpu_address;
@@ -114,9 +111,8 @@ static inline void smp_send_stop(void)
}
#define smp_cpu_not_running(cpu) 1
-#define smp_get_cpu(cpu) ({ 0; })
-#define smp_put_cpu(cpu) ({ 0; })
#define smp_setup_cpu_possible_map() do { } while (0)
#endif
+extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 9cd0d0eaf52..96326594e55 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -133,7 +133,7 @@
static inline void SetPageUptodate(struct page *page)
{
if (!test_and_set_bit(PG_uptodate, &page->flags))
- page_test_and_clear_dirty(page);
+ page_clear_dirty(page);
}
#else
#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
diff --git a/mm/rmap.c b/mm/rmap.c
index b82146e6dfc..59da5b734c8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -498,8 +498,10 @@ int page_mkclean(struct page *page)
struct address_space *mapping = page_mapping(page);
if (mapping)
ret = page_mkclean_file(mapping, page);
- if (page_test_and_clear_dirty(page))
+ if (page_test_dirty(page)) {
+ page_clear_dirty(page);
ret = 1;
+ }
}
return ret;
@@ -605,8 +607,10 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
* Leaving it set also helps swapoff to reinstate ptes
* faster for those pages still in swapcache.
*/
- if (page_test_and_clear_dirty(page))
+ if (page_test_dirty(page)) {
+ page_clear_dirty(page);
set_page_dirty(page);
+ }
__dec_zone_page_state(page,
PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
}