Diffstat (limited to 'drivers/s390')
32 files changed, 1535 insertions, 266 deletions
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 6a89cefe99b..0c67258fb9e 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -291,7 +291,7 @@ dasd_parse_keyword( char *parsestring ) { dasd_page_cache = kmem_cache_create("dasd_page_cache", PAGE_SIZE, PAGE_SIZE, SLAB_CACHE_DMA, - NULL, NULL ); + NULL); if (!dasd_page_cache) MESSAGE(KERN_WARNING, "%s", "Failed to create slab, " "fixed buffer mode disabled."); diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 8b3b0f4a157..ac7e8ef504c 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c @@ -28,6 +28,7 @@ static struct proc_dir_entry *dasd_proc_root_entry = NULL; static struct proc_dir_entry *dasd_devices_entry = NULL; static struct proc_dir_entry *dasd_statistics_entry = NULL; +#ifdef CONFIG_DASD_PROFILE static char * dasd_get_user_string(const char __user *user_buf, size_t user_len) { @@ -47,6 +48,7 @@ dasd_get_user_string(const char __user *user_buf, size_t user_len) buffer[user_len] = 0; return buffer; } +#endif /* CONFIG_DASD_PROFILE */ static int dasd_devices_show(struct seq_file *m, void *v) @@ -167,6 +169,7 @@ dasd_calc_metrics(char *page, char **start, off_t off, return len; } +#ifdef CONFIG_DASD_PROFILE static char * dasd_statistics_array(char *str, unsigned int *array, int shift) { @@ -180,6 +183,7 @@ dasd_statistics_array(char *str, unsigned int *array, int shift) str += sprintf(str,"\n"); return str; } +#endif /* CONFIG_DASD_PROFILE */ static int dasd_statistics_read(char *page, char **start, off_t off, diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 1340451ea40..35765f6a86e 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -747,14 +747,9 @@ dcssblk_check_params(void) static void __exit dcssblk_exit(void) { - int rc; - PRINT_DEBUG("DCSSBLOCK EXIT...\n"); s390_root_dev_unregister(dcssblk_root_dev); - rc = unregister_blkdev(dcssblk_major, DCSSBLK_NAME); - if (rc) { - PRINT_ERR("unregister_blkdev() failed!\n"); - } + unregister_blkdev(dcssblk_major, DCSSBLK_NAME); PRINT_DEBUG("...finished!\n"); } diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig index 66102a18432..3f36cb3910e 100644 --- a/drivers/s390/char/Kconfig +++ b/drivers/s390/char/Kconfig @@ -164,3 +164,10 @@ config MONWRITER help Character device driver for writing z/VM monitor service records +config S390_VMUR + tristate "z/VM unit record device driver" + depends on S390 + default "m" + help + Character device driver for z/VM reader, puncher and printer. + diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index c210784bdf4..130de19916f 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o obj-$(CONFIG_MONREADER) += monreader.o obj-$(CONFIG_MONWRITER) += monwriter.o +obj-$(CONFIG_S390_VMUR) += vmur.o zcore_mod-objs := sclp_sdias.o zcore.o obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index dbb99d1b6f5..c7318a12585 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -72,6 +72,18 @@ typedef unsigned int sclp_cmdw_t; typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! 
*/ +struct sccb_header { + u16 length; + u8 function_code; + u8 control_mask[3]; + u16 response_code; +} __attribute__((packed)); + +extern u64 sclp_facilities; + +#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL) +#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) + struct gds_subvector { u8 length; u8 key; diff --git a/drivers/s390/char/sclp_chp.c b/drivers/s390/char/sclp_chp.c index a66b914519b..c68f5e7e63a 100644 --- a/drivers/s390/char/sclp_chp.c +++ b/drivers/s390/char/sclp_chp.c @@ -55,6 +55,8 @@ static int do_configure(sclp_cmdw_t cmd) struct chp_cfg_data *data; int rc; + if (!SCLP_HAS_CHP_RECONFIG) + return -EOPNOTSUPP; /* Prepare sccb. */ data = (struct chp_cfg_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!data) @@ -152,6 +154,8 @@ int sclp_chp_read_info(struct sclp_chp_info *info) struct chp_info_data *data; int rc; + if (!SCLP_HAS_CHP_INFO) + return -EOPNOTSUPP; /* Prepare sccb. */ data = (struct chp_info_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!data) diff --git a/drivers/s390/char/sclp_info.c b/drivers/s390/char/sclp_info.c index 7bcbe643b08..a1136e05275 100644 --- a/drivers/s390/char/sclp_info.c +++ b/drivers/s390/char/sclp_info.c @@ -11,47 +11,106 @@ #include <asm/sclp.h> #include "sclp.h" -struct sclp_readinfo_sccb s390_readinfo_sccb; +struct sclp_readinfo_sccb { + struct sccb_header header; /* 0-7 */ + u16 rnmax; /* 8-9 */ + u8 rnsize; /* 10 */ + u8 _reserved0[24 - 11]; /* 11-23 */ + u8 loadparm[8]; /* 24-31 */ + u8 _reserved1[48 - 32]; /* 32-47 */ + u64 facilities; /* 48-55 */ + u8 _reserved2[91 - 56]; /* 56-90 */ + u8 flags; /* 91 */ + u8 _reserved3[100 - 92]; /* 92-99 */ + u32 rnsize2; /* 100-103 */ + u64 rnmax2; /* 104-111 */ + u8 _reserved4[4096 - 112]; /* 112-4095 */ +} __attribute__((packed, aligned(4096))); + +static struct sclp_readinfo_sccb __initdata early_readinfo_sccb; +static int __initdata early_readinfo_sccb_valid; + +u64 sclp_facilities; void __init sclp_readinfo_early(void) { - sclp_cmdw_t command; - struct sccb_header *sccb; int ret; + int i; + struct sclp_readinfo_sccb *sccb; + sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, + SCLP_CMDW_READ_SCP_INFO}; - __ctl_set_bit(0, 9); /* enable service signal subclass mask */ - - sccb = &s390_readinfo_sccb.header; - command = SCLP_CMDW_READ_SCP_INFO_FORCED; - while (1) { - u16 response; - - memset(&s390_readinfo_sccb, 0, sizeof(s390_readinfo_sccb)); - sccb->length = sizeof(s390_readinfo_sccb); - sccb->control_mask[2] = 0x80; - - ret = sclp_service_call(command, &s390_readinfo_sccb); - - if (ret == -EIO) - goto out; - if (ret == -EBUSY) - continue; + /* Enable service signal subclass mask. */ + __ctl_set_bit(0, 9); + sccb = &early_readinfo_sccb; + for (i = 0; i < ARRAY_SIZE(commands); i++) { + do { + memset(sccb, 0, sizeof(*sccb)); + sccb->header.length = sizeof(*sccb); + sccb->header.control_mask[2] = 0x80; + ret = sclp_service_call(commands[i], sccb); + } while (ret == -EBUSY); + if (ret) + break; __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT | PSW_MASK_WAIT | PSW_DEFAULT_KEY); local_irq_disable(); + /* + * Contents of the sccb might have changed + * therefore a barrier is needed. + */ barrier(); + if (sccb->header.response_code == 0x10) { + early_readinfo_sccb_valid = 1; + break; + } + if (sccb->header.response_code != 0x1f0) + break; + } + /* Disable service signal subclass mask again. 
*/ + __ctl_clear_bit(0, 9); +} - response = sccb->response_code; +void __init sclp_facilities_detect(void) +{ + if (!early_readinfo_sccb_valid) + return; + sclp_facilities = early_readinfo_sccb.facilities; +} - if (response == 0x10) - break; +unsigned long long __init sclp_memory_detect(void) +{ + unsigned long long memsize; + struct sclp_readinfo_sccb *sccb; - if (response != 0x1f0 || command == SCLP_CMDW_READ_SCP_INFO) - break; + if (!early_readinfo_sccb_valid) + return 0; + sccb = &early_readinfo_sccb; + if (sccb->rnsize) + memsize = sccb->rnsize << 20; + else + memsize = sccb->rnsize2 << 20; + if (sccb->rnmax) + memsize *= sccb->rnmax; + else + memsize *= sccb->rnmax2; + return memsize; +} - command = SCLP_CMDW_READ_SCP_INFO; - } -out: - __ctl_clear_bit(0, 9); /* disable service signal subclass mask */ +/* + * This function will be called after sclp_memory_detect(), which gets called + * early from early.c code. Therefore the sccb should have valid contents. + */ +void __init sclp_get_ipl_info(struct sclp_ipl_info *info) +{ + struct sclp_readinfo_sccb *sccb; + + if (!early_readinfo_sccb_valid) + return; + sccb = &early_readinfo_sccb; + info->is_valid = 1; + if (sccb->flags & 0x2) + info->has_dump = 1; + memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN); } diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index e765875e8db..80e7a537e7d 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c @@ -131,10 +131,9 @@ tape_34xx_schedule_work(struct tape_device *device, enum tape_op op) { struct tape_34xx_work *p; - if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL) + if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL) return -ENOMEM; - memset(p, 0, sizeof(*p)); INIT_WORK(&p->work, tape_34xx_work_handler); p->device = tape_get_device_reference(device); diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index fce3dac5cb3..2f419b0ea62 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -1,7 +1,7 @@ /* - * Copyright (C) 2004,2005 IBM Corporation + * Copyright IBM Corp. 
2004,2007 * Interface implementation for communication with the z/VM control program - * Author(s): Christian Borntraeger <cborntra@de.ibm.com> + * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> * * * z/VMs CP offers the possibility to issue commands via the diagnose code 8 @@ -22,9 +22,11 @@ #include "vmcp.h" MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Borntraeger <cborntra@de.ibm.com>"); +MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>"); MODULE_DESCRIPTION("z/VM CP interface"); +#define PRINTK_HEADER "vmcp: " + static debug_info_t *vmcp_debug; static int vmcp_open(struct inode *inode, struct file *file) @@ -40,7 +42,7 @@ static int vmcp_open(struct inode *inode, struct file *file) session->bufsize = PAGE_SIZE; session->response = NULL; session->resp_size = 0; - init_MUTEX(&session->mutex); + mutex_init(&session->mutex); file->private_data = session; return nonseekable_open(inode, file); } @@ -57,37 +59,37 @@ static int vmcp_release(struct inode *inode, struct file *file) } static ssize_t -vmcp_read(struct file *file, char __user * buff, size_t count, loff_t * ppos) +vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos) { size_t tocopy; struct vmcp_session *session; session = (struct vmcp_session *)file->private_data; - if (down_interruptible(&session->mutex)) + if (mutex_lock_interruptible(&session->mutex)) return -ERESTARTSYS; if (!session->response) { - up(&session->mutex); + mutex_unlock(&session->mutex); return 0; } if (*ppos > session->resp_size) { - up(&session->mutex); + mutex_unlock(&session->mutex); return 0; } tocopy = min(session->resp_size - (size_t) (*ppos), count); - tocopy = min(tocopy,session->bufsize - (size_t) (*ppos)); + tocopy = min(tocopy, session->bufsize - (size_t) (*ppos)); if (copy_to_user(buff, session->response + (*ppos), tocopy)) { - up(&session->mutex); + mutex_unlock(&session->mutex); return -EFAULT; } - up(&session->mutex); + mutex_unlock(&session->mutex); *ppos += tocopy; return tocopy; } static ssize_t -vmcp_write(struct file *file, const char __user * buff, size_t count, - loff_t * ppos) +vmcp_write(struct file *file, const char __user *buff, size_t count, + loff_t *ppos) { char *cmd; struct vmcp_session *session; @@ -103,24 +105,23 @@ vmcp_write(struct file *file, const char __user * buff, size_t count, } cmd[count] = '\0'; session = (struct vmcp_session *)file->private_data; - if (down_interruptible(&session->mutex)) { + if (mutex_lock_interruptible(&session->mutex)) { kfree(cmd); return -ERESTARTSYS; } if (!session->response) session->response = (char *)__get_free_pages(GFP_KERNEL - | __GFP_REPEAT | GFP_DMA, + | __GFP_REPEAT | GFP_DMA, get_order(session->bufsize)); if (!session->response) { - up(&session->mutex); + mutex_unlock(&session->mutex); kfree(cmd); return -ENOMEM; } debug_text_event(vmcp_debug, 1, cmd); - session->resp_size = cpcmd(cmd, session->response, - session->bufsize, - &session->resp_code); - up(&session->mutex); + session->resp_size = cpcmd(cmd, session->response, session->bufsize, + &session->resp_code); + mutex_unlock(&session->mutex); kfree(cmd); *ppos = 0; /* reset the file pointer after a command */ return count; @@ -145,12 +146,12 @@ static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) int temp; session = (struct vmcp_session *)file->private_data; - if (down_interruptible(&session->mutex)) + if (mutex_lock_interruptible(&session->mutex)) return -ERESTARTSYS; switch (cmd) { case VMCP_GETCODE: temp = session->resp_code; - up(&session->mutex); + 
mutex_unlock(&session->mutex); return put_user(temp, (int __user *)arg); case VMCP_SETBUF: free_pages((unsigned long)session->response, @@ -161,27 +162,26 @@ static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) session->bufsize = PAGE_SIZE; temp = -EINVAL; } - up(&session->mutex); + mutex_unlock(&session->mutex); return temp; case VMCP_GETSIZE: temp = session->resp_size; - up(&session->mutex); + mutex_unlock(&session->mutex); return put_user(temp, (int __user *)arg); default: - up(&session->mutex); + mutex_unlock(&session->mutex); return -ENOIOCTLCMD; } } static const struct file_operations vmcp_fops = { .owner = THIS_MODULE, - .open = &vmcp_open, - .release = &vmcp_release, - .read = &vmcp_read, - .llseek = &no_llseek, - .write = &vmcp_write, - .unlocked_ioctl = &vmcp_ioctl, - .compat_ioctl = &vmcp_ioctl + .open = vmcp_open, + .release = vmcp_release, + .read = vmcp_read, + .write = vmcp_write, + .unlocked_ioctl = vmcp_ioctl, + .compat_ioctl = vmcp_ioctl, }; static struct miscdevice vmcp_dev = { @@ -195,26 +195,38 @@ static int __init vmcp_init(void) int ret; if (!MACHINE_IS_VM) { - printk(KERN_WARNING - "z/VM CP interface is only available under z/VM\n"); + PRINT_WARN("z/VM CP interface is only available under z/VM\n"); return -ENODEV; } - ret = misc_register(&vmcp_dev); - if (!ret) - printk(KERN_INFO "z/VM CP interface loaded\n"); - else - printk(KERN_WARNING - "z/VM CP interface not loaded. Could not register misc device.\n"); vmcp_debug = debug_register("vmcp", 1, 1, 240); - debug_register_view(vmcp_debug, &debug_hex_ascii_view); - return ret; + if (!vmcp_debug) { + PRINT_ERR("z/VM CP interface not loaded. Could not register " + "debug feature\n"); + return -ENOMEM; + } + ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view); + if (ret) { + PRINT_ERR("z/VM CP interface not loaded. Could not register " + "debug feature view. Error code: %d\n", ret); + debug_unregister(vmcp_debug); + return ret; + } + ret = misc_register(&vmcp_dev); + if (ret) { + PRINT_ERR("z/VM CP interface not loaded. Could not register " + "misc device. 
Error code: %d\n", ret); + debug_unregister(vmcp_debug); + return ret; + } + PRINT_INFO("z/VM CP interface loaded\n"); + return 0; } static void __exit vmcp_exit(void) { - WARN_ON(misc_deregister(&vmcp_dev) != 0); + misc_deregister(&vmcp_dev); debug_unregister(vmcp_debug); - printk(KERN_INFO "z/VM CP interface unloaded.\n"); + PRINT_INFO("z/VM CP interface unloaded.\n"); } module_init(vmcp_init); diff --git a/drivers/s390/char/vmcp.h b/drivers/s390/char/vmcp.h index 8a5975f3dad..6a993948e18 100644 --- a/drivers/s390/char/vmcp.h +++ b/drivers/s390/char/vmcp.h @@ -12,8 +12,8 @@ * The idea of this driver is based on cpint from Neale Ferguson */ -#include <asm/semaphore.h> #include <linux/ioctl.h> +#include <linux/mutex.h> #define VMCP_GETCODE _IOR(0x10, 1, int) #define VMCP_SETBUF _IOW(0x10, 2, int) @@ -26,5 +26,5 @@ struct vmcp_session { int resp_code; /* As we use copy_from/to_user, which might * * sleep and cannot use a spinlock */ - struct semaphore mutex; + struct mutex mutex; }; diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index a5a00e9ae4d..12f7a4ce82c 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -835,7 +835,7 @@ static void vmlogrdr_cleanup(void) } -static int vmlogrdr_init(void) +static int __init vmlogrdr_init(void) { int rc; int i; @@ -885,7 +885,7 @@ cleanup: } -static void vmlogrdr_exit(void) +static void __exit vmlogrdr_exit(void) { vmlogrdr_cleanup(); printk (KERN_INFO "vmlogrdr: driver unloaded\n"); diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c new file mode 100644 index 00000000000..e90b0f84619 --- /dev/null +++ b/drivers/s390/char/vmur.c @@ -0,0 +1,906 @@ +/* + * Linux driver for System z and s390 unit record devices + * (z/VM virtual punch, reader, printer) + * + * Copyright IBM Corp. 2001, 2007 + * Authors: Malcolm Beattie <beattiem@uk.ibm.com> + * Michael Holzheu <holzheu@de.ibm.com> + * Frank Munzert <munzert@de.ibm.com> + */ + +#include <linux/cdev.h> + +#include <asm/uaccess.h> +#include <asm/cio.h> +#include <asm/ccwdev.h> +#include <asm/debug.h> + +#include "vmur.h" + +/* + * Driver overview + * + * Unit record device support is implemented as a character device driver. + * We can fit at least 16 bits into a device minor number and use the + * simple method of mapping a character device number with minor abcd + * to the unit record device with devno abcd. + * I/O to virtual unit record devices is handled as follows: + * Reads: Diagnose code 0x14 (input spool file manipulation) + * is used to read spool data page-wise. + * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length + * is available by reading sysfs attr reclen. Each write() to the device + * must specify an integral multiple (maximal 511) of reclen. 
+ */ + +static char ur_banner[] = "z/VM virtual unit record device driver"; + +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver"); +MODULE_LICENSE("GPL"); + +#define PRINTK_HEADER "vmur: " + +static dev_t ur_first_dev_maj_min; +static struct class *vmur_class; +static struct debug_info *vmur_dbf; + +/* We put the device's record length (for writes) in the driver_info field */ +static struct ccw_device_id ur_ids[] = { + { CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) }, + { CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) }, + { /* end of list */ } +}; + +MODULE_DEVICE_TABLE(ccw, ur_ids); + +static int ur_probe(struct ccw_device *cdev); +static void ur_remove(struct ccw_device *cdev); +static int ur_set_online(struct ccw_device *cdev); +static int ur_set_offline(struct ccw_device *cdev); + +static struct ccw_driver ur_driver = { + .name = "vmur", + .owner = THIS_MODULE, + .ids = ur_ids, + .probe = ur_probe, + .remove = ur_remove, + .set_online = ur_set_online, + .set_offline = ur_set_offline, +}; + +/* + * Allocation, freeing, getting and putting of urdev structures + */ +static struct urdev *urdev_alloc(struct ccw_device *cdev) +{ + struct urdev *urd; + + urd = kzalloc(sizeof(struct urdev), GFP_KERNEL); + if (!urd) + return NULL; + urd->cdev = cdev; + urd->reclen = cdev->id.driver_info; + ccw_device_get_id(cdev, &urd->dev_id); + mutex_init(&urd->io_mutex); + mutex_init(&urd->open_mutex); + return urd; +} + +static void urdev_free(struct urdev *urd) +{ + kfree(urd); +} + +/* + * This is how the character device driver gets a reference to a + * ur device. When this call returns successfully, a reference has + * been taken (by get_device) on the underlying kobject. The recipient + * of this urdev pointer must eventually drop it with urdev_put(urd) + * which does the corresponding put_device(). + */ +static struct urdev *urdev_get_from_devno(u16 devno) +{ + char bus_id[16]; + struct ccw_device *cdev; + + sprintf(bus_id, "0.0.%04x", devno); + cdev = get_ccwdev_by_busid(&ur_driver, bus_id); + if (!cdev) + return NULL; + + return cdev->dev.driver_data; +} + +static void urdev_put(struct urdev *urd) +{ + put_device(&urd->cdev->dev); +} + +/* + * Low-level functions to do I/O to a ur device. + * alloc_chan_prog + * do_ur_io + * ur_int_handler + * + * alloc_chan_prog allocates and builds the channel program + * + * do_ur_io issues the channel program to the device and blocks waiting + * on a completion event it publishes at urd->io_done. The function + * serialises itself on the device's mutex so that only one I/O + * is issued at a time (and that I/O is synchronous). + * + * ur_int_handler catches the "I/O done" interrupt, writes the + * subchannel status word into the scsw member of the urdev structure + * and complete()s the io_done to wake the waiting do_ur_io. + * + * The caller of do_ur_io is responsible for kfree()ing the channel program + * address pointer that alloc_chan_prog returned. + */ + + +/* + * alloc_chan_prog + * The channel program we use is write commands chained together + * with a final NOP CCW command-chained on (which ensures that CE and DE + * are presented together in a single interrupt instead of as separate + * interrupts unless an incorrect length indication kicks in first). The + * data length in each CCW is reclen. The caller must ensure that count + * is an integral multiple of reclen. + * The channel program pointer returned by this function must be freed + * with kfree. 
The caller is responsible for checking that + * count/reclen is not ridiculously large. + */ +static struct ccw1 *alloc_chan_prog(char *buf, size_t count, size_t reclen) +{ + size_t num_ccws; + struct ccw1 *cpa; + int i; + + TRACE("alloc_chan_prog(%p, %zu, %zu)\n", buf, count, reclen); + + /* + * We chain a NOP onto the writes to force CE+DE together. + * That means we allocate room for CCWs to cover count/reclen + * records plus a NOP. + */ + num_ccws = count / reclen + 1; + cpa = kmalloc(num_ccws * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); + if (!cpa) + return NULL; + + for (i = 0; count; i++) { + cpa[i].cmd_code = WRITE_CCW_CMD; + cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI; + cpa[i].count = reclen; + cpa[i].cda = __pa(buf); + buf += reclen; + count -= reclen; + } + /* The following NOP CCW forces CE+DE to be presented together */ + cpa[i].cmd_code = CCW_CMD_NOOP; + cpa[i].flags = 0; + cpa[i].count = 0; + cpa[i].cda = 0; + + return cpa; +} + +static int do_ur_io(struct urdev *urd, struct ccw1 *cpa) +{ + int rc; + struct ccw_device *cdev = urd->cdev; + DECLARE_COMPLETION(event); + + TRACE("do_ur_io: cpa=%p\n", cpa); + + rc = mutex_lock_interruptible(&urd->io_mutex); + if (rc) + return rc; + + urd->io_done = &event; + + spin_lock_irq(get_ccwdev_lock(cdev)); + rc = ccw_device_start(cdev, cpa, 1, 0, 0); + spin_unlock_irq(get_ccwdev_lock(cdev)); + + TRACE("do_ur_io: ccw_device_start returned %d\n", rc); + if (rc) + goto out; + + wait_for_completion(&event); + TRACE("do_ur_io: I/O complete\n"); + rc = 0; + +out: + mutex_unlock(&urd->io_mutex); + return rc; +} + +/* + * ur interrupt handler, called from the ccw_device layer + */ +static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm, + struct irb *irb) +{ + struct urdev *urd; + + TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n", + intparm, irb->scsw.cstat, irb->scsw.dstat, irb->scsw.count); + + if (!intparm) { + TRACE("ur_int_handler: unsolicited interrupt\n"); + return; + } + urd = cdev->dev.driver_data; + /* On special conditions irb is an error pointer */ + if (IS_ERR(irb)) + urd->io_request_rc = PTR_ERR(irb); + else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) + urd->io_request_rc = 0; + else + urd->io_request_rc = -EIO; + + complete(urd->io_done); +} + +/* + * reclen sysfs attribute - The record length to be used for write CCWs + */ +static ssize_t ur_attr_reclen_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct urdev *urd = dev->driver_data; + + return sprintf(buf, "%zu\n", urd->reclen); +} + +static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL); + +static int ur_create_attributes(struct device *dev) +{ + return device_create_file(dev, &dev_attr_reclen); +} + +static void ur_remove_attributes(struct device *dev) +{ + device_remove_file(dev, &dev_attr_reclen); +} + +/* + * diagnose code 0x210 - retrieve device information + * cc=0 normal completion, we have a real device + * cc=1 CP paging error + * cc=2 The virtual device exists, but is not associated with a real device + * cc=3 Invalid device address, or the virtual device does not exist + */ +static int get_urd_class(struct urdev *urd) +{ + static struct diag210 ur_diag210; + int cc; + + ur_diag210.vrdcdvno = urd->dev_id.devno; + ur_diag210.vrdclen = sizeof(struct diag210); + + cc = diag210(&ur_diag210); + switch (cc) { + case 0: + return -ENOTSUPP; + case 2: + return ur_diag210.vrdcvcla; /* virtual device class */ + case 3: + return -ENODEV; + default: + return -EIO; + } +} + +/* + * 
Allocation and freeing of urfile structures + */ +static struct urfile *urfile_alloc(struct urdev *urd) +{ + struct urfile *urf; + + urf = kzalloc(sizeof(struct urfile), GFP_KERNEL); + if (!urf) + return NULL; + urf->urd = urd; + + TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf, + urf->dev_reclen); + + return urf; +} + +static void urfile_free(struct urfile *urf) +{ + TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd); + kfree(urf); +} + +/* + * The fops implementation of the character device driver + */ +static ssize_t do_write(struct urdev *urd, const char __user *udata, + size_t count, size_t reclen, loff_t *ppos) +{ + struct ccw1 *cpa; + char *buf; + int rc; + + /* Data buffer must be under 2GB line for fmt1 CCWs: hence GFP_DMA */ + buf = kmalloc(count, GFP_KERNEL | GFP_DMA); + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, udata, count)) { + rc = -EFAULT; + goto fail_kfree_buf; + } + + cpa = alloc_chan_prog(buf, count, reclen); + if (!cpa) { + rc = -ENOMEM; + goto fail_kfree_buf; + } + + rc = do_ur_io(urd, cpa); + if (rc) + goto fail_kfree_cpa; + + if (urd->io_request_rc) { + rc = urd->io_request_rc; + goto fail_kfree_cpa; + } + *ppos += count; + rc = count; +fail_kfree_cpa: + kfree(cpa); +fail_kfree_buf: + kfree(buf); + return rc; +} + +static ssize_t ur_write(struct file *file, const char __user *udata, + size_t count, loff_t *ppos) +{ + struct urfile *urf = file->private_data; + + TRACE("ur_write: count=%zu\n", count); + + if (count == 0) + return 0; + + if (count % urf->dev_reclen) + return -EINVAL; /* count must be a multiple of reclen */ + + if (count > urf->dev_reclen * MAX_RECS_PER_IO) + count = urf->dev_reclen * MAX_RECS_PER_IO; + + return do_write(urf->urd, udata, count, urf->dev_reclen, ppos); +} + +static int do_diag_14(unsigned long rx, unsigned long ry1, + unsigned long subcode) +{ + register unsigned long _ry1 asm("2") = ry1; + register unsigned long _ry2 asm("3") = subcode; + int rc = 0; + + asm volatile( +#ifdef CONFIG_64BIT + " sam31\n" + " diag %2,2,0x14\n" + " sam64\n" +#else + " diag %2,2,0x14\n" +#endif + " ipm %0\n" + " srl %0,28\n" + : "=d" (rc), "+d" (_ry2) + : "d" (rx), "d" (_ry1) + : "cc"); + + TRACE("diag 14: subcode=0x%lx, cc=%i\n", subcode, rc); + return rc; +} + +/* + * diagnose code 0x14 subcode 0x0028 - position spool file to designated + * record + * cc=0 normal completion + * cc=2 no file active on the virtual reader or device not ready + * cc=3 record specified is beyond EOF + */ +static int diag_position_to_record(int devno, int record) +{ + int cc; + + cc = do_diag_14(record, devno, 0x28); + switch (cc) { + case 0: + return 0; + case 2: + return -ENOMEDIUM; + case 3: + return -ENODATA; /* position beyond end of file */ + default: + return -EIO; + } +} + +/* + * diagnose code 0x14 subcode 0x0000 - read next spool file buffer + * cc=0 normal completion + * cc=1 EOF reached + * cc=2 no file active on the virtual reader, and no file eligible + * cc=3 file already active on the virtual reader or specified virtual + * reader does not exist or is not a reader + */ +static int diag_read_file(int devno, char *buf) +{ + int cc; + + cc = do_diag_14((unsigned long) buf, devno, 0x00); + switch (cc) { + case 0: + return 0; + case 1: + return -ENODATA; + case 2: + return -ENOMEDIUM; + default: + return -EIO; + } +} + +static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count, + loff_t *offs) +{ + size_t len, copied, res; + char *buf; + int rc; + u16 reclen; + struct urdev *urd; + + urd = ((struct urfile *) 
file->private_data)->urd; + reclen = ((struct urfile *) file->private_data)->file_reclen; + + rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1); + if (rc == -ENODATA) + return 0; + if (rc) + return rc; + + len = min((size_t) PAGE_SIZE, count); + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + copied = 0; + res = (size_t) (*offs % PAGE_SIZE); + do { + rc = diag_read_file(urd->dev_id.devno, buf); + if (rc == -ENODATA) { + break; + } + if (rc) + goto fail; + if (reclen) + *((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen; + len = min(count - copied, PAGE_SIZE - res); + if (copy_to_user(ubuf + copied, buf + res, len)) { + rc = -EFAULT; + goto fail; + } + res = 0; + copied += len; + } while (copied != count); + + *offs += copied; + rc = copied; +fail: + kfree(buf); + return rc; +} + +static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count, + loff_t *offs) +{ + struct urdev *urd; + int rc; + + TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs); + + if (count == 0) + return 0; + + urd = ((struct urfile *) file->private_data)->urd; + rc = mutex_lock_interruptible(&urd->io_mutex); + if (rc) + return rc; + rc = diag14_read(file, ubuf, count, offs); + mutex_unlock(&urd->io_mutex); + return rc; +} + +/* + * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor + * cc=0 normal completion + * cc=1 no files on reader queue or no subsequent file + * cc=2 spid specified is invalid + */ +static int diag_read_next_file_info(struct file_control_block *buf, int spid) +{ + int cc; + + cc = do_diag_14((unsigned long) buf, spid, 0xfff); + switch (cc) { + case 0: + return 0; + default: + return -ENODATA; + } +} + +static int verify_device(struct urdev *urd) +{ + struct file_control_block fcb; + char *buf; + int rc; + + switch (urd->class) { + case DEV_CLASS_UR_O: + return 0; /* no check needed here */ + case DEV_CLASS_UR_I: + /* check for empty reader device (beginning of chain) */ + rc = diag_read_next_file_info(&fcb, 0); + if (rc) + return rc; + + /* open file on virtual reader */ + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + rc = diag_read_file(urd->dev_id.devno, buf); + kfree(buf); + + if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */ + return rc; + return 0; + default: + return -ENOTSUPP; + } +} + +static int get_file_reclen(struct urdev *urd) +{ + struct file_control_block fcb; + int rc; + + switch (urd->class) { + case DEV_CLASS_UR_O: + return 0; + case DEV_CLASS_UR_I: + rc = diag_read_next_file_info(&fcb, 0); + if (rc) + return rc; + break; + default: + return -ENOTSUPP; + } + if (fcb.file_stat & FLG_CP_DUMP) + return 0; + + return fcb.rec_len; +} + +static int ur_open(struct inode *inode, struct file *file) +{ + u16 devno; + struct urdev *urd; + struct urfile *urf; + unsigned short accmode; + int rc; + + accmode = file->f_flags & O_ACCMODE; + + if (accmode == O_RDWR) + return -EACCES; + + /* + * We treat the minor number as the devno of the ur device + * to find in the driver tree. 
+ */ + devno = MINOR(file->f_dentry->d_inode->i_rdev); + + urd = urdev_get_from_devno(devno); + if (!urd) + return -ENXIO; + + if (file->f_flags & O_NONBLOCK) { + if (!mutex_trylock(&urd->open_mutex)) { + rc = -EBUSY; + goto fail_put; + } + } else { + if (mutex_lock_interruptible(&urd->open_mutex)) { + rc = -ERESTARTSYS; + goto fail_put; + } + } + + TRACE("ur_open\n"); + + if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) || + ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) { + TRACE("ur_open: unsupported dev class (%d)\n", urd->class); + rc = -EACCES; + goto fail_unlock; + } + + rc = verify_device(urd); + if (rc) + goto fail_unlock; + + urf = urfile_alloc(urd); + if (!urf) { + rc = -ENOMEM; + goto fail_unlock; + } + + urf->dev_reclen = urd->reclen; + rc = get_file_reclen(urd); + if (rc < 0) + goto fail_urfile_free; + urf->file_reclen = rc; + file->private_data = urf; + return 0; + +fail_urfile_free: + urfile_free(urf); +fail_unlock: + mutex_unlock(&urd->open_mutex); +fail_put: + urdev_put(urd); + return rc; +} + +static int ur_release(struct inode *inode, struct file *file) +{ + struct urfile *urf = file->private_data; + + TRACE("ur_release\n"); + mutex_unlock(&urf->urd->open_mutex); + urdev_put(urf->urd); + urfile_free(urf); + return 0; +} + +static loff_t ur_llseek(struct file *file, loff_t offset, int whence) +{ + loff_t newpos; + + if ((file->f_flags & O_ACCMODE) != O_RDONLY) + return -ESPIPE; /* seek allowed only for reader */ + if (offset % PAGE_SIZE) + return -ESPIPE; /* only multiples of 4K allowed */ + switch (whence) { + case 0: /* SEEK_SET */ + newpos = offset; + break; + case 1: /* SEEK_CUR */ + newpos = file->f_pos + offset; + break; + default: + return -EINVAL; + } + file->f_pos = newpos; + return newpos; +} + +static struct file_operations ur_fops = { + .owner = THIS_MODULE, + .open = ur_open, + .release = ur_release, + .read = ur_read, + .write = ur_write, + .llseek = ur_llseek, +}; + +/* + * ccw_device infrastructure: + * ur_probe gets its own ref to the device (i.e. get_device), + * creates the struct urdev, the device attributes, sets up + * the interrupt handler and validates the virtual unit record device. + * ur_remove removes the device attributes, frees the struct urdev + * and drops (put_device) the ref to the device we got in ur_probe. 
+ */ +static int ur_probe(struct ccw_device *cdev) +{ + struct urdev *urd; + int rc; + + TRACE("ur_probe: cdev=%p state=%d\n", cdev, *(int *) cdev->private); + + if (!get_device(&cdev->dev)) + return -ENODEV; + + urd = urdev_alloc(cdev); + if (!urd) { + rc = -ENOMEM; + goto fail; + } + rc = ur_create_attributes(&cdev->dev); + if (rc) { + rc = -ENOMEM; + goto fail; + } + cdev->dev.driver_data = urd; + cdev->handler = ur_int_handler; + + /* validate virtual unit record device */ + urd->class = get_urd_class(urd); + if (urd->class < 0) { + rc = urd->class; + goto fail; + } + if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) { + rc = -ENOTSUPP; + goto fail; + } + + return 0; + +fail: + urdev_free(urd); + put_device(&cdev->dev); + return rc; +} + +static void ur_remove(struct ccw_device *cdev) +{ + struct urdev *urd = cdev->dev.driver_data; + + TRACE("ur_remove\n"); + if (cdev->online) + ur_set_offline(cdev); + ur_remove_attributes(&cdev->dev); + urdev_free(urd); + put_device(&cdev->dev); +} + +static int ur_set_online(struct ccw_device *cdev) +{ + struct urdev *urd; + int minor, major, rc; + char node_id[16]; + + TRACE("ur_set_online: cdev=%p state=%d\n", cdev, + *(int *) cdev->private); + + if (!try_module_get(ur_driver.owner)) + return -EINVAL; + + urd = (struct urdev *) cdev->dev.driver_data; + minor = urd->dev_id.devno; + major = MAJOR(ur_first_dev_maj_min); + + urd->char_device = cdev_alloc(); + if (!urd->char_device) { + rc = -ENOMEM; + goto fail_module_put; + } + + cdev_init(urd->char_device, &ur_fops); + urd->char_device->dev = MKDEV(major, minor); + urd->char_device->owner = ur_fops.owner; + + rc = cdev_add(urd->char_device, urd->char_device->dev, 1); + if (rc) + goto fail_free_cdev; + if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) { + if (urd->class == DEV_CLASS_UR_I) + sprintf(node_id, "vmrdr-%s", cdev->dev.bus_id); + if (urd->class == DEV_CLASS_UR_O) + sprintf(node_id, "vmpun-%s", cdev->dev.bus_id); + } else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) { + sprintf(node_id, "vmprt-%s", cdev->dev.bus_id); + } else { + rc = -ENOTSUPP; + goto fail_free_cdev; + } + + urd->device = device_create(vmur_class, NULL, urd->char_device->dev, + "%s", node_id); + if (IS_ERR(urd->device)) { + rc = PTR_ERR(urd->device); + TRACE("ur_set_online: device_create rc=%d\n", rc); + goto fail_free_cdev; + } + + return 0; + +fail_free_cdev: + cdev_del(urd->char_device); +fail_module_put: + module_put(ur_driver.owner); + + return rc; +} + +static int ur_set_offline(struct ccw_device *cdev) +{ + struct urdev *urd; + + TRACE("ur_set_offline: cdev=%p cdev->private=%p state=%d\n", + cdev, cdev->private, *(int *) cdev->private); + urd = (struct urdev *) cdev->dev.driver_data; + device_destroy(vmur_class, urd->char_device->dev); + cdev_del(urd->char_device); + module_put(ur_driver.owner); + + return 0; +} + +/* + * Module initialisation and cleanup + */ +static int __init ur_init(void) +{ + int rc; + dev_t dev; + + if (!MACHINE_IS_VM) { + PRINT_ERR("%s is only available under z/VM.\n", ur_banner); + return -ENODEV; + } + + vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long)); + if (!vmur_dbf) + return -ENOMEM; + rc = debug_register_view(vmur_dbf, &debug_sprintf_view); + if (rc) + goto fail_free_dbf; + + debug_set_level(vmur_dbf, 6); + + rc = ccw_driver_register(&ur_driver); + if (rc) + goto fail_free_dbf; + + rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur"); + if (rc) { + PRINT_ERR("alloc_chrdev_region failed: err = %d\n", rc); + goto fail_unregister_driver; + } + 
ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0); + + vmur_class = class_create(THIS_MODULE, "vmur"); + if (IS_ERR(vmur_class)) { + rc = PTR_ERR(vmur_class); + goto fail_unregister_region; + } + PRINT_INFO("%s loaded.\n", ur_banner); + return 0; + +fail_unregister_region: + unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); +fail_unregister_driver: + ccw_driver_unregister(&ur_driver); +fail_free_dbf: + debug_unregister(vmur_dbf); + return rc; +} + +static void __exit ur_exit(void) +{ + class_destroy(vmur_class); + unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); + ccw_driver_unregister(&ur_driver); + debug_unregister(vmur_dbf); + PRINT_INFO("%s unloaded.\n", ur_banner); +} + +module_init(ur_init); +module_exit(ur_exit); diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h new file mode 100644 index 00000000000..16d0a4e38e4 --- /dev/null +++ b/drivers/s390/char/vmur.h @@ -0,0 +1,104 @@ +/* + * Linux driver for System z and s390 unit record devices + * (z/VM virtual punch, reader, printer) + * + * Copyright IBM Corp. 2001, 2007 + * Authors: Malcolm Beattie <beattiem@uk.ibm.com> + * Michael Holzheu <holzheu@de.ibm.com> + * Frank Munzert <munzert@de.ibm.com> + */ + +#ifndef _VMUR_H_ +#define _VMUR_H_ + +#define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */ +#define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */ +/* + * we only support z/VM's default unit record devices: + * both in SPOOL directory control statement and in CP DEFINE statement + * RDR defaults to 2540 reader + * PUN defaults to 2540 punch + * PRT defaults to 1403 printer + */ +#define READER_PUNCH_DEVTYPE 0x2540 +#define PRINTER_DEVTYPE 0x1403 + +/* z/VM spool file control block SFBLOK */ +struct file_control_block { + char reserved_1[8]; + char user_owner[8]; + char user_orig[8]; + __s32 data_recs; + __s16 rec_len; + __s16 file_num; + __u8 file_stat; + __u8 dev_type; + char reserved_2[6]; + char file_name[12]; + char file_type[12]; + char create_date[8]; + char create_time[8]; + char reserved_3[6]; + __u8 file_class; + __u8 sfb_lok; + __u64 distr_code; + __u32 reserved_4; + __u8 current_starting_copy_number; + __u8 sfblock_cntrl_flags; + __u8 reserved_5; + __u8 more_status_flags; + char rest[200]; +} __attribute__ ((packed)); + +#define FLG_CP_DUMP 0x10 + +/* + * A struct urdev is created for each ur device that is made available + * via the ccw_device driver model. + */ +struct urdev { + struct ccw_device *cdev; /* Backpointer to ccw device */ + struct mutex io_mutex; /* Serialises device IO */ + struct mutex open_mutex; /* Serialises access to device */ + struct completion *io_done; /* do_ur_io waits; irq completes */ + struct device *device; + struct cdev *char_device; + struct ccw_dev_id dev_id; /* device id */ + size_t reclen; /* Record length for *write* CCWs */ + int class; /* VM device class */ + int io_request_rc; /* return code from I/O request */ +}; + +/* + * A struct urfile is allocated at open() time for each device and + * freed on release(). + */ +struct urfile { + struct urdev *urd; + unsigned int flags; + size_t dev_reclen; + __u16 file_reclen; +}; + +/* + * Device major/minor definitions. + */ + +#define UR_MAJOR 0 /* get dynamic major */ +/* + * We map minor numbers directly to device numbers (0-FFFF) for simplicity. + * This avoids having to allocate (and manage) slot numbers. 
+ */ +#define NUM_MINORS 65536 + +/* Limiting each I/O to 511 records limits chan prog to 4KB (511 r/w + 1 NOP) */ +#define MAX_RECS_PER_IO 511 +#define WRITE_CCW_CMD 0x01 + +#define TRACE(x...) debug_sprintf_event(vmur_dbf, 1, x) +#define CCWDEV_CU_DI(cutype, di) \ + CCW_DEVICE(cutype, 0x00), .driver_info = (di) + +#define FILE_RECLEN_OFFSET 4064 /* reclen offset in spool data block */ + +#endif /* _VMUR_H_ */ diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 4e711a985d5..3712ede1672 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -156,7 +156,7 @@ static int memcpy_real(void *dest, unsigned long src, size_t count) return rc; } -static int memcpy_real_user(__user void *dest, unsigned long src, size_t count) +static int memcpy_real_user(void __user *dest, unsigned long src, size_t count) { static char buf[4096]; int offs = 0, size; diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index ac289e6eadf..b57d93d986c 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -141,8 +141,9 @@ static int s390_vary_chpid(struct chp_id chpid, int on) /* * Channel measurement related functions */ -static ssize_t chp_measurement_chars_read(struct kobject *kobj, char *buf, - loff_t off, size_t count) +static ssize_t chp_measurement_chars_read(struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) { struct channel_path *chp; unsigned int size; @@ -165,7 +166,6 @@ static struct bin_attribute chp_measurement_chars_attr = { .attr = { .name = "measurement_chars", .mode = S_IRUSR, - .owner = THIS_MODULE, }, .size = sizeof(struct cmg_chars), .read = chp_measurement_chars_read, @@ -193,8 +193,9 @@ static void chp_measurement_copy_block(struct cmg_entry *buf, } while (reference_buf.values[0] != buf->values[0]); } -static ssize_t chp_measurement_read(struct kobject *kobj, char *buf, - loff_t off, size_t count) +static ssize_t chp_measurement_read(struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) { struct channel_path *chp; struct channel_subsystem *css; @@ -217,7 +218,6 @@ static struct bin_attribute chp_measurement_attr = { .attr = { .name = "measurement", .mode = S_IRUSR, - .owner = THIS_MODULE, }, .size = sizeof(struct cmg_entry), .read = chp_measurement_read, diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 6b264bdb5bf..001682e70f6 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -272,7 +272,7 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf) struct ccw_device_id *id = &(cdev->id); int len; - len = snprint_alias(buf, PAGE_SIZE, id, "\n") + 1; + len = snprint_alias(buf, PAGE_SIZE, id, "\n"); return len > PAGE_SIZE ? 
PAGE_SIZE : len; } diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c index 997f4687453..60b9347f7c9 100644 --- a/drivers/s390/cio/device_id.c +++ b/drivers/s390/cio/device_id.c @@ -27,7 +27,6 @@ /* * diag210 is used under VM to get information about a virtual device */ -#ifdef CONFIG_64BIT int diag210(struct diag210 * addr) { @@ -43,6 +42,7 @@ diag210(struct diag210 * addr) spin_lock_irqsave(&diag210_lock, flags); diag210_tmp = *addr; +#ifdef CONFIG_64BIT asm volatile( " lhi %0,-1\n" " sam31\n" @@ -51,19 +51,8 @@ diag210(struct diag210 * addr) " srl %0,28\n" "1: sam64\n" EX_TABLE(0b,1b) - : "=&d" (ccode) : "a" (__pa(&diag210_tmp)) : "cc", "memory"); - - *addr = diag210_tmp; - spin_unlock_irqrestore(&diag210_lock, flags); - - return ccode; -} + : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory"); #else -int -diag210(struct diag210 * addr) -{ - int ccode; - asm volatile( " lhi %0,-1\n" " diag %1,0,0x210\n" @@ -71,11 +60,14 @@ diag210(struct diag210 * addr) " srl %0,28\n" "1:\n" EX_TABLE(0b,1b) - : "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory"); + : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory"); +#endif + + *addr = diag210_tmp; + spin_unlock_irqrestore(&diag210_lock, flags); return ccode; } -#endif /* * Input : diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index e70aeb7a378..ed026a1dc32 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c @@ -166,9 +166,9 @@ qdio_check_ccq(struct qdio_q *q, unsigned int ccq) { char dbf_text[15]; - if (ccq == 0 || ccq == 32 || ccq == 96) + if (ccq == 0 || ccq == 32) return 0; - if (ccq == 97) + if (ccq == 96 || ccq == 97) return 1; /*notify devices immediately*/ sprintf(dbf_text,"%d", ccq); @@ -2306,8 +2306,8 @@ qdio_get_ssqd_information(struct qdio_irq *irq_ptr) if (!ssqd_area) { QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \ "SIGAs for sch x%x.\n", irq_ptr->schid.sch_no); - irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || - CHSC_FLAG_SIGA_OUTPUT_NECESSARY || + irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | + CHSC_FLAG_SIGA_OUTPUT_NECESSARY | CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ irq_ptr->is_qebsm = 0; irq_ptr->sch_token = 0; @@ -2328,8 +2328,8 @@ qdio_get_ssqd_information(struct qdio_irq *irq_ptr) QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \ "SIGAs for sch 0.%x.%x.\n", result, irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || - CHSC_FLAG_SIGA_OUTPUT_NECESSARY || + qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | + CHSC_FLAG_SIGA_OUTPUT_NECESSARY | CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ irq_ptr->is_qebsm = 0; goto out; @@ -2340,8 +2340,8 @@ qdio_get_ssqd_information(struct qdio_irq *irq_ptr) "is 0x%x. Using all SIGAs for sch 0.%x.%x.\n", ssqd_area->response.code, irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || - CHSC_FLAG_SIGA_OUTPUT_NECESSARY || + qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | + CHSC_FLAG_SIGA_OUTPUT_NECESSARY | CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ irq_ptr->is_qebsm = 0; goto out; diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 5aac0ec3636..90bd2201451 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -43,6 +43,7 @@ static void ap_poll_all(unsigned long); static void ap_poll_timeout(unsigned long); static int ap_poll_thread_start(void); static void ap_poll_thread_stop(void); +static void ap_request_timeout(unsigned long); /** * Module description. 
@@ -189,6 +190,7 @@ int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) case AP_RESPONSE_NORMAL: return 0; case AP_RESPONSE_Q_FULL: + case AP_RESPONSE_RESET_IN_PROGRESS: return -EBUSY; default: /* Device is gone. */ return -ENODEV; @@ -252,6 +254,8 @@ int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) if (status.queue_empty) return -ENOENT; return -EBUSY; + case AP_RESPONSE_RESET_IN_PROGRESS: + return -EBUSY; default: return -ENODEV; } @@ -326,11 +330,12 @@ static int ap_init_queue(ap_qid_t qid) i = AP_MAX_RESET; /* return with -ENODEV */ break; case AP_RESPONSE_RESET_IN_PROGRESS: + rc = -EBUSY; case AP_RESPONSE_BUSY: default: break; } - if (rc != -ENODEV) + if (rc != -ENODEV && rc != -EBUSY) break; if (i < AP_MAX_RESET - 1) { udelay(5); @@ -341,6 +346,40 @@ static int ap_init_queue(ap_qid_t qid) } /** + * Arm request timeout if a AP device was idle and a new request is submitted. + */ +static void ap_increase_queue_count(struct ap_device *ap_dev) +{ + int timeout = ap_dev->drv->request_timeout; + + ap_dev->queue_count++; + if (ap_dev->queue_count == 1) { + mod_timer(&ap_dev->timeout, jiffies + timeout); + ap_dev->reset = AP_RESET_ARMED; + } +} + +/** + * AP device is still alive, re-schedule request timeout if there are still + * pending requests. + */ +static void ap_decrease_queue_count(struct ap_device *ap_dev) +{ + int timeout = ap_dev->drv->request_timeout; + + ap_dev->queue_count--; + if (ap_dev->queue_count > 0) + mod_timer(&ap_dev->timeout, jiffies + timeout); + else + /** + * The timeout timer should to be disabled now - since + * del_timer_sync() is very expensive, we just tell via the + * reset flag to ignore the pending timeout timer. + */ + ap_dev->reset = AP_RESET_IGNORE; +} + +/** * AP device related attributes. 
*/ static ssize_t ap_hwtype_show(struct device *dev, @@ -498,6 +537,7 @@ static int ap_device_remove(struct device *dev) struct ap_driver *ap_drv = ap_dev->drv; ap_flush_queue(ap_dev); + del_timer_sync(&ap_dev->timeout); if (ap_drv->remove) ap_drv->remove(ap_dev); spin_lock_bh(&ap_device_lock); @@ -759,17 +799,21 @@ static void ap_scan_bus(struct work_struct *unused) __ap_scan_bus); rc = ap_query_queue(qid, &queue_depth, &device_type); if (dev) { + if (rc == -EBUSY) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(AP_RESET_TIMEOUT); + rc = ap_query_queue(qid, &queue_depth, + &device_type); + } ap_dev = to_ap_dev(dev); spin_lock_bh(&ap_dev->lock); if (rc || ap_dev->unregistered) { spin_unlock_bh(&ap_dev->lock); - put_device(dev); device_unregister(dev); + put_device(dev); continue; - } else - spin_unlock_bh(&ap_dev->lock); - } - if (dev) { + } + spin_unlock_bh(&ap_dev->lock); put_device(dev); continue; } @@ -788,6 +832,8 @@ static void ap_scan_bus(struct work_struct *unused) INIT_LIST_HEAD(&ap_dev->pendingq); INIT_LIST_HEAD(&ap_dev->requestq); INIT_LIST_HEAD(&ap_dev->list); + setup_timer(&ap_dev->timeout, ap_request_timeout, + (unsigned long) ap_dev); if (device_type == 0) ap_probe_device_type(ap_dev); else @@ -853,7 +899,7 @@ static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) switch (status.response_code) { case AP_RESPONSE_NORMAL: atomic_dec(&ap_poll_requests); - ap_dev->queue_count--; + ap_decrease_queue_count(ap_dev); list_for_each_entry(ap_msg, &ap_dev->pendingq, list) { if (ap_msg->psmid != ap_dev->reply->psmid) continue; @@ -904,7 +950,7 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) switch (status.response_code) { case AP_RESPONSE_NORMAL: atomic_inc(&ap_poll_requests); - ap_dev->queue_count++; + ap_increase_queue_count(ap_dev); list_move_tail(&ap_msg->list, &ap_dev->pendingq); ap_dev->requestq_count--; ap_dev->pendingq_count++; @@ -914,6 +960,7 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) *flags |= 2; break; case AP_RESPONSE_Q_FULL: + case AP_RESPONSE_RESET_IN_PROGRESS: *flags |= 2; break; case AP_RESPONSE_MESSAGE_TOO_BIG: @@ -960,10 +1007,11 @@ static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_ms list_add_tail(&ap_msg->list, &ap_dev->pendingq); atomic_inc(&ap_poll_requests); ap_dev->pendingq_count++; - ap_dev->queue_count++; + ap_increase_queue_count(ap_dev); ap_dev->total_request_count++; break; case AP_RESPONSE_Q_FULL: + case AP_RESPONSE_RESET_IN_PROGRESS: list_add_tail(&ap_msg->list, &ap_dev->requestq); ap_dev->requestq_count++; ap_dev->total_request_count++; @@ -1046,6 +1094,25 @@ static void ap_poll_timeout(unsigned long unused) } /** + * Reset a not responding AP device and move all requests from the + * pending queue to the request queue. + */ +static void ap_reset(struct ap_device *ap_dev) +{ + int rc; + + ap_dev->reset = AP_RESET_IGNORE; + atomic_sub(ap_dev->queue_count, &ap_poll_requests); + ap_dev->queue_count = 0; + list_splice_init(&ap_dev->pendingq, &ap_dev->requestq); + ap_dev->requestq_count += ap_dev->pendingq_count; + ap_dev->pendingq_count = 0; + rc = ap_init_queue(ap_dev->qid); + if (rc == -ENODEV) + ap_dev->unregistered = 1; +} + +/** * Poll all AP devices on the bus in a round robin fashion. Continue * polling until bit 2^0 of the control flags is not set. If bit 2^1 * of the control flags has been set arm the poll timer. 
@@ -1056,6 +1123,8 @@ static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags) if (!ap_dev->unregistered) { if (ap_poll_queue(ap_dev, flags)) ap_dev->unregistered = 1; + if (ap_dev->reset == AP_RESET_DO) + ap_reset(ap_dev); } spin_unlock(&ap_dev->lock); return 0; @@ -1147,6 +1216,17 @@ static void ap_poll_thread_stop(void) mutex_unlock(&ap_poll_thread_mutex); } +/** + * Handling of request timeouts + */ +static void ap_request_timeout(unsigned long data) +{ + struct ap_device *ap_dev = (struct ap_device *) data; + + if (ap_dev->reset == AP_RESET_ARMED) + ap_dev->reset = AP_RESET_DO; +} + static void ap_reset_domain(void) { int i; diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 008559ea742..87c2d644287 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -33,6 +33,7 @@ #define AP_DEVICES 64 /* Number of AP devices. */ #define AP_DOMAINS 16 /* Number of AP domains. */ #define AP_MAX_RESET 90 /* Maximum number of resets. */ +#define AP_RESET_TIMEOUT (HZ/2) /* Time in ticks for reset timeouts. */ #define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */ #define AP_POLL_TIME 1 /* Time in ticks between receive polls. */ @@ -83,6 +84,13 @@ struct ap_queue_status { #define AP_DEVICE_TYPE_CEX2A 6 #define AP_DEVICE_TYPE_CEX2C 7 +/** + * AP reset flag states + */ +#define AP_RESET_IGNORE 0 /* request timeout will be ignored */ +#define AP_RESET_ARMED 1 /* request timeout timer is active */ +#define AP_RESET_DO 2 /* AP reset required */ + struct ap_device; struct ap_message; @@ -95,6 +103,7 @@ struct ap_driver { /* receive is called from tasklet context */ void (*receive)(struct ap_device *, struct ap_message *, struct ap_message *); + int request_timeout; /* request timeout in jiffies */ }; #define to_ap_drv(x) container_of((x), struct ap_driver, driver) @@ -112,6 +121,8 @@ struct ap_device { int queue_depth; /* AP queue depth.*/ int device_type; /* AP device type. */ int unregistered; /* marks AP device as unregistered */ + struct timer_list timeout; /* Timer for request timeouts. */ + int reset; /* Reset required after req. timeout. */ int queue_count; /* # messages currently on AP queue. */ diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index 5bb13a9d089..08657f604b8 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -70,6 +70,7 @@ static struct ap_driver zcrypt_cex2a_driver = { .remove = zcrypt_cex2a_remove, .receive = zcrypt_cex2a_receive, .ids = zcrypt_cex2a_ids, + .request_timeout = CEX2A_CLEANUP_TIME, }; /** @@ -306,18 +307,13 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev, goto out_free; init_completion(&work); ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible_timeout( - &work, CEX2A_CLEANUP_TIME); - if (rc > 0) + rc = wait_for_completion_interruptible(&work); + if (rc == 0) rc = convert_response(zdev, &ap_msg, mex->outputdata, mex->outputdatalength); - else { - /* Signal pending or message timed out. */ + else + /* Signal pending. */ ap_cancel_message(zdev->ap_dev, &ap_msg); - if (rc == 0) - /* Message timed out. 
*/ - rc = -ETIME; - } out_free: kfree(ap_msg.message); return rc; @@ -348,18 +344,13 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev, goto out_free; init_completion(&work); ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible_timeout( - &work, CEX2A_CLEANUP_TIME); - if (rc > 0) + rc = wait_for_completion_interruptible(&work); + if (rc == 0) rc = convert_response(zdev, &ap_msg, crt->outputdata, crt->outputdatalength); - else { - /* Signal pending or message timed out. */ + else + /* Signal pending. */ ap_cancel_message(zdev->ap_dev, &ap_msg); - if (rc == 0) - /* Message timed out. */ - rc = -ETIME; - } out_free: kfree(ap_msg.message); return rc; diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c index 818ffe05ac0..6e93b475178 100644 --- a/drivers/s390/crypto/zcrypt_pcica.c +++ b/drivers/s390/crypto/zcrypt_pcica.c @@ -70,6 +70,7 @@ static struct ap_driver zcrypt_pcica_driver = { .remove = zcrypt_pcica_remove, .receive = zcrypt_pcica_receive, .ids = zcrypt_pcica_ids, + .request_timeout = PCICA_CLEANUP_TIME, }; /** @@ -290,18 +291,13 @@ static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev, goto out_free; init_completion(&work); ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible_timeout( - &work, PCICA_CLEANUP_TIME); - if (rc > 0) + rc = wait_for_completion_interruptible(&work); + if (rc == 0) rc = convert_response(zdev, &ap_msg, mex->outputdata, mex->outputdatalength); - else { - /* Signal pending or message timed out. */ + else + /* Signal pending. */ ap_cancel_message(zdev->ap_dev, &ap_msg); - if (rc == 0) - /* Message timed out. */ - rc = -ETIME; - } out_free: kfree(ap_msg.message); return rc; @@ -332,18 +328,13 @@ static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev, goto out_free; init_completion(&work); ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible_timeout( - &work, PCICA_CLEANUP_TIME); - if (rc > 0) + rc = wait_for_completion_interruptible(&work); + if (rc == 0) rc = convert_response(zdev, &ap_msg, crt->outputdata, crt->outputdatalength); - else { - /* Signal pending or message timed out. */ + else + /* Signal pending. */ ap_cancel_message(zdev->ap_dev, &ap_msg); - if (rc == 0) - /* Message timed out. */ - rc = -ETIME; - } out_free: kfree(ap_msg.message); return rc; diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c index f295a403b29..d6d59bf9ac3 100644 --- a/drivers/s390/crypto/zcrypt_pcicc.c +++ b/drivers/s390/crypto/zcrypt_pcicc.c @@ -82,6 +82,7 @@ static struct ap_driver zcrypt_pcicc_driver = { .remove = zcrypt_pcicc_remove, .receive = zcrypt_pcicc_receive, .ids = zcrypt_pcicc_ids, + .request_timeout = PCICC_CLEANUP_TIME, }; /** @@ -501,18 +502,13 @@ static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev, goto out_free; init_completion(&work); ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible_timeout( - &work, PCICC_CLEANUP_TIME); - if (rc > 0) + rc = wait_for_completion_interruptible(&work); + if (rc == 0) rc = convert_response(zdev, &ap_msg, mex->outputdata, mex->outputdatalength); - else { - /* Signal pending or message timed out. */ + else + /* Signal pending. */ ap_cancel_message(zdev->ap_dev, &ap_msg); - if (rc == 0) - /* Message timed out. 
*/ - rc = -ETIME; - } out_free: free_page((unsigned long) ap_msg.message); return rc; @@ -544,18 +540,13 @@ static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev, goto out_free; init_completion(&work); ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible_timeout( - &work, PCICC_CLEANUP_TIME); - if (rc > 0) + rc = wait_for_completion_interruptible(&work); + if (rc == 0) rc = convert_response(zdev, &ap_msg, crt->outputdata, crt->outputdatalength); - else { - /* Signal pending or message timed out. */ + else + /* Signal pending. */ ap_cancel_message(zdev->ap_dev, &ap_msg); - if (rc == 0) - /* Message timed out. */ - rc = -ETIME; - } out_free: free_page((unsigned long) ap_msg.message); return rc; diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index 252443b6bd1..64948788d30 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c @@ -93,6 +93,7 @@ static struct ap_driver zcrypt_pcixcc_driver = { .remove = zcrypt_pcixcc_remove, .receive = zcrypt_pcixcc_receive, .ids = zcrypt_pcixcc_ids, + .request_timeout = PCIXCC_CLEANUP_TIME, }; /** @@ -641,18 +642,13 @@ static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev, goto out_free; init_completion(&resp_type.work); ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible_timeout( - &resp_type.work, PCIXCC_CLEANUP_TIME); - if (rc > 0) + rc = wait_for_completion_interruptible(&resp_type.work); + if (rc == 0) rc = convert_response_ica(zdev, &ap_msg, mex->outputdata, mex->outputdatalength); - else { - /* Signal pending or message timed out. */ + else + /* Signal pending. */ ap_cancel_message(zdev->ap_dev, &ap_msg); - if (rc == 0) - /* Message timed out. */ - rc = -ETIME; - } out_free: free_page((unsigned long) ap_msg.message); return rc; @@ -685,18 +681,13 @@ static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev, goto out_free; init_completion(&resp_type.work); ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible_timeout( - &resp_type.work, PCIXCC_CLEANUP_TIME); - if (rc > 0) + rc = wait_for_completion_interruptible(&resp_type.work); + if (rc == 0) rc = convert_response_ica(zdev, &ap_msg, crt->outputdata, crt->outputdatalength); - else { - /* Signal pending or message timed out. */ + else + /* Signal pending. */ ap_cancel_message(zdev->ap_dev, &ap_msg); - if (rc == 0) - /* Message timed out. */ - rc = -ETIME; - } out_free: free_page((unsigned long) ap_msg.message); return rc; @@ -729,17 +720,12 @@ static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, goto out_free; init_completion(&resp_type.work); ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible_timeout( - &resp_type.work, PCIXCC_CLEANUP_TIME); - if (rc > 0) + rc = wait_for_completion_interruptible(&resp_type.work); + if (rc == 0) rc = convert_response_xcrb(zdev, &ap_msg, xcRB); - else { - /* Signal pending or message timed out. */ + else + /* Signal pending. */ ap_cancel_message(zdev->ap_dev, &ap_msg); - if (rc == 0) - /* Message timed out. 
*/ - rc = -ETIME; - } out_free: memset(ap_msg.message, 0x0, ap_msg.length); kfree(ap_msg.message); diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 348bb7b8277..023455a0b34 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c @@ -317,8 +317,8 @@ claw_probe(struct ccwgroup_device *cgdev) CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM); return -ENOMEM; } - privptr->p_mtc_envelope= kmalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL); - privptr->p_env = kmalloc(sizeof(struct claw_env), GFP_KERNEL); + privptr->p_mtc_envelope= kzalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL); + privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL); if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) { probe_error(cgdev); put_device(&cgdev->dev); @@ -327,8 +327,6 @@ claw_probe(struct ccwgroup_device *cgdev) CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM); return -ENOMEM; } - memset(privptr->p_mtc_envelope, 0x00, MAX_ENVELOPE_SIZE); - memset(privptr->p_env, 0x00, sizeof(struct claw_env)); memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8); memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8); memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8); @@ -3924,7 +3922,7 @@ add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr) snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", cdev->dev.bus_id); ccw_device_get_id(cdev, &dev_id); p_ch->devno = dev_id.devno; - if ((p_ch->irb = kmalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) { + if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) { printk(KERN_WARNING "%s Out of memory in %s for irb\n", p_ch->id,__FUNCTION__); #ifdef FUNCTRACE @@ -3933,7 +3931,6 @@ add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr) #endif return -ENOMEM; } - memset(p_ch->irb, 0, sizeof (struct irb)); #ifdef FUNCTRACE printk(KERN_INFO "%s:%s Exit on line %d\n", cdev->dev.bus_id,__FUNCTION__,__LINE__); diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h index b34eb82edd9..ec18bae05df 100644 --- a/drivers/s390/net/qeth.h +++ b/drivers/s390/net/qeth.h @@ -211,6 +211,10 @@ struct qeth_perf_stats { /* initial values when measuring starts */ unsigned long initial_rx_packets; unsigned long initial_tx_packets; + /* inbound scatter gather data */ + unsigned int sg_skbs_rx; + unsigned int sg_frags_rx; + unsigned int sg_alloc_page_rx; }; /* Routing stuff */ @@ -341,6 +345,9 @@ qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func) #define QETH_IP_HEADER_SIZE 40 +/* large receive scatter gather copy break */ +#define QETH_RX_SG_CB (PAGE_SIZE >> 1) + struct qeth_hdr_layer3 { __u8 id; __u8 flags; @@ -771,6 +778,7 @@ struct qeth_card_options { int layer2; enum qeth_large_send_types large_send; int performance_stats; + int rx_sg_cb; }; /* @@ -828,6 +836,7 @@ struct qeth_card { int (*orig_hard_header)(struct sk_buff *,struct net_device *, unsigned short,void *,void *,unsigned); struct qeth_osn_info osn_info; + atomic_t force_alloc_skb; }; struct qeth_card_list_struct { diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 86b0c44165c..57f69434fbf 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -1054,6 +1054,7 @@ qeth_set_intial_options(struct qeth_card *card) else card->options.layer2 = 0; card->options.performance_stats = 0; + card->options.rx_sg_cb = QETH_RX_SG_CB; } /** @@ -1934,6 +1935,7 @@ qeth_send_control_data(struct qeth_card *card, int len, atomic_inc(&reply->received); wake_up(&reply->wait_q); } + cpu_relax(); }; rc = reply->rc; qeth_put_reply(reply); @@ -2258,6 
+2260,89 @@ qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) return skb; } +static inline int +qeth_create_skb_frag(struct qdio_buffer_element *element, + struct sk_buff **pskb, + int offset, int *pfrag, int data_len) +{ + struct page *page = virt_to_page(element->addr); + if (*pfrag == 0) { + /* the upper protocol layers assume that there is data in the + * skb itself. Copy a small amount (64 bytes) to make them + * happy. */ + *pskb = dev_alloc_skb(64 + QETH_FAKE_LL_LEN_ETH); + if (!(*pskb)) + return -ENOMEM; + skb_reserve(*pskb, QETH_FAKE_LL_LEN_ETH); + if (data_len <= 64) { + memcpy(skb_put(*pskb, data_len), element->addr + offset, + data_len); + } else { + get_page(page); + memcpy(skb_put(*pskb, 64), element->addr + offset, 64); + skb_fill_page_desc(*pskb, *pfrag, page, offset + 64, + data_len - 64); + (*pskb)->data_len += data_len - 64; + (*pskb)->len += data_len - 64; + (*pskb)->truesize += data_len - 64; + } + } else { + get_page(page); + skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len); + (*pskb)->data_len += data_len; + (*pskb)->len += data_len; + (*pskb)->truesize += data_len; + } + (*pfrag)++; + return 0; +} + +static inline struct qeth_buffer_pool_entry * +qeth_find_free_buffer_pool_entry(struct qeth_card *card) +{ + struct list_head *plh; + struct qeth_buffer_pool_entry *entry; + int i, free; + struct page *page; + + if (list_empty(&card->qdio.in_buf_pool.entry_list)) + return NULL; + + list_for_each(plh, &card->qdio.in_buf_pool.entry_list) { + entry = list_entry(plh, struct qeth_buffer_pool_entry, list); + free = 1; + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { + if (page_count(virt_to_page(entry->elements[i])) > 1) { + free = 0; + break; + } + } + if (free) { + list_del_init(&entry->list); + return entry; + } + } + + /* no free buffer in pool so take first one and swap pages */ + entry = list_entry(card->qdio.in_buf_pool.entry_list.next, + struct qeth_buffer_pool_entry, list); + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { + if (page_count(virt_to_page(entry->elements[i])) > 1) { + page = alloc_page(GFP_ATOMIC|GFP_DMA); + if (!page) { + return NULL; + } else { + free_page((unsigned long)entry->elements[i]); + entry->elements[i] = page_address(page); + if (card->options.performance_stats) + card->perf_stats.sg_alloc_page_rx++; + } + } + } + list_del_init(&entry->list); + return entry; +} + static struct sk_buff * qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, struct qdio_buffer_element **__element, int *__offset, @@ -2269,6 +2354,8 @@ qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, int skb_len; void *data_ptr; int data_len; + int use_rx_sg = 0; + int frag = 0; QETH_DBF_TEXT(trace,6,"nextskb"); /* qeth_hdr must not cross element boundaries */ @@ -2293,23 +2380,43 @@ qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, if (!skb_len) return NULL; - if (card->options.fake_ll){ - if(card->dev->type == ARPHRD_IEEE802_TR){ - if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_TR, *hdr))) - goto no_mem; - skb_reserve(skb,QETH_FAKE_LL_LEN_TR); + if ((skb_len >= card->options.rx_sg_cb) && + (!(card->info.type == QETH_CARD_TYPE_OSN)) && + (!atomic_read(&card->force_alloc_skb))) { + use_rx_sg = 1; + } else { + if (card->options.fake_ll) { + if (card->dev->type == ARPHRD_IEEE802_TR) { + if (!(skb = qeth_get_skb(skb_len + + QETH_FAKE_LL_LEN_TR, *hdr))) + goto no_mem; + skb_reserve(skb, QETH_FAKE_LL_LEN_TR); + } else { + if (!(skb = qeth_get_skb(skb_len + + QETH_FAKE_LL_LEN_ETH, *hdr))) + goto 
no_mem; + skb_reserve(skb, QETH_FAKE_LL_LEN_ETH); + } } else { - if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_ETH, *hdr))) + skb = qeth_get_skb(skb_len, *hdr); + if (!skb) goto no_mem; - skb_reserve(skb,QETH_FAKE_LL_LEN_ETH); } - } else if (!(skb = qeth_get_skb(skb_len, *hdr))) - goto no_mem; + } + data_ptr = element->addr + offset; while (skb_len) { data_len = min(skb_len, (int)(element->length - offset)); - if (data_len) - memcpy(skb_put(skb, data_len), data_ptr, data_len); + if (data_len) { + if (use_rx_sg) { + if (qeth_create_skb_frag(element, &skb, offset, + &frag, data_len)) + goto no_mem; + } else { + memcpy(skb_put(skb, data_len), data_ptr, + data_len); + } + } skb_len -= data_len; if (skb_len){ if (qeth_is_last_sbale(element)){ @@ -2331,6 +2438,10 @@ qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, } *__element = element; *__offset = offset; + if (use_rx_sg && card->options.performance_stats) { + card->perf_stats.sg_skbs_rx++; + card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags; + } return skb; no_mem: if (net_ratelimit()){ @@ -2608,28 +2719,15 @@ qeth_process_inbound_buffer(struct qeth_card *card, } } -static struct qeth_buffer_pool_entry * -qeth_get_buffer_pool_entry(struct qeth_card *card) -{ - struct qeth_buffer_pool_entry *entry; - - QETH_DBF_TEXT(trace, 6, "gtbfplen"); - if (!list_empty(&card->qdio.in_buf_pool.entry_list)) { - entry = list_entry(card->qdio.in_buf_pool.entry_list.next, - struct qeth_buffer_pool_entry, list); - list_del_init(&entry->list); - return entry; - } - return NULL; -} - -static void +static int qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) { struct qeth_buffer_pool_entry *pool_entry; int i; - - pool_entry = qeth_get_buffer_pool_entry(card); + + pool_entry = qeth_find_free_buffer_pool_entry(card); + if (!pool_entry) + return 1; /* * since the buffer is accessed only from the input_tasklet * there shouldn't be a need to synchronize; also, since we use @@ -2648,6 +2746,7 @@ qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) buf->buffer->element[i].flags = 0; } buf->state = QETH_QDIO_BUF_EMPTY; + return 0; } static void @@ -2682,6 +2781,7 @@ qeth_queue_input_buffer(struct qeth_card *card, int index) int count; int i; int rc; + int newcount = 0; QETH_DBF_TEXT(trace,6,"queinbuf"); count = (index < queue->next_buf_to_init)? @@ -2692,9 +2792,27 @@ qeth_queue_input_buffer(struct qeth_card *card, int index) /* only requeue at a certain threshold to avoid SIGAs */ if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){ for (i = queue->next_buf_to_init; - i < queue->next_buf_to_init + count; ++i) - qeth_init_input_buffer(card, - &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]); + i < queue->next_buf_to_init + count; ++i) { + if (qeth_init_input_buffer(card, + &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) { + break; + } else { + newcount++; + } + } + + if (newcount < count) { + /* we are in memory shortage so we switch back to + traditional skb allocation and drop packages */ + if (atomic_cmpxchg(&card->force_alloc_skb, 0, 1)) + printk(KERN_WARNING + "qeth: switch to alloc skb\n"); + count = newcount; + } else { + if (atomic_cmpxchg(&card->force_alloc_skb, 1, 0)) + printk(KERN_WARNING "qeth: switch to sg\n"); + } + /* * according to old code it should be avoided to requeue all * 128 buffers in order to benefit from PCI avoidance. 
@@ -6494,6 +6612,7 @@ qeth_hardsetup_card(struct qeth_card *card) QETH_DBF_TEXT(setup, 2, "hrdsetup"); + atomic_set(&card->force_alloc_skb, 0); retry: if (retries < 3){ PRINT_WARN("Retrying to do IDX activates.\n"); diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c index 89d56c8ecdd..f1ff165a5e0 100644 --- a/drivers/s390/net/qeth_proc.c +++ b/drivers/s390/net/qeth_proc.c @@ -212,6 +212,12 @@ qeth_perf_procfile_seq_show(struct seq_file *s, void *it) " Skb fragments sent in SG mode : %u\n\n", card->perf_stats.sg_skbs_sent, card->perf_stats.sg_frags_sent); + seq_printf(s, " Skbs received in SG mode : %u\n" + " Skb fragments received in SG mode : %u\n" + " Page allocations for rx SG mode : %u\n\n", + card->perf_stats.sg_skbs_rx, + card->perf_stats.sg_frags_rx, + card->perf_stats.sg_alloc_page_rx); seq_printf(s, " large_send tx (in Kbytes) : %u\n" " large_send count : %u\n\n", card->perf_stats.large_send_bytes >> 10, diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c index 65ffc21afc3..bb0287ad1aa 100644 --- a/drivers/s390/net/qeth_sys.c +++ b/drivers/s390/net/qeth_sys.c @@ -991,7 +991,7 @@ static struct attribute_group qeth_osn_device_attr_group = { #define QETH_DEVICE_ATTR(_id,_name,_mode,_show,_store) \ struct device_attribute dev_attr_##_id = { \ - .attr = {.name=__stringify(_name), .mode=_mode, .owner=THIS_MODULE },\ + .attr = {.name=__stringify(_name), .mode=_mode, },\ .show = _show, \ .store = _store, \ }; diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 821cde65e36..9726261c367 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -259,21 +259,21 @@ zfcp_module_init(void) size = sizeof(struct zfcp_fsf_req_qtcb); align = calc_alignment(size); zfcp_data.fsf_req_qtcb_cache = - kmem_cache_create("zfcp_fsf", size, align, 0, NULL, NULL); + kmem_cache_create("zfcp_fsf", size, align, 0, NULL); if (!zfcp_data.fsf_req_qtcb_cache) goto out; size = sizeof(struct fsf_status_read_buffer); align = calc_alignment(size); zfcp_data.sr_buffer_cache = - kmem_cache_create("zfcp_sr", size, align, 0, NULL, NULL); + kmem_cache_create("zfcp_sr", size, align, 0, NULL); if (!zfcp_data.sr_buffer_cache) goto out_sr_cache; size = sizeof(struct zfcp_gid_pn_data); align = calc_alignment(size); zfcp_data.gid_pn_cache = - kmem_cache_create("zfcp_gid", size, align, 0, NULL, NULL); + kmem_cache_create("zfcp_gid", size, align, 0, NULL); if (!zfcp_data.gid_pn_cache) goto out_gid_cache; @@ -815,9 +815,7 @@ zfcp_get_adapter_by_busid(char *bus_id) struct zfcp_unit * zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun) { - struct zfcp_unit *unit, *tmp_unit; - unsigned int scsi_lun; - int found; + struct zfcp_unit *unit; /* * check that there is no unit with this FCP_LUN already in list @@ -863,22 +861,10 @@ zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun) } zfcp_unit_get(unit); + unit->scsi_lun = scsilun_to_int((struct scsi_lun *)&unit->fcp_lun); - scsi_lun = 0; - found = 0; write_lock_irq(&zfcp_data.config_lock); - list_for_each_entry(tmp_unit, &port->unit_list_head, list) { - if (tmp_unit->scsi_lun != scsi_lun) { - found = 1; - break; - } - scsi_lun++; - } - unit->scsi_lun = scsi_lun; - if (found) - list_add_tail(&unit->list, &tmp_unit->list); - else - list_add_tail(&unit->list, &port->unit_list_head); + list_add_tail(&unit->list, &port->unit_list_head); atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status); 
write_unlock_irq(&zfcp_data.config_lock); diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index aef66bc2b6c..4e7cb6dc4d3 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -1986,6 +1986,10 @@ zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close) failed_openfcp: zfcp_close_fsf(erp_action->adapter); failed_qdio: + atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | + ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | + ZFCP_STATUS_ADAPTER_XPORT_OK, + &erp_action->adapter->status); out: return retval; } @@ -2167,6 +2171,9 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action) sleep *= 2; } + atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, + &adapter->status); + if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status)) { ZFCP_LOG_INFO("error: exchange of configuration data for "
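
The ap_bus/zcrypt hunks above move request-timeout handling out of the individual card drivers and into the bus: each driver now advertises a request_timeout, the bus arms a per-device timer, and an expired timer only flags the device (AP_RESET_ARMED -> AP_RESET_DO) so that __ap_poll_all() performs the actual ap_reset() later, outside the timer handler. The sketch below illustrates that pattern only; the my_* names are placeholders, not code from the patch.

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Reset flag states, mirroring AP_RESET_IGNORE/ARMED/DO in the patch. */
#define MY_RESET_IGNORE	0	/* timer expiry is ignored */
#define MY_RESET_ARMED	1	/* timer armed, request outstanding */
#define MY_RESET_DO	2	/* poll path must reset the device */

struct my_dev {
	struct timer_list timeout;
	int reset;
};

/* Timer callback: runs in timer/softirq context, so it only records the event. */
static void my_request_timeout(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *) data;

	if (dev->reset == MY_RESET_ARMED)
		dev->reset = MY_RESET_DO;
}

static void my_dev_init(struct my_dev *dev)
{
	dev->reset = MY_RESET_IGNORE;
	setup_timer(&dev->timeout, my_request_timeout, (unsigned long) dev);
}

/* Arm the watchdog when a request is queued to the device. */
static void my_queue_request(struct my_dev *dev, unsigned long request_timeout)
{
	dev->reset = MY_RESET_ARMED;
	mod_timer(&dev->timeout, jiffies + request_timeout);
}

/* Reply arrived before the timer fired: disarm the flag. */
static void my_request_done(struct my_dev *dev)
{
	dev->reset = MY_RESET_IGNORE;
}

/* Poll path: the reset itself happens here, not in the timer handler. */
static void my_poll(struct my_dev *dev)
{
	if (dev->reset == MY_RESET_DO) {
		dev->reset = MY_RESET_IGNORE;
		/* perform the device reset here (ap_reset() in the real bus) */
	}
}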
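
Because timeouts are now the bus's job, the zcrypt_cex2a/pcica/pcicc/pcixcc request paths switch from wait_for_completion_interruptible_timeout() to wait_for_completion_interruptible(), which returns 0 once the completion fires and -ERESTARTSYS if a signal arrives; only the signal case still cancels the message, and the old -ETIME branch disappears. A minimal sketch of the resulting flow, with hypothetical my_* helpers standing in for ap_queue_message(), convert_response() and ap_cancel_message():

#include <linux/completion.h>
#include <linux/errno.h>

/* Hypothetical request type and helpers; only the control flow matters. */
struct my_req {
	struct completion work;
	long result;
};

static void my_submit(struct my_req *req)	{ /* queue to the device */ }
static long my_convert(struct my_req *req)	{ return req->result; }
static void my_cancel(struct my_req *req)	{ /* remove from the queue */ }

static long my_send_and_wait(struct my_req *req)
{
	long rc;

	init_completion(&req->work);
	my_submit(req);		/* the receive path completes &req->work */
	rc = wait_for_completion_interruptible(&req->work);
	if (rc == 0)
		/* reply arrived: convert it into the caller's buffer */
		rc = my_convert(req);
	else
		/* rc == -ERESTARTSYS: a signal is pending, drop the request;
		 * request timeouts are handled by the bus timer, not here */
		my_cancel(req);
	return rc;
}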
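
The claw.c hunks are a straightforward cleanup: kmalloc() followed by memset(..., 0x00, ...) becomes a single kzalloc(), which returns zeroed memory or NULL. For example (struct my_env is a stand-in for the driver's claw_env/irb structures):

#include <linux/slab.h>

struct my_env {
	char adapter_name[8];
	char host_name[8];
};

static struct my_env *my_alloc_env(void)
{
	/* old style:
	 *	env = kmalloc(sizeof(*env), GFP_KERNEL);
	 *	if (env)
	 *		memset(env, 0, sizeof(*env));
	 * new style: one call, memory comes back zeroed (or NULL on failure)
	 */
	return kzalloc(sizeof(struct my_env), GFP_KERNEL);
}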
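
The large qeth_main.c addition implements inbound scatter-gather: once a packet reaches the copy break (rx_sg_cb, QETH_RX_SG_CB = half a page by default), qeth_create_skb_frag() copies only the first 64 bytes into the skb's linear area so the upper layers find data in the skb head, and attaches the rest as page fragments with an extra page reference instead of copying it. A condensed sketch of that copy-break decision, under the simplifying assumption that the whole packet sits in one buffer page (the real code walks qdio buffer elements and may build several fragments):

#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/string.h>

#define MY_RX_SG_CB	(PAGE_SIZE >> 1)	/* copy break, mirrors QETH_RX_SG_CB */
#define MY_RX_LINEAR	64			/* bytes copied into the skb head */

static struct sk_buff *my_rx_build_skb(void *data, int len)
{
	struct page *page = virt_to_page(data);
	struct sk_buff *skb;

	if (len < MY_RX_SG_CB) {
		/* small packet: copy it, no page reference needed */
		skb = dev_alloc_skb(len);
		if (!skb)
			return NULL;
		memcpy(skb_put(skb, len), data, len);
		return skb;
	}

	/* large packet: small linear copy, remainder as a page fragment */
	skb = dev_alloc_skb(MY_RX_LINEAR);
	if (!skb)
		return NULL;
	memcpy(skb_put(skb, MY_RX_LINEAR), data, MY_RX_LINEAR);

	get_page(page);		/* fragment keeps the buffer page alive */
	skb_fill_page_desc(skb, 0, page,
			   offset_in_page(data) + MY_RX_LINEAR,
			   len - MY_RX_LINEAR);
	skb->data_len += len - MY_RX_LINEAR;
	skb->len      += len - MY_RX_LINEAR;
	skb->truesize += len - MY_RX_LINEAR;
	return skb;
}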
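
On the refill side, qeth_queue_input_buffer() now counts how many input buffers could actually be re-armed; when qeth_find_free_buffer_pool_entry() cannot hand out an entry whose pages are unreferenced (page_count() > 1 means an skb fragment still holds them) and no replacement page can be allocated, the card switches to plain skb allocation via the force_alloc_skb flag, and switches back once buffers can be refilled again. The sketch below only illustrates the atomic_cmpxchg() flag handling; the names and the print-on-transition policy are illustrative, not lifted from the patch:

#include <asm/atomic.h>
#include <linux/kernel.h>

/* Illustrative flag, mirroring card->force_alloc_skb. */
static atomic_t force_alloc_skb = ATOMIC_INIT(0);

/*
 * atomic_cmpxchg() stores the new value only if the old one matches and
 * always returns the previous value, so the caller can tell whether the
 * state really changed and act (e.g. log) exactly once per transition.
 */
static void rx_memory_pressure(void)
{
	if (atomic_cmpxchg(&force_alloc_skb, 0, 1) == 0)
		printk(KERN_WARNING "rx: falling back to skb allocation\n");
}

static void rx_memory_recovered(void)
{
	if (atomic_cmpxchg(&force_alloc_skb, 1, 0) == 1)
		printk(KERN_WARNING "rx: scatter-gather mode re-enabled\n");
}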
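
Finally, the zfcp_aux.c hunks track an API change in the slab allocator: kmem_cache_create() drops its trailing destructor argument, leaving name, object size, alignment, SLAB flags and an optional constructor. A minimal example of the five-argument call (my_cache/my_obj are placeholders):

#include <linux/slab.h>
#include <linux/errno.h>

struct my_obj {
	int id;
	char payload[64];
};

static struct kmem_cache *my_cache;

static int my_cache_init(void)
{
	/* five arguments; the constructor may simply be NULL */
	my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
				     0, 0, NULL);
	return my_cache ? 0 : -ENOMEM;
}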