author | Dmitry Torokhov <dtor@insightbb.com> | 2007-02-10 01:26:32 -0500
committer | Dmitry Torokhov <dtor@insightbb.com> | 2007-02-10 01:26:32 -0500
commit | b22364c8eec89e6b0c081a237f3b6348df87796f (patch)
tree | 233a923281fb640106465d076997ff511efb6edf /drivers/s390
parent | 2c8dc071517ec2843869024dc82be2e246f41064 (diff)
parent | 66efc5a7e3061c3597ac43a8bb1026488d57e66b (diff)
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers/s390')
88 files changed, 3231 insertions, 5680 deletions
diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig index ae89b9b8874..165af398fde 100644 --- a/drivers/s390/Kconfig +++ b/drivers/s390/Kconfig @@ -103,14 +103,8 @@ config CCW_CONSOLE depends on TN3215_CONSOLE || TN3270_CONSOLE default y -config SCLP - bool "Support for SCLP" - help - Include support for the SCLP interface to the service element. - config SCLP_TTY bool "Support for SCLP line mode terminal" - depends on SCLP help Include support for IBM SCLP line-mode terminals. @@ -123,7 +117,6 @@ config SCLP_CONSOLE config SCLP_VT220_TTY bool "Support for SCLP VT220-compatible terminal" - depends on SCLP help Include support for an IBM SCLP VT220-compatible terminal. @@ -136,7 +129,6 @@ config SCLP_VT220_CONSOLE config SCLP_CPI tristate "Control-Program Identification" - depends on SCLP help This option enables the hardware console interface for system identification. This is commonly used for workload management and diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile index 9803c9352d7..5a888704a8d 100644 --- a/drivers/s390/Makefile +++ b/drivers/s390/Makefile @@ -2,6 +2,8 @@ # Makefile for the S/390 specific device drivers # +CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w + obj-y += s390mach.o sysinfo.o s390_rdev.o obj-y += cio/ block/ char/ crypto/ net/ scsi/ diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 2af2d9b53d1..eb5dc62f0d9 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -37,6 +37,7 @@ */ debug_info_t *dasd_debug_area; struct dasd_discipline *dasd_diag_discipline_pointer; +void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); MODULE_DESCRIPTION("Linux on S/390 DASD device driver," @@ -51,7 +52,6 @@ static int dasd_alloc_queue(struct dasd_device * device); static void dasd_setup_queue(struct dasd_device * device); static void dasd_free_queue(struct dasd_device * device); static void dasd_flush_request_queue(struct dasd_device *); -static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); static int dasd_flush_ccw_queue(struct dasd_device *, int); static void dasd_tasklet(struct dasd_device *); static void do_kick_device(struct work_struct *); @@ -483,7 +483,7 @@ unsigned int dasd_profile_level = DASD_PROFILE_OFF; /* * Add profiling information for cqr before execution. */ -static inline void +static void dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, struct request *req) { @@ -505,7 +505,7 @@ dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, /* * Add profiling information for cqr after execution. 
*/ -static inline void +static void dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, struct request *req) { @@ -1022,8 +1022,6 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) era = dasd_era_none; - else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) - era = dasd_era_fatal; /* don't recover this request */ else if (irb->esw.esw0.erw.cons) era = device->discipline->examine_error(cqr, irb); else @@ -1050,10 +1048,10 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, } } else { /* error */ memcpy(&cqr->irb, irb, sizeof (struct irb)); -#ifdef ERP_DEBUG - /* dump sense data */ - dasd_log_sense(cqr, irb); -#endif + if (device->features & DASD_FEATURE_ERPLOG) { + /* dump sense data */ + dasd_log_sense(cqr, irb); + } switch (era) { case dasd_era_fatal: cqr->status = DASD_CQR_FAILED; @@ -1104,7 +1102,7 @@ __dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr) /* * Process ccw request queue. */ -static inline void +static void __dasd_process_ccw_queue(struct dasd_device * device, struct list_head *final_queue) { @@ -1127,7 +1125,9 @@ restart: cqr->status = DASD_CQR_FAILED; cqr->stopclk = get_clock(); } else { - if (cqr->irb.esw.esw0.erw.cons) { + if (cqr->irb.esw.esw0.erw.cons && + test_bit(DASD_CQR_FLAGS_USE_ERP, + &cqr->flags)) { erp_fn = device->discipline-> erp_action(cqr); erp_fn(cqr); @@ -1181,7 +1181,7 @@ dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data) /* * Fetch requests from the block device queue. */ -static inline void +static void __dasd_process_blk_queue(struct dasd_device * device) { request_queue_t *queue; @@ -1232,6 +1232,19 @@ __dasd_process_blk_queue(struct dasd_device * device) if (IS_ERR(cqr)) { if (PTR_ERR(cqr) == -ENOMEM) break; /* terminate request queue loop */ + if (PTR_ERR(cqr) == -EAGAIN) { + /* + * The current request cannot be build right + * now, we have to try later. If this request + * is the head-of-queue we stop the device + * for 1/2 second. + */ + if (!list_empty(&device->ccw_queue)) + break; + device->stopped |= DASD_STOPPED_PENDING; + dasd_set_timer(device, HZ/2); + break; + } DBF_DEV_EVENT(DBF_ERR, device, "CCW creation failed (rc=%ld) " "on request %p", @@ -1254,7 +1267,7 @@ __dasd_process_blk_queue(struct dasd_device * device) * Take a look at the first request on the ccw queue and check * if it reached its expire time. If so, terminate the IO. */ -static inline void +static void __dasd_check_expire(struct dasd_device * device) { struct dasd_ccw_req *cqr; @@ -1285,7 +1298,7 @@ __dasd_check_expire(struct dasd_device * device) * Take a look at the first request on the ccw queue and check * if it needs to be started. 
*/ -static inline void +static void __dasd_start_head(struct dasd_device * device) { struct dasd_ccw_req *cqr; diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 669805d4402..8b9d68f6e01 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -170,7 +170,6 @@ dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb) /* log the erp chain if fatal error occurred */ if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) { dasd_log_sense(cqr, irb); - dasd_log_ccw(cqr, 0, irb->scsw.cpa); } return era; @@ -2640,15 +2639,12 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) struct dasd_ccw_req *erp = NULL; struct dasd_device *device = cqr->device; - __u32 cpa = cqr->irb.scsw.cpa; - -#ifdef ERP_DEBUG - /* print current erp_chain */ - DEV_MESSAGE(KERN_ERR, device, "%s", - "ERP chain at BEGINNING of ERP-ACTION"); - { - struct dasd_ccw_req *temp_erp = NULL; + struct dasd_ccw_req *temp_erp = NULL; + if (device->features & DASD_FEATURE_ERPLOG) { + /* print current erp_chain */ + DEV_MESSAGE(KERN_ERR, device, "%s", + "ERP chain at BEGINNING of ERP-ACTION"); for (temp_erp = cqr; temp_erp != NULL; temp_erp = temp_erp->refers) { @@ -2658,7 +2654,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) temp_erp->refers); } } -#endif /* ERP_DEBUG */ /* double-check if current erp/cqr was successfull */ if ((cqr->irb.scsw.cstat == 0x00) && @@ -2695,11 +2690,10 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) erp = dasd_3990_erp_handle_match_erp(cqr, erp); } -#ifdef ERP_DEBUG - /* print current erp_chain */ - DEV_MESSAGE(KERN_ERR, device, "%s", "ERP chain at END of ERP-ACTION"); - { - struct dasd_ccw_req *temp_erp = NULL; + if (device->features & DASD_FEATURE_ERPLOG) { + /* print current erp_chain */ + DEV_MESSAGE(KERN_ERR, device, "%s", + "ERP chain at END of ERP-ACTION"); for (temp_erp = erp; temp_erp != NULL; temp_erp = temp_erp->refers) { @@ -2709,10 +2703,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) temp_erp->refers); } } -#endif /* ERP_DEBUG */ - - if (erp->status == DASD_CQR_FAILED) - dasd_log_ccw(erp, 1, cpa); /* enqueue added ERP request */ if (erp->status == DASD_CQR_FILLED) { diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index cf28ccc5794..ed70852cc91 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -136,7 +136,7 @@ __setup ("dasd=", dasd_call_setup); /* * Read a device busid/devno from a string. */ -static inline int +static int dasd_busid(char **str, int *id0, int *id1, int *devno) { int val, old_style; @@ -182,7 +182,7 @@ dasd_busid(char **str, int *id0, int *id1, int *devno) * only one: "ro" for read-only devices. The default feature set * is empty (value 0). 
*/ -static inline int +static int dasd_feature_list(char *str, char **endp) { int features, len, rc; @@ -202,6 +202,8 @@ dasd_feature_list(char *str, char **endp) features |= DASD_FEATURE_READONLY; else if (len == 4 && !strncmp(str, "diag", 4)) features |= DASD_FEATURE_USEDIAG; + else if (len == 6 && !strncmp(str, "erplog", 6)) + features |= DASD_FEATURE_ERPLOG; else { MESSAGE(KERN_WARNING, "unsupported feature: %*s, " @@ -339,7 +341,7 @@ dasd_parse_range( char *parsestring ) { return ERR_PTR(-EINVAL); } -static inline char * +static char * dasd_parse_next_element( char *parsestring ) { char * residual_str; residual_str = dasd_parse_keyword(parsestring); @@ -709,6 +711,52 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR(readonly, 0644, dasd_ro_show, dasd_ro_store); +/* + * erplog controls the logging of ERP related data + * (e.g. failing channel programs). + */ +static ssize_t +dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dasd_devmap *devmap; + int erplog; + + devmap = dasd_find_busid(dev->bus_id); + if (!IS_ERR(devmap)) + erplog = (devmap->features & DASD_FEATURE_ERPLOG) != 0; + else + erplog = (DASD_FEATURE_DEFAULT & DASD_FEATURE_ERPLOG) != 0; + return snprintf(buf, PAGE_SIZE, erplog ? "1\n" : "0\n"); +} + +static ssize_t +dasd_erplog_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dasd_devmap *devmap; + int val; + char *endp; + + devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); + if (IS_ERR(devmap)) + return PTR_ERR(devmap); + + val = simple_strtoul(buf, &endp, 0); + if (((endp + 1) < (buf + count)) || (val > 1)) + return -EINVAL; + + spin_lock(&dasd_devmap_lock); + if (val) + devmap->features |= DASD_FEATURE_ERPLOG; + else + devmap->features &= ~DASD_FEATURE_ERPLOG; + if (devmap->device) + devmap->device->features = devmap->features; + spin_unlock(&dasd_devmap_lock); + return count; +} + +static DEVICE_ATTR(erplog, 0644, dasd_erplog_show, dasd_erplog_store); /* * use_diag controls whether the driver should use diag rather than ssch @@ -896,6 +944,7 @@ static struct attribute * dasd_attrs[] = { &dev_attr_uid.attr, &dev_attr_use_diag.attr, &dev_attr_eer_enabled.attr, + &dev_attr_erplog.attr, NULL, }; diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 53db58a6861..ab782bb46ac 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -43,7 +43,7 @@ MODULE_LICENSE("GPL"); #define DIAG_MAX_RETRIES 32 #define DIAG_TIMEOUT 50 * HZ -struct dasd_discipline dasd_diag_discipline; +static struct dasd_discipline dasd_diag_discipline; struct dasd_diag_private { struct dasd_diag_characteristics rdc_data; @@ -90,7 +90,7 @@ static inline int dia250(void *iob, int cmd) * block offset. On success, return zero and set end_block to contain the * number of blocks on the device minus the specified offset. Return non-zero * otherwise. */ -static __inline__ int +static inline int mdsk_init_io(struct dasd_device *device, unsigned int blocksize, blocknum_t offset, blocknum_t *end_block) { @@ -117,7 +117,7 @@ mdsk_init_io(struct dasd_device *device, unsigned int blocksize, /* Remove block I/O environment for device. Return zero on success, non-zero * otherwise. 
*/ -static __inline__ int +static inline int mdsk_term_io(struct dasd_device * device) { struct dasd_diag_private *private; @@ -576,7 +576,7 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, "dump sense not available for DIAG data"); } -struct dasd_discipline dasd_diag_discipline = { +static struct dasd_discipline dasd_diag_discipline = { .owner = THIS_MODULE, .name = "DIAG", .ebcname = "DIAG", diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index fdaa471e845..cecab2274a6 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -134,44 +134,7 @@ ceil_quot(unsigned int d1, unsigned int d2) return (d1 + (d2 - 1)) / d2; } -static inline int -bytes_per_record(struct dasd_eckd_characteristics *rdc, int kl, int dl) -{ - unsigned int fl1, fl2, int1, int2; - int bpr; - - switch (rdc->formula) { - case 0x01: - fl1 = round_up_multiple(ECKD_F2(rdc) + dl, ECKD_F1(rdc)); - fl2 = round_up_multiple(kl ? ECKD_F2(rdc) + kl : 0, - ECKD_F1(rdc)); - bpr = fl1 + fl2; - break; - case 0x02: - int1 = ceil_quot(dl + ECKD_F6(rdc), ECKD_F5(rdc) << 1); - int2 = ceil_quot(kl + ECKD_F6(rdc), ECKD_F5(rdc) << 1); - fl1 = round_up_multiple(ECKD_F1(rdc) * ECKD_F2(rdc) + dl + - ECKD_F6(rdc) + ECKD_F4(rdc) * int1, - ECKD_F1(rdc)); - fl2 = round_up_multiple(ECKD_F1(rdc) * ECKD_F3(rdc) + kl + - ECKD_F6(rdc) + ECKD_F4(rdc) * int2, - ECKD_F1(rdc)); - bpr = fl1 + fl2; - break; - default: - bpr = 0; - break; - } - return bpr; -} - -static inline unsigned int -bytes_per_track(struct dasd_eckd_characteristics *rdc) -{ - return *(unsigned int *) (rdc->byte_per_track) >> 8; -} - -static inline unsigned int +static unsigned int recs_per_track(struct dasd_eckd_characteristics * rdc, unsigned int kl, unsigned int dl) { @@ -204,37 +167,39 @@ recs_per_track(struct dasd_eckd_characteristics * rdc, return 0; } -static inline void +static int check_XRC (struct ccw1 *de_ccw, struct DE_eckd_data *data, struct dasd_device *device) { struct dasd_eckd_private *private; + int rc; private = (struct dasd_eckd_private *) device->private; + if (!private->rdc_data.facilities.XRC_supported) + return 0; /* switch on System Time Stamp - needed for XRC Support */ - if (private->rdc_data.facilities.XRC_supported) { - - data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */ - data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */ - - data->ep_sys_time = get_clock (); - - de_ccw->count = sizeof (struct DE_eckd_data); - de_ccw->flags |= CCW_FLAG_SLI; - } + data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */ + data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */ - return; + rc = get_sync_clock(&data->ep_sys_time); + /* Ignore return code if sync clock is switched off. 
*/ + if (rc == -ENOSYS || rc == -EACCES) + rc = 0; -} /* end check_XRC */ + de_ccw->count = sizeof (struct DE_eckd_data); + de_ccw->flags |= CCW_FLAG_SLI; + return rc; +} -static inline void +static int define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, int totrk, int cmd, struct dasd_device * device) { struct dasd_eckd_private *private; struct ch_t geo, beg, end; + int rc = 0; private = (struct dasd_eckd_private *) device->private; @@ -263,12 +228,12 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, case DASD_ECKD_CCW_WRITE_KD_MT: data->mask.perm = 0x02; data->attributes.operation = private->attrib.operation; - check_XRC (ccw, data, device); + rc = check_XRC (ccw, data, device); break; case DASD_ECKD_CCW_WRITE_CKD: case DASD_ECKD_CCW_WRITE_CKD_MT: data->attributes.operation = DASD_BYPASS_CACHE; - check_XRC (ccw, data, device); + rc = check_XRC (ccw, data, device); break; case DASD_ECKD_CCW_ERASE: case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: @@ -276,7 +241,7 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, data->mask.perm = 0x3; data->mask.auth = 0x1; data->attributes.operation = DASD_BYPASS_CACHE; - check_XRC (ccw, data, device); + rc = check_XRC (ccw, data, device); break; default: DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd); @@ -312,9 +277,10 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, data->beg_ext.head = beg.head; data->end_ext.cyl = end.cyl; data->end_ext.head = end.head; + return rc; } -static inline void +static void locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk, int rec_on_trk, int no_rec, int cmd, struct dasd_device * device, int reclen) @@ -548,7 +514,7 @@ dasd_eckd_read_conf(struct dasd_device *device) /* * Build CP for Perform Subsystem Function - SSC. */ -struct dasd_ccw_req * +static struct dasd_ccw_req * dasd_eckd_build_psf_ssc(struct dasd_device *device) { struct dasd_ccw_req *cqr; @@ -1200,7 +1166,12 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) return cqr; ccw = cqr->cpaddr; /* First ccw is define extent. */ - define_extent(ccw++, cqr->data, first_trk, last_trk, cmd, device); + if (define_extent(ccw++, cqr->data, first_trk, + last_trk, cmd, device) == -EAGAIN) { + /* Clock not in sync and XRC is enabled. Try again later. */ + dasd_sfree_request(cqr, device); + return ERR_PTR(-EAGAIN); + } /* Build locate_record+read/write/ccws. 
*/ idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data)); LO_data = (struct LO_eckd_data *) (idaws + cidaw); @@ -1380,7 +1351,7 @@ dasd_eckd_release(struct dasd_device *device) cqr->device = device; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); - cqr->retries = 0; + cqr->retries = 2; /* set retry counter to enable basic ERP */ cqr->expires = 2 * HZ; cqr->buildclk = get_clock(); cqr->status = DASD_CQR_FILLED; @@ -1420,7 +1391,7 @@ dasd_eckd_reserve(struct dasd_device *device) cqr->device = device; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); - cqr->retries = 0; + cqr->retries = 2; /* set retry counter to enable basic ERP */ cqr->expires = 2 * HZ; cqr->buildclk = get_clock(); cqr->status = DASD_CQR_FILLED; @@ -1459,7 +1430,7 @@ dasd_eckd_steal_lock(struct dasd_device *device) cqr->device = device; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); - cqr->retries = 0; + cqr->retries = 2; /* set retry counter to enable basic ERP */ cqr->expires = 2 * HZ; cqr->buildclk = get_clock(); cqr->status = DASD_CQR_FILLED; @@ -1609,7 +1580,7 @@ dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp) * Dump the range of CCWs into 'page' buffer * and return number of printed chars. */ -static inline int +static int dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) { int len, count; diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index e0bf30ebb21..6cedc914077 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -658,18 +658,24 @@ static struct file_operations dasd_eer_fops = { .owner = THIS_MODULE, }; -static struct miscdevice dasd_eer_dev = { - .minor = MISC_DYNAMIC_MINOR, - .name = "dasd_eer", - .fops = &dasd_eer_fops, -}; +static struct miscdevice *dasd_eer_dev = NULL; int __init dasd_eer_init(void) { int rc; - rc = misc_register(&dasd_eer_dev); + dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL); + if (!dasd_eer_dev) + return -ENOMEM; + + dasd_eer_dev->minor = MISC_DYNAMIC_MINOR; + dasd_eer_dev->name = "dasd_eer"; + dasd_eer_dev->fops = &dasd_eer_fops; + + rc = misc_register(dasd_eer_dev); if (rc) { + kfree(dasd_eer_dev); + dasd_eer_dev = NULL; MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " "register misc device"); return rc; @@ -680,5 +686,9 @@ int __init dasd_eer_init(void) void dasd_eer_exit(void) { - WARN_ON(misc_deregister(&dasd_eer_dev) != 0); + if (dasd_eer_dev) { + WARN_ON(misc_deregister(dasd_eer_dev) != 0); + kfree(dasd_eer_dev); + dasd_eer_dev = NULL; + } } diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c index 58a65097922..caa5d91420f 100644 --- a/drivers/s390/block/dasd_erp.c +++ b/drivers/s390/block/dasd_erp.c @@ -152,25 +152,6 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr) } /* end default_erp_postaction */ -/* - * Print the hex dump of the memory used by a request. This includes - * all error recovery ccws that have been chained in from of the - * real request. 
- */ -static inline void -hex_dump_memory(struct dasd_device *device, void *data, int len) -{ - int *pint; - - pint = (int *) data; - while (len > 0) { - DEV_MESSAGE(KERN_ERR, device, "%p: %08x %08x %08x %08x", - pint, pint[0], pint[1], pint[2], pint[3]); - pint += 4; - len -= 16; - } -} - void dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) { @@ -182,69 +163,8 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) device->discipline->dump_sense(device, cqr, irb); } -void -dasd_log_ccw(struct dasd_ccw_req * cqr, int caller, __u32 cpa) -{ - struct dasd_device *device; - struct dasd_ccw_req *lcqr; - struct ccw1 *ccw; - int cplength; - - device = cqr->device; - /* log the channel program */ - for (lcqr = cqr; lcqr != NULL; lcqr = lcqr->refers) { - DEV_MESSAGE(KERN_ERR, device, - "(%s) ERP chain report for req: %p", - caller == 0 ? "EXAMINE" : "ACTION", lcqr); - hex_dump_memory(device, lcqr, sizeof(struct dasd_ccw_req)); - - cplength = 1; - ccw = lcqr->cpaddr; - while (ccw++->flags & (CCW_FLAG_DC | CCW_FLAG_CC)) - cplength++; - - if (cplength > 40) { /* log only parts of the CP */ - DEV_MESSAGE(KERN_ERR, device, "%s", - "Start of channel program:"); - hex_dump_memory(device, lcqr->cpaddr, - 40*sizeof(struct ccw1)); - - DEV_MESSAGE(KERN_ERR, device, "%s", - "End of channel program:"); - hex_dump_memory(device, lcqr->cpaddr + cplength - 10, - 10*sizeof(struct ccw1)); - } else { /* log the whole CP */ - DEV_MESSAGE(KERN_ERR, device, "%s", - "Channel program (complete):"); - hex_dump_memory(device, lcqr->cpaddr, - cplength*sizeof(struct ccw1)); - } - - if (lcqr != cqr) - continue; - - /* - * Log bytes arround failed CCW but only if we did - * not log the whole CP of the CCW is outside the - * logged CP. - */ - if (cplength > 40 || - ((addr_t) cpa < (addr_t) lcqr->cpaddr && - (addr_t) cpa > (addr_t) (lcqr->cpaddr + cplength + 4))) { - - DEV_MESSAGE(KERN_ERR, device, - "Failed CCW (%p) (area):", - (void *) (long) cpa); - hex_dump_memory(device, cqr->cpaddr - 10, - 20*sizeof(struct ccw1)); - } - } - -} /* end log_erp_chain */ - EXPORT_SYMBOL(dasd_default_erp_action); EXPORT_SYMBOL(dasd_default_erp_postaction); EXPORT_SYMBOL(dasd_alloc_erp_request); EXPORT_SYMBOL(dasd_free_erp_request); EXPORT_SYMBOL(dasd_log_sense); -EXPORT_SYMBOL(dasd_log_ccw); diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index b857fd5893f..be0909e3922 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -75,7 +75,7 @@ static struct ccw_driver dasd_fba_driver = { .notify = dasd_generic_notify, }; -static inline void +static void define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw, int blksize, int beg, int nr) { @@ -95,7 +95,7 @@ define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw, data->ext_end = nr - 1; } -static inline void +static void locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw, int block_nr, int block_ct) { diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index d163632101d..47ba4462708 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -147,7 +147,7 @@ dasd_destroy_partitions(struct dasd_device * device) */ memset(&bpart, 0, sizeof(struct blkpg_partition)); memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); - barg.data = (void __user *) &bpart; + barg.data = (void __force __user *) &bpart; barg.op = BLKPG_DEL_PARTITION; for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--) ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); 
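The dasd_devmap.c hunk above replaces the compile-time ERP_DEBUG switch with a per-device DASD_FEATURE_ERPLOG bit that can be toggled at runtime through the new erplog sysfs attribute. As a rough illustration of how user space could flip it — the bus ID 0.0.0200 and the /sys/bus/ccw/devices path are assumptions for the example, not something this patch defines — a minimal sketch:

```c
/*
 * Illustrative only: enable ERP logging for one DASD through the new
 * "erplog" sysfs attribute added by this patch.  The bus ID 0.0.0200
 * is a placeholder; substitute the device in question.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *attr = "/sys/bus/ccw/devices/0.0.0200/erplog";
	int fd;

	fd = open(attr, O_WRONLY);
	if (fd < 0) {
		perror("open erplog attribute");
		return 1;
	}
	/* dasd_erplog_store() accepts "0" or "1"; other values get -EINVAL. */
	if (write(fd, "1", 1) != 1) {
		perror("write erplog attribute");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
```

Per the dasd_feature_list() change in the same hunk, the bit can presumably also be requested at boot through the dasd= parameter's feature list (e.g. dasd=0.0.0200(erplog)).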
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index dc5dd509434..a2cc69e1141 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -13,10 +13,6 @@ #ifdef __KERNEL__ -/* erp debugging in dasd.c and dasd_3990_erp.c */ -#define ERP_DEBUG - - /* we keep old device allocation scheme; IOW, minors are still in 0..255 */ #define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS)) #define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1) @@ -563,7 +559,6 @@ struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int, struct dasd_device *); void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *); void dasd_log_sense(struct dasd_ccw_req *, struct irb *); -void dasd_log_ccw(struct dasd_ccw_req *, int, __u32); /* externals in dasd_3370_erp.c */ dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *); diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 8fed3603e9e..758cfb54286 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -430,7 +430,7 @@ dasd_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) int rval; lock_kernel(); - rval = dasd_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); + rval = dasd_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); unlock_kernel(); return (rval == -EINVAL) ? -ENOIOCTLCMD : rval; diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index bfa010f6dab..8b7e11815d7 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c @@ -28,7 +28,7 @@ static struct proc_dir_entry *dasd_proc_root_entry = NULL; static struct proc_dir_entry *dasd_devices_entry = NULL; static struct proc_dir_entry *dasd_statistics_entry = NULL; -static inline char * +static char * dasd_get_user_string(const char __user *user_buf, size_t user_len) { char *buffer; @@ -154,7 +154,7 @@ static struct file_operations dasd_devices_file_ops = { .release = seq_release, }; -static inline int +static int dasd_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len) { @@ -167,8 +167,8 @@ dasd_calc_metrics(char *page, char **start, off_t off, return len; } -static inline char * -dasd_statistics_array(char *str, int *array, int shift) +static char * +dasd_statistics_array(char *str, unsigned int *array, int shift) { int i; diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index be9b05347b4..1340451ea40 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -102,7 +102,7 @@ dcssblk_release_segment(struct device *dev) * device needs to be enqueued before the semaphore is * freed. 
*/ -static inline int +static int dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info) { int minor, found; @@ -230,7 +230,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch SEGMENT_SHARED); if (rc < 0) { BUG_ON(rc == -EINVAL); - if (rc == -EIO || rc == -ENOENT) + if (rc != -EAGAIN) goto removeseg; } else { dev_info->is_shared = 1; @@ -253,7 +253,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch SEGMENT_EXCLUSIVE); if (rc < 0) { BUG_ON(rc == -EINVAL); - if (rc == -EIO || rc == -ENOENT) + if (rc != -EAGAIN) goto removeseg; } else { dev_info->is_shared = 0; diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index c3e97b4fc18..293e667b50f 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -2,7 +2,8 @@ # S/390 character devices # -obj-y += ctrlchar.o keyboard.o defkeymap.o +obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ + sclp_info.o obj-$(CONFIG_TN3270) += raw3270.o obj-$(CONFIG_TN3270_CONSOLE) += con3270.o @@ -11,7 +12,6 @@ obj-$(CONFIG_TN3270_FS) += fs3270.o obj-$(CONFIG_TN3215) += con3215.o -obj-$(CONFIG_SCLP) += sclp.o sclp_rw.o sclp_quiesce.o obj-$(CONFIG_SCLP_TTY) += sclp_tty.o obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index c9321b920e9..9a328f14a64 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -688,7 +688,7 @@ raw3215_probe (struct ccw_device *cdev) raw->cdev = cdev; raw->inbuf = (char *) raw + sizeof(struct raw3215_info); memset(raw, 0, sizeof(struct raw3215_info)); - raw->buffer = (char *) kmalloc(RAW3215_BUFFER_SIZE, + raw->buffer = kmalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL|GFP_DMA); if (raw->buffer == NULL) { spin_lock(&raw3215_device_lock); @@ -1121,7 +1121,7 @@ static const struct tty_operations tty3215_ops = { * 3215 tty registration code called from tty_init(). * Most kernel services (incl. kmalloc) are available at this poimt. */ -int __init +static int __init tty3215_init(void) { struct tty_driver *driver; diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index 7566be89068..8e7f2d7633d 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -69,8 +69,7 @@ static void con3270_update(struct con3270 *); /* * Setup timeout for a device. On timeout trigger an update. 
*/ -void -con3270_set_timer(struct con3270 *cp, int expires) +static void con3270_set_timer(struct con3270 *cp, int expires) { if (expires == 0) { if (timer_pending(&cp->timer)) diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c index 49e9628d929..c6cbcb3f925 100644 --- a/drivers/s390/char/ctrlchar.c +++ b/drivers/s390/char/ctrlchar.c @@ -16,14 +16,15 @@ #ifdef CONFIG_MAGIC_SYSRQ static int ctrlchar_sysrq_key; +static struct tty_struct *sysrq_tty; static void -ctrlchar_handle_sysrq(void *tty) +ctrlchar_handle_sysrq(struct work_struct *work) { - handle_sysrq(ctrlchar_sysrq_key, (struct tty_struct *) tty); + handle_sysrq(ctrlchar_sysrq_key, sysrq_tty); } -static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq, NULL); +static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq); #endif @@ -53,7 +54,7 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty) /* racy */ if (len == 3 && buf[1] == '-') { ctrlchar_sysrq_key = buf[2]; - ctrlchar_work.data = tty; + sysrq_tty = tty; schedule_work(&ctrlchar_work); return CTRLCHAR_SYSRQ; } diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c index 17027d918cf..564baca01b7 100644 --- a/drivers/s390/char/defkeymap.c +++ b/drivers/s390/char/defkeymap.c @@ -5,6 +5,8 @@ #include <linux/types.h> #include <linux/keyboard.h> #include <linux/kd.h> +#include <linux/kbd_kern.h> +#include <linux/kbd_diacr.h> u_short plain_map[NR_KEYS] = { 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 78f8bda81da..e1a746269c4 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -23,7 +23,7 @@ #include "raw3270.h" #include "ctrlchar.h" -struct raw3270_fn fs3270_fn; +static struct raw3270_fn fs3270_fn; struct fs3270 { struct raw3270_view view; @@ -401,7 +401,7 @@ fs3270_release(struct raw3270_view *view) } /* View to a 3270 device. Can be console, tty or fullscreen. */ -struct raw3270_fn fs3270_fn = { +static struct raw3270_fn fs3270_fn = { .activate = fs3270_activate, .deactivate = fs3270_deactivate, .intv = (void *) fs3270_irq, @@ -419,16 +419,20 @@ fs3270_open(struct inode *inode, struct file *filp) struct idal_buffer *ib; int minor, rc; - if (imajor(filp->f_dentry->d_inode) != IBM_FS3270_MAJOR) + if (imajor(filp->f_path.dentry->d_inode) != IBM_FS3270_MAJOR) return -ENODEV; - minor = iminor(filp->f_dentry->d_inode); + minor = iminor(filp->f_path.dentry->d_inode); /* Check for minor 0 multiplexer. */ if (minor == 0) { - if (!current->signal->tty) + struct tty_struct *tty; + mutex_lock(&tty_mutex); + tty = get_current_tty(); + if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) { + mutex_unlock(&tty_mutex); return -ENODEV; - if (current->signal->tty->driver->major != IBM_TTY3270_MAJOR) - return -ENODEV; - minor = current->signal->tty->index + RAW3270_FIRSTMINOR; + } + minor = tty->index + RAW3270_FIRSTMINOR; + mutex_unlock(&tty_mutex); } /* Check if some other program is already using fullscreen mode. */ fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor); diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index e3491a5f521..f62f9a4e895 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -148,6 +148,7 @@ kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc) } } +#if 0 /* * Generate ebcdic -> ascii translation table from kbd_data. 
*/ @@ -173,6 +174,7 @@ kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc) } } } +#endif /* * We have a combining character DIACR here, followed by the character CH. @@ -377,7 +379,7 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe, if (!(key_map = kbd->key_maps[tmp.kb_table])) { int j; - key_map = (ushort *) kmalloc(sizeof(plain_map), + key_map = kmalloc(sizeof(plain_map), GFP_KERNEL); if (!key_map) return -ENOMEM; diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index a138b151009..3a1a958fb5f 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c @@ -3,7 +3,7 @@ * * Character device driver for reading z/VM *MONITOR service records. * - * Copyright (C) 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH. + * Copyright 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH. * * Author: Gerald Schaefer <geraldsc@de.ibm.com> */ @@ -22,7 +22,7 @@ #include <asm/ebcdic.h> #include <asm/extmem.h> #include <linux/poll.h> -#include "../net/iucv.h" +#include <net/iucv/iucv.h> //#define MON_DEBUG /* Debug messages on/off */ @@ -50,14 +50,13 @@ static char mon_dcss_name[9] = "MONDCSS\0"; struct mon_msg { u32 pos; u32 mca_offset; - iucv_MessagePending local_eib; + struct iucv_message msg; char msglim_reached; char replied_msglim; }; struct mon_private { - u16 pathid; - iucv_handle_t iucv_handle; + struct iucv_path *path; struct mon_msg *msg_array[MON_MSGLIM]; unsigned int write_index; unsigned int read_index; @@ -75,8 +74,6 @@ static unsigned long mon_dcss_end; static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue); static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue); -static u8 iucv_host[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; - static u8 user_data_connect[16] = { /* Version code, must be 0x01 for shared mode */ 0x01, @@ -100,8 +97,7 @@ static u8 user_data_sever[16] = { * Create the 8 bytes EBCDIC DCSS segment name from * an ASCII name, incl. 
padding */ -static inline void -dcss_mkname(char *ascii_name, char *ebcdic_name) +static inline void dcss_mkname(char *ascii_name, char *ebcdic_name) { int i; @@ -119,8 +115,7 @@ dcss_mkname(char *ascii_name, char *ebcdic_name) * print appropriate error message for segment_load()/segment_type() * return code */ -static void -mon_segment_warn(int rc, char* seg_name) +static void mon_segment_warn(int rc, char* seg_name) { switch (rc) { case -ENOENT: @@ -166,44 +161,37 @@ mon_segment_warn(int rc, char* seg_name) } } -static inline unsigned long -mon_mca_start(struct mon_msg *monmsg) +static inline unsigned long mon_mca_start(struct mon_msg *monmsg) { - return monmsg->local_eib.ln1msg1.iprmmsg1_u32; + return *(u32 *) &monmsg->msg.rmmsg; } -static inline unsigned long -mon_mca_end(struct mon_msg *monmsg) +static inline unsigned long mon_mca_end(struct mon_msg *monmsg) { - return monmsg->local_eib.ln1msg2.ipbfln1f; + return *(u32 *) &monmsg->msg.rmmsg[4]; } -static inline u8 -mon_mca_type(struct mon_msg *monmsg, u8 index) +static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index) { return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index); } -static inline u32 -mon_mca_size(struct mon_msg *monmsg) +static inline u32 mon_mca_size(struct mon_msg *monmsg) { return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1; } -static inline u32 -mon_rec_start(struct mon_msg *monmsg) +static inline u32 mon_rec_start(struct mon_msg *monmsg) { return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4)); } -static inline u32 -mon_rec_end(struct mon_msg *monmsg) +static inline u32 mon_rec_end(struct mon_msg *monmsg) { return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8)); } -static inline int -mon_check_mca(struct mon_msg *monmsg) +static inline int mon_check_mca(struct mon_msg *monmsg) { if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) || (mon_rec_start(monmsg) < mon_dcss_start) || @@ -221,20 +209,17 @@ mon_check_mca(struct mon_msg *monmsg) return 0; } -static inline int -mon_send_reply(struct mon_msg *monmsg, struct mon_private *monpriv) +static inline int mon_send_reply(struct mon_msg *monmsg, + struct mon_private *monpriv) { - u8 prmmsg[8]; int rc; P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = " "0x%08X\n\n", - monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid, - monmsg->local_eib.iptrgcls); - rc = iucv_reply_prmmsg(monmsg->local_eib.ippathid, - monmsg->local_eib.ipmsgid, - monmsg->local_eib.iptrgcls, - 0, prmmsg); + monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class); + + rc = iucv_message_reply(monpriv->path, &monmsg->msg, + IUCV_IPRMDATA, NULL, 0); atomic_dec(&monpriv->msglim_count); if (likely(!monmsg->msglim_reached)) { monmsg->pos = 0; @@ -251,10 +236,19 @@ mon_send_reply(struct mon_msg *monmsg, struct mon_private *monpriv) return 0; } -static inline struct mon_private * -mon_alloc_mem(void) +static inline void mon_free_mem(struct mon_private *monpriv) +{ + int i; + + for (i = 0; i < MON_MSGLIM; i++) + if (monpriv->msg_array[i]) + kfree(monpriv->msg_array[i]); + kfree(monpriv); +} + +static inline struct mon_private *mon_alloc_mem(void) { - int i,j; + int i; struct mon_private *monpriv; monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); @@ -267,16 +261,15 @@ mon_alloc_mem(void) GFP_KERNEL); if (!monpriv->msg_array[i]) { P_ERROR("open, no memory for msg_array\n"); - for (j = 0; j < i; j++) - kfree(monpriv->msg_array[j]); + mon_free_mem(monpriv); return NULL; } } return monpriv; } -static inline void -mon_read_debug(struct mon_msg 
*monmsg, struct mon_private *monpriv) +static inline void mon_read_debug(struct mon_msg *monmsg, + struct mon_private *monpriv) { #ifdef MON_DEBUG u8 msg_type[2], mca_type; @@ -284,7 +277,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv) records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1; - memcpy(msg_type, &monmsg->local_eib.iptrgcls, 2); + memcpy(msg_type, &monmsg->msg.class, 2); EBCASC(msg_type, 2); mca_type = mon_mca_type(monmsg, 0); EBCASC(&mca_type, 1); @@ -292,8 +285,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv) P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n", monpriv->read_index, monpriv->write_index); P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n", - monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid, - monmsg->local_eib.iptrgcls); + monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class); P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n", msg_type[0], msg_type[1], mca_type ? mca_type : 'X', mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2)); @@ -306,8 +298,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv) #endif } -static inline void -mon_next_mca(struct mon_msg *monmsg) +static inline void mon_next_mca(struct mon_msg *monmsg) { if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12)) return; @@ -316,8 +307,7 @@ mon_next_mca(struct mon_msg *monmsg) monmsg->pos = 0; } -static inline struct mon_msg * -mon_next_message(struct mon_private *monpriv) +static inline struct mon_msg *mon_next_message(struct mon_private *monpriv) { struct mon_msg *monmsg; @@ -342,39 +332,37 @@ mon_next_message(struct mon_private *monpriv) /****************************************************************************** * IUCV handler * *****************************************************************************/ -static void -mon_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data) +static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16]) { - struct mon_private *monpriv = (struct mon_private *) pgm_data; + struct mon_private *monpriv = path->private; P_DEBUG("IUCV connection completed\n"); P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = " "0x%02X, Sample = 0x%02X\n", - eib->ipuser[0], eib->ipuser[1], eib->ipuser[2]); + ipuser[0], ipuser[1], ipuser[2]); atomic_set(&monpriv->iucv_connected, 1); wake_up(&mon_conn_wait_queue); } -static void -mon_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data) +static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) { - struct mon_private *monpriv = (struct mon_private *) pgm_data; + struct mon_private *monpriv = path->private; - P_ERROR("IUCV connection severed with rc = 0x%X\n", - (u8) eib->ipuser[0]); + P_ERROR("IUCV connection severed with rc = 0x%X\n", ipuser[0]); + iucv_path_sever(path, NULL); atomic_set(&monpriv->iucv_severed, 1); wake_up(&mon_conn_wait_queue); wake_up_interruptible(&mon_read_wait_queue); } -static void -mon_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data) +static void mon_iucv_message_pending(struct iucv_path *path, + struct iucv_message *msg) { - struct mon_private *monpriv = (struct mon_private *) pgm_data; + struct mon_private *monpriv = path->private; P_DEBUG("IUCV message pending\n"); - memcpy(&monpriv->msg_array[monpriv->write_index]->local_eib, eib, - sizeof(iucv_MessagePending)); + memcpy(&monpriv->msg_array[monpriv->write_index]->msg, + msg, sizeof(*msg)); if (atomic_inc_return(&monpriv->msglim_count) == 
MON_MSGLIM) { P_WARNING("IUCV message pending, message limit (%i) reached\n", MON_MSGLIM); @@ -385,54 +373,45 @@ mon_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data) wake_up_interruptible(&mon_read_wait_queue); } -static iucv_interrupt_ops_t mon_iucvops = { - .ConnectionComplete = mon_iucv_ConnectionComplete, - .ConnectionSevered = mon_iucv_ConnectionSevered, - .MessagePending = mon_iucv_MessagePending, +static struct iucv_handler monreader_iucv_handler = { + .path_complete = mon_iucv_path_complete, + .path_severed = mon_iucv_path_severed, + .message_pending = mon_iucv_message_pending, }; /****************************************************************************** * file operations * *****************************************************************************/ -static int -mon_open(struct inode *inode, struct file *filp) +static int mon_open(struct inode *inode, struct file *filp) { - int rc, i; struct mon_private *monpriv; + int rc; /* * only one user allowed */ + rc = -EBUSY; if (test_and_set_bit(MON_IN_USE, &mon_in_use)) - return -EBUSY; + goto out; + rc = -ENOMEM; monpriv = mon_alloc_mem(); if (!monpriv) - return -ENOMEM; + goto out_use; /* - * Register with IUCV and connect to *MONITOR service + * Connect to *MONITOR service */ - monpriv->iucv_handle = iucv_register_program("my_monreader ", - MON_SERVICE, - NULL, - &mon_iucvops, - monpriv); - if (!monpriv->iucv_handle) { - P_ERROR("failed to register with iucv driver\n"); - rc = -EIO; - goto out_error; - } - P_INFO("open, registered with IUCV\n"); - - rc = iucv_connect(&monpriv->pathid, MON_MSGLIM, user_data_connect, - MON_SERVICE, iucv_host, IPRMDATA, NULL, NULL, - monpriv->iucv_handle, NULL); + monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL); + if (!monpriv->path) + goto out_priv; + rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler, + MON_SERVICE, NULL, user_data_connect, monpriv); if (rc) { P_ERROR("iucv connection to *MONITOR failed with " "IPUSER SEVER code = %i\n", rc); rc = -EIO; - goto out_unregister; + goto out_path; } /* * Wait for connection confirmation @@ -444,24 +423,23 @@ mon_open(struct inode *inode, struct file *filp) atomic_set(&monpriv->iucv_severed, 0); atomic_set(&monpriv->iucv_connected, 0); rc = -EIO; - goto out_unregister; + goto out_path; } P_INFO("open, established connection to *MONITOR service\n\n"); filp->private_data = monpriv; return nonseekable_open(inode, filp); -out_unregister: - iucv_unregister_program(monpriv->iucv_handle); -out_error: - for (i = 0; i < MON_MSGLIM; i++) - kfree(monpriv->msg_array[i]); - kfree(monpriv); +out_path: + kfree(monpriv->path); +out_priv: + mon_free_mem(monpriv); +out_use: clear_bit(MON_IN_USE, &mon_in_use); +out: return rc; } -static int -mon_close(struct inode *inode, struct file *filp) +static int mon_close(struct inode *inode, struct file *filp) { int rc, i; struct mon_private *monpriv = filp->private_data; @@ -469,18 +447,12 @@ mon_close(struct inode *inode, struct file *filp) /* * Close IUCV connection and unregister */ - rc = iucv_sever(monpriv->pathid, user_data_sever); + rc = iucv_path_sever(monpriv->path, user_data_sever); if (rc) P_ERROR("close, iucv_sever failed with rc = %i\n", rc); else P_INFO("close, terminated connection to *MONITOR service\n"); - rc = iucv_unregister_program(monpriv->iucv_handle); - if (rc) - P_ERROR("close, iucv_unregister failed with rc = %i\n", rc); - else - P_INFO("close, unregistered with IUCV\n"); - atomic_set(&monpriv->iucv_severed, 0); atomic_set(&monpriv->iucv_connected, 0); 
atomic_set(&monpriv->read_ready, 0); @@ -495,8 +467,8 @@ mon_close(struct inode *inode, struct file *filp) return 0; } -static ssize_t -mon_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) +static ssize_t mon_read(struct file *filp, char __user *data, + size_t count, loff_t *ppos) { struct mon_private *monpriv = filp->private_data; struct mon_msg *monmsg; @@ -563,8 +535,7 @@ out_copy: return count; } -static unsigned int -mon_poll(struct file *filp, struct poll_table_struct *p) +static unsigned int mon_poll(struct file *filp, struct poll_table_struct *p) { struct mon_private *monpriv = filp->private_data; @@ -593,8 +564,7 @@ static struct miscdevice mon_dev = { /****************************************************************************** * module init/exit * *****************************************************************************/ -static int __init -mon_init(void) +static int __init mon_init(void) { int rc; @@ -603,22 +573,34 @@ mon_init(void) return -ENODEV; } + /* + * Register with IUCV and connect to *MONITOR service + */ + rc = iucv_register(&monreader_iucv_handler, 1); + if (rc) { + P_ERROR("failed to register with iucv driver\n"); + return rc; + } + P_INFO("open, registered with IUCV\n"); + rc = segment_type(mon_dcss_name); if (rc < 0) { mon_segment_warn(rc, mon_dcss_name); - return rc; + goto out_iucv; } if (rc != SEG_TYPE_SC) { P_ERROR("segment %s has unsupported type, should be SC\n", mon_dcss_name); - return -EINVAL; + rc = -EINVAL; + goto out_iucv; } rc = segment_load(mon_dcss_name, SEGMENT_SHARED, &mon_dcss_start, &mon_dcss_end); if (rc < 0) { mon_segment_warn(rc, mon_dcss_name); - return -EINVAL; + rc = -EINVAL; + goto out_iucv; } dcss_mkname(mon_dcss_name, &user_data_connect[8]); @@ -634,14 +616,16 @@ mon_init(void) out: segment_unload(mon_dcss_name); +out_iucv: + iucv_unregister(&monreader_iucv_handler, 1); return rc; } -static void __exit -mon_exit(void) +static void __exit mon_exit(void) { segment_unload(mon_dcss_name); WARN_ON(misc_deregister(&mon_dev) != 0); + iucv_unregister(&monreader_iucv_handler, 1); return; } diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index b9b0fc3f812..9e451acc649 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c @@ -23,7 +23,7 @@ #include <asm/appldata.h> #include <asm/monwriter.h> -#define MONWRITE_MAX_DATALEN 4024 +#define MONWRITE_MAX_DATALEN 4010 static int mon_max_bufs = 255; static int mon_buf_count; @@ -67,8 +67,8 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn) return -EINVAL; } -static inline struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv, - struct monwrite_hdr *monhdr) +static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv, + struct monwrite_hdr *monhdr) { struct mon_buf *entry, *next; diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 7a84014f203..8facd14adb7 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -29,7 +29,7 @@ #include <linux/device.h> #include <linux/mutex.h> -struct class *class3270; +static struct class *class3270; /* The main 3270 data structure. */ struct raw3270 { @@ -86,7 +86,7 @@ DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue); /* * Encode array for 12 bit 3270 addresses. 
*/ -unsigned char raw3270_ebcgraf[64] = { +static unsigned char raw3270_ebcgraf[64] = { 0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 8a056df09d6..f171de3b0b1 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -59,7 +59,8 @@ static volatile enum sclp_init_state_t { /* Internal state: is a request active at the sclp? */ static volatile enum sclp_running_state_t { sclp_running_state_idle, - sclp_running_state_running + sclp_running_state_running, + sclp_running_state_reset_pending } sclp_running_state = sclp_running_state_idle; /* Internal state: is a read request pending? */ @@ -88,15 +89,15 @@ static volatile enum sclp_mask_state_t { /* Timeout intervals in seconds.*/ #define SCLP_BUSY_INTERVAL 10 -#define SCLP_RETRY_INTERVAL 15 +#define SCLP_RETRY_INTERVAL 30 static void sclp_process_queue(void); static int sclp_init_mask(int calculate); static int sclp_init(void); /* Perform service call. Return 0 on success, non-zero otherwise. */ -static int -service_call(sclp_cmdw_t command, void *sccb) +int +sclp_service_call(sclp_cmdw_t command, void *sccb) { int cc; @@ -113,19 +114,17 @@ service_call(sclp_cmdw_t command, void *sccb) return 0; } -/* Request timeout handler. Restart the request queue. If DATA is non-zero, - * force restart of running request. */ +static inline void __sclp_make_read_req(void); + static void -sclp_request_timeout(unsigned long data) +__sclp_queue_read_req(void) { - unsigned long flags; - - if (data) { - spin_lock_irqsave(&sclp_lock, flags); - sclp_running_state = sclp_running_state_idle; - spin_unlock_irqrestore(&sclp_lock, flags); + if (sclp_reading_state == sclp_reading_state_idle) { + sclp_reading_state = sclp_reading_state_reading; + __sclp_make_read_req(); + /* Add request to head of queue */ + list_add(&sclp_read_req.list, &sclp_req_queue); } - sclp_process_queue(); } /* Set up request retry timer. Called while sclp_lock is locked. */ @@ -140,6 +139,29 @@ __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long), add_timer(&sclp_request_timer); } +/* Request timeout handler. Restart the request queue. If DATA is non-zero, + * force restart of running request. */ +static void +sclp_request_timeout(unsigned long data) +{ + unsigned long flags; + + spin_lock_irqsave(&sclp_lock, flags); + if (data) { + if (sclp_running_state == sclp_running_state_running) { + /* Break running state and queue NOP read event request + * to get a defined interface state. */ + __sclp_queue_read_req(); + sclp_running_state = sclp_running_state_idle; + } + } else { + __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, + sclp_request_timeout, 0); + } + spin_unlock_irqrestore(&sclp_lock, flags); + sclp_process_queue(); +} + /* Try to start a request. Return zero if the request was successfully * started or if it will be started at a later time. Return non-zero otherwise. * Called while sclp_lock is locked. */ @@ -151,7 +173,7 @@ __sclp_start_request(struct sclp_req *req) if (sclp_running_state != sclp_running_state_idle) return 0; del_timer(&sclp_request_timer); - rc = service_call(req->command, req->sccb); + rc = sclp_service_call(req->command, req->sccb); req->start_count++; if (rc == 0) { @@ -191,7 +213,15 @@ sclp_process_queue(void) rc = __sclp_start_request(req); if (rc == 0) break; - /* Request failed. 
*/ + /* Request failed */ + if (req->start_count > 1) { + /* Cannot abort already submitted request - could still + * be active at the SCLP */ + __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, + sclp_request_timeout, 0); + break; + } + /* Post-processing for aborted request */ list_del(&req->list); if (req->callback) { spin_unlock_irqrestore(&sclp_lock, flags); @@ -221,7 +251,8 @@ sclp_add_request(struct sclp_req *req) list_add_tail(&req->list, &sclp_req_queue); rc = 0; /* Start if request is first in list */ - if (req->list.prev == &sclp_req_queue) { + if (sclp_running_state == sclp_running_state_idle && + req->list.prev == &sclp_req_queue) { rc = __sclp_start_request(req); if (rc) list_del(&req->list); @@ -294,7 +325,7 @@ __sclp_make_read_req(void) sccb = (struct sccb_header *) sclp_read_sccb; clear_page(sccb); memset(&sclp_read_req, 0, sizeof(struct sclp_req)); - sclp_read_req.command = SCLP_CMDW_READDATA; + sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA; sclp_read_req.status = SCLP_REQ_QUEUED; sclp_read_req.start_count = 0; sclp_read_req.callback = sclp_read_cb; @@ -334,6 +365,8 @@ sclp_interrupt_handler(__u16 code) finished_sccb = S390_lowcore.ext_params & 0xfffffff8; evbuf_pending = S390_lowcore.ext_params & 0x3; if (finished_sccb) { + del_timer(&sclp_request_timer); + sclp_running_state = sclp_running_state_reset_pending; req = __sclp_find_req(finished_sccb); if (req) { /* Request post-processing */ @@ -348,13 +381,8 @@ sclp_interrupt_handler(__u16 code) sclp_running_state = sclp_running_state_idle; } if (evbuf_pending && sclp_receive_mask != 0 && - sclp_reading_state == sclp_reading_state_idle && - sclp_activation_state == sclp_activation_state_active ) { - sclp_reading_state = sclp_reading_state_reading; - __sclp_make_read_req(); - /* Add request to head of queue */ - list_add(&sclp_read_req.list, &sclp_req_queue); - } + sclp_activation_state == sclp_activation_state_active) + __sclp_queue_read_req(); spin_unlock(&sclp_lock); sclp_process_queue(); } @@ -374,6 +402,7 @@ sclp_sync_wait(void) unsigned long flags; unsigned long cr0, cr0_sync; u64 timeout; + int irq_context; /* We'll be disabling timer interrupts, so we need a custom timeout * mechanism */ @@ -386,7 +415,9 @@ sclp_sync_wait(void) } local_irq_save(flags); /* Prevent bottom half from executing once we force interrupts open */ - local_bh_disable(); + irq_context = in_interrupt(); + if (!irq_context) + local_bh_disable(); /* Enable service-signal interruption, disable timer interrupts */ trace_hardirqs_on(); __ctl_store(cr0, 0, 0); @@ -402,19 +433,19 @@ sclp_sync_wait(void) get_clock() > timeout && del_timer(&sclp_request_timer)) sclp_request_timer.function(sclp_request_timer.data); - barrier(); cpu_relax(); } local_irq_disable(); __ctl_load(cr0, 0, 0); - _local_bh_enable(); + if (!irq_context) + _local_bh_enable(); local_irq_restore(flags); } EXPORT_SYMBOL(sclp_sync_wait); /* Dispatch changes in send and receive mask to registered listeners. 
*/ -static inline void +static void sclp_dispatch_state_change(void) { struct list_head *l; @@ -597,7 +628,7 @@ __sclp_make_init_req(u32 receive_mask, u32 send_mask) sccb = (struct init_sccb *) sclp_init_sccb; clear_page(sccb); memset(&sclp_init_req, 0, sizeof(struct sclp_req)); - sclp_init_req.command = SCLP_CMDW_WRITEMASK; + sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK; sclp_init_req.status = SCLP_REQ_FILLED; sclp_init_req.start_count = 0; sclp_init_req.callback = NULL; @@ -800,7 +831,7 @@ sclp_check_interface(void) for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) { __sclp_make_init_req(0, 0); sccb = (struct init_sccb *) sclp_init_req.sccb; - rc = service_call(sclp_init_req.command, sccb); + rc = sclp_service_call(sclp_init_req.command, sccb); if (rc == -EIO) break; sclp_init_req.status = SCLP_REQ_RUNNING; diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 2c71d6ee7b5..7d29ab45a6e 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -12,7 +12,7 @@ #include <linux/types.h> #include <linux/list.h> - +#include <asm/sclp.h> #include <asm/ebcdic.h> /* maximum number of pages concerning our own memory management */ @@ -49,9 +49,11 @@ typedef unsigned int sclp_cmdw_t; -#define SCLP_CMDW_READDATA 0x00770005 -#define SCLP_CMDW_WRITEDATA 0x00760005 -#define SCLP_CMDW_WRITEMASK 0x00780005 +#define SCLP_CMDW_READ_EVENT_DATA 0x00770005 +#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005 +#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005 +#define SCLP_CMDW_READ_SCP_INFO 0x00020001 +#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 #define GDS_ID_MDSMU 0x1310 #define GDS_ID_MDSRouteInfo 0x1311 @@ -66,13 +68,6 @@ typedef unsigned int sclp_cmdw_t; typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ -struct sccb_header { - u16 length; - u8 function_code; - u8 control_mask[3]; - u16 response_code; -} __attribute__((packed)); - struct gds_subvector { u8 length; u8 key; @@ -131,6 +126,7 @@ void sclp_unregister(struct sclp_register *reg); int sclp_remove_processed(struct sccb_header *sccb); int sclp_deactivate(void); int sclp_reactivate(void); +int sclp_service_call(sclp_cmdw_t command, void *sccb); /* useful inlines */ diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index 86864f64171..ead1043d788 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c @@ -66,7 +66,7 @@ sclp_conbuf_callback(struct sclp_buffer *buffer, int rc) } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback)); } -static inline void +static void sclp_conbuf_emit(void) { struct sclp_buffer* buffer; diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c index 732dfbdb85c..65aa2c85737 100644 --- a/drivers/s390/char/sclp_cpi.c +++ b/drivers/s390/char/sclp_cpi.c @@ -49,6 +49,8 @@ static struct sclp_register sclp_cpi_event = .send_mask = EvTyp_CtlProgIdent_Mask }; +MODULE_LICENSE("GPL"); + MODULE_AUTHOR( "Martin Peschke, IBM Deutschland Entwicklung GmbH " "<mpeschke@de.ibm.com>"); @@ -127,7 +129,7 @@ cpi_prepare_req(void) struct cpi_sccb *sccb; struct cpi_evbuf *evb; - req = (struct sclp_req *) kmalloc(sizeof(struct sclp_req), GFP_KERNEL); + req = kmalloc(sizeof(struct sclp_req), GFP_KERNEL); if (req == NULL) return ERR_PTR(-ENOMEM); sccb = (struct cpi_sccb *) __get_free_page(GFP_KERNEL | GFP_DMA); @@ -167,7 +169,7 @@ cpi_prepare_req(void) } /* prepare request data structure presented to SCLP driver */ - req->command = SCLP_CMDW_WRITEDATA; + req->command = SCLP_CMDW_WRITE_EVENT_DATA; req->sccb = sccb; req->status = 
SCLP_REQ_FILLED; req->callback = cpi_callback; diff --git a/drivers/s390/char/sclp_info.c b/drivers/s390/char/sclp_info.c new file mode 100644 index 00000000000..7bcbe643b08 --- /dev/null +++ b/drivers/s390/char/sclp_info.c @@ -0,0 +1,57 @@ +/* + * drivers/s390/char/sclp_info.c + * + * Copyright IBM Corp. 2007 + * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> + */ + +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <asm/sclp.h> +#include "sclp.h" + +struct sclp_readinfo_sccb s390_readinfo_sccb; + +void __init sclp_readinfo_early(void) +{ + sclp_cmdw_t command; + struct sccb_header *sccb; + int ret; + + __ctl_set_bit(0, 9); /* enable service signal subclass mask */ + + sccb = &s390_readinfo_sccb.header; + command = SCLP_CMDW_READ_SCP_INFO_FORCED; + while (1) { + u16 response; + + memset(&s390_readinfo_sccb, 0, sizeof(s390_readinfo_sccb)); + sccb->length = sizeof(s390_readinfo_sccb); + sccb->control_mask[2] = 0x80; + + ret = sclp_service_call(command, &s390_readinfo_sccb); + + if (ret == -EIO) + goto out; + if (ret == -EBUSY) + continue; + + __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT | + PSW_MASK_WAIT | PSW_DEFAULT_KEY); + local_irq_disable(); + barrier(); + + response = sccb->response_code; + + if (response == 0x10) + break; + + if (response != 0x1f0 || command == SCLP_CMDW_READ_SCP_INFO) + break; + + command = SCLP_CMDW_READ_SCP_INFO; + } +out: + __ctl_clear_bit(0, 9); /* disable service signal subclass mask */ +} diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index 0c92d3909cc..2486783ea58 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c @@ -460,7 +460,7 @@ sclp_emit_buffer(struct sclp_buffer *buffer, sccb->msg_buf.header.type = EvTyp_PMsgCmd; else return -ENOSYS; - buffer->request.command = SCLP_CMDW_WRITEDATA; + buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; buffer->request.status = SCLP_REQ_FILLED; buffer->request.callback = sclp_writedata_callback; buffer->request.callback_data = buffer; diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 6f43e04dbef..90536f60bf5 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c @@ -60,8 +60,6 @@ static unsigned short int sclp_tty_chars_count; struct tty_driver *sclp_tty_driver; -extern struct termios tty_std_termios; - static struct sclp_ioctls sclp_ioctls; static struct sclp_ioctls sclp_ioctls_init = { @@ -723,7 +721,7 @@ static const struct tty_operations sclp_ops = { .ioctl = sclp_tty_ioctl, }; -int __init +static int __init sclp_tty_init(void) { struct tty_driver *driver; diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 723bf4191bf..544f137d70d 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c @@ -207,7 +207,7 @@ __sclp_vt220_emit(struct sclp_vt220_request *request) request->sclp_req.status = SCLP_REQ_FAILED; return -EIO; } - request->sclp_req.command = SCLP_CMDW_WRITEDATA; + request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA; request->sclp_req.status = SCLP_REQ_FILLED; request->sclp_req.callback = sclp_vt220_callback; request->sclp_req.callback_data = (void *) request; @@ -669,7 +669,7 @@ static const struct tty_operations sclp_vt220_ops = { /* * Register driver with SCLP and Linux and initialize internal tty structures. 
*/ -int __init +static int __init sclp_vt220_tty_init(void) { struct tty_driver *driver; diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h index 1f4c89967be..bb4ff537729 100644 --- a/drivers/s390/char/tape.h +++ b/drivers/s390/char/tape.h @@ -3,7 +3,7 @@ * tape device driver for 3480/3490E/3590 tapes. * * S390 and zSeries version - * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 2001,2006 * Author(s): Carsten Otte <cotte@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -99,7 +99,11 @@ enum tape_op { TO_DIS, /* Tape display */ TO_ASSIGN, /* Assign tape to channel path */ TO_UNASSIGN, /* Unassign tape from channel path */ - TO_SIZE /* #entries in tape_op_t */ + TO_CRYPT_ON, /* Enable encrpytion */ + TO_CRYPT_OFF, /* Disable encrpytion */ + TO_KEKL_SET, /* Set KEK label */ + TO_KEKL_QUERY, /* Query KEK label */ + TO_SIZE, /* #entries in tape_op_t */ }; /* Forward declaration */ @@ -112,6 +116,7 @@ enum tape_request_status { TAPE_REQUEST_IN_IO, /* request is currently in IO */ TAPE_REQUEST_DONE, /* request is completed. */ TAPE_REQUEST_CANCEL, /* request should be canceled. */ + TAPE_REQUEST_LONG_BUSY, /* request has to be restarted after long busy */ }; /* Tape CCW request */ @@ -164,10 +169,11 @@ struct tape_discipline { * The discipline irq function either returns an error code (<0) which * means that the request has failed with an error or one of the following: */ -#define TAPE_IO_SUCCESS 0 /* request successful */ -#define TAPE_IO_PENDING 1 /* request still running */ -#define TAPE_IO_RETRY 2 /* retry to current request */ -#define TAPE_IO_STOP 3 /* stop the running request */ +#define TAPE_IO_SUCCESS 0 /* request successful */ +#define TAPE_IO_PENDING 1 /* request still running */ +#define TAPE_IO_RETRY 2 /* retry to current request */ +#define TAPE_IO_STOP 3 /* stop the running request */ +#define TAPE_IO_LONG_BUSY 4 /* delay the running request */ /* Char Frontend Data */ struct tape_char_data { @@ -179,6 +185,7 @@ struct tape_char_data { /* Block Frontend Data */ struct tape_blk_data { + struct tape_device * device; /* Block device request queue. */ request_queue_t * request_queue; spinlock_t request_queue_lock; @@ -240,7 +247,11 @@ struct tape_device { #endif /* Function to start or stop the next request later. */ - struct work_struct tape_dnr; + struct delayed_work tape_dnr; + + /* Timer for long busy */ + struct timer_list lb_timeout; + }; /* Externals from tape_core.c */ diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index 7b95dab913d..e765875e8db 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c @@ -95,6 +95,12 @@ tape_34xx_medium_sense(struct tape_device *device) return rc; } +struct tape_34xx_work { + struct tape_device *device; + enum tape_op op; + struct work_struct work; +}; + /* * These functions are currently used only to schedule a medium_sense for * later execution. This is because we get an interrupt whenever a medium @@ -103,13 +109,10 @@ tape_34xx_medium_sense(struct tape_device *device) * interrupt handler. 
*/ static void -tape_34xx_work_handler(void *data) +tape_34xx_work_handler(struct work_struct *work) { - struct { - struct tape_device *device; - enum tape_op op; - struct work_struct work; - } *p = data; + struct tape_34xx_work *p = + container_of(work, struct tape_34xx_work, work); switch(p->op) { case TO_MSEN: @@ -126,17 +129,13 @@ tape_34xx_work_handler(void *data) static int tape_34xx_schedule_work(struct tape_device *device, enum tape_op op) { - struct { - struct tape_device *device; - enum tape_op op; - struct work_struct work; - } *p; + struct tape_34xx_work *p; if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL) return -ENOMEM; memset(p, 0, sizeof(*p)); - INIT_WORK(&p->work, tape_34xx_work_handler, p); + INIT_WORK(&p->work, tape_34xx_work_handler); p->device = tape_get_device_reference(device); p->op = op; diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index 928cbefc49d..50f5edab83d 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c @@ -2,7 +2,7 @@ * drivers/s390/char/tape_3590.c * tape device discipline for 3590 tapes. * - * Copyright (C) IBM Corp. 2001,2006 + * Copyright IBM Corp. 2001,2006 * Author(s): Stefan Bader <shbader@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/bio.h> +#include <asm/ebcdic.h> #define TAPE_DBF_AREA tape_3590_dbf @@ -30,7 +31,7 @@ EXPORT_SYMBOL(TAPE_DBF_AREA); * - Read Device (buffered) log: BRA * - Read Library log: BRA * - Swap Devices: BRA - * - Long Busy: BRA + * - Long Busy: implemented * - Special Intercept: BRA * - Read Alternate: implemented *******************************************************************/ @@ -94,6 +95,332 @@ static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = { [0xae] = "Subsystem environmental alert", }; +static int crypt_supported(struct tape_device *device) +{ + return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device)); +} + +static int crypt_enabled(struct tape_device *device) +{ + return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device)); +} + +static void ext_to_int_kekl(struct tape390_kekl *in, + struct tape3592_kekl *out) +{ + int i; + + memset(out, 0, sizeof(*out)); + if (in->type == TAPE390_KEKL_TYPE_HASH) + out->flags |= 0x40; + if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH) + out->flags |= 0x80; + strncpy(out->label, in->label, 64); + for (i = strlen(in->label); i < sizeof(out->label); i++) + out->label[i] = ' '; + ASCEBC(out->label, sizeof(out->label)); +} + +static void int_to_ext_kekl(struct tape3592_kekl *in, + struct tape390_kekl *out) +{ + memset(out, 0, sizeof(*out)); + if(in->flags & 0x40) + out->type = TAPE390_KEKL_TYPE_HASH; + else + out->type = TAPE390_KEKL_TYPE_LABEL; + if(in->flags & 0x80) + out->type_on_tape = TAPE390_KEKL_TYPE_HASH; + else + out->type_on_tape = TAPE390_KEKL_TYPE_LABEL; + memcpy(out->label, in->label, sizeof(in->label)); + EBCASC(out->label, sizeof(in->label)); + strstrip(out->label); +} + +static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in, + struct tape390_kekl_pair *out) +{ + if (in->count == 0) { + out->kekl[0].type = TAPE390_KEKL_TYPE_NONE; + out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE; + out->kekl[1].type = TAPE390_KEKL_TYPE_NONE; + out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE; + } else if (in->count == 1) { + int_to_ext_kekl(&in->kekl[0], &out->kekl[0]); + out->kekl[1].type = TAPE390_KEKL_TYPE_NONE; + out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE; + } else if 
(in->count == 2) { + int_to_ext_kekl(&in->kekl[0], &out->kekl[0]); + int_to_ext_kekl(&in->kekl[1], &out->kekl[1]); + } else { + printk("Invalid KEKL number: %d\n", in->count); + BUG(); + } +} + +static int check_ext_kekl(struct tape390_kekl *kekl) +{ + if (kekl->type == TAPE390_KEKL_TYPE_NONE) + goto invalid; + if (kekl->type > TAPE390_KEKL_TYPE_HASH) + goto invalid; + if (kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE) + goto invalid; + if (kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH) + goto invalid; + if ((kekl->type == TAPE390_KEKL_TYPE_HASH) && + (kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL)) + goto invalid; + + return 0; +invalid: + return -EINVAL; +} + +static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls) +{ + if (check_ext_kekl(&kekls->kekl[0])) + goto invalid; + if (check_ext_kekl(&kekls->kekl[1])) + goto invalid; + + return 0; +invalid: + return -EINVAL; +} + +/* + * Query KEKLs + */ +static int tape_3592_kekl_query(struct tape_device *device, + struct tape390_kekl_pair *ext_kekls) +{ + struct tape_request *request; + struct tape3592_kekl_query_order *order; + struct tape3592_kekl_query_data *int_kekls; + int rc; + + DBF_EVENT(6, "tape3592_kekl_query\n"); + int_kekls = kmalloc(sizeof(*int_kekls), GFP_KERNEL|GFP_DMA); + if (!int_kekls) + return -ENOMEM; + request = tape_alloc_request(2, sizeof(*order)); + if (IS_ERR(request)) { + rc = PTR_ERR(request); + goto fail_malloc; + } + order = request->cpdata; + memset(order,0,sizeof(*order)); + order->code = 0xe2; + order->max_count = 2; + request->op = TO_KEKL_QUERY; + tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order); + tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls), + int_kekls); + rc = tape_do_io(device, request); + if (rc) + goto fail_request; + int_to_ext_kekl_pair(&int_kekls->kekls, ext_kekls); + + rc = 0; +fail_request: + tape_free_request(request); +fail_malloc: + kfree(int_kekls); + return rc; +} + +/* + * IOCTL: Query KEKLs + */ +static int tape_3592_ioctl_kekl_query(struct tape_device *device, + unsigned long arg) +{ + int rc; + struct tape390_kekl_pair *ext_kekls; + + DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n"); + if (!crypt_supported(device)) + return -ENOSYS; + if (!crypt_enabled(device)) + return -EUNATCH; + ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL); + if (!ext_kekls) + return -ENOMEM; + rc = tape_3592_kekl_query(device, ext_kekls); + if (rc != 0) + goto fail; + if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) { + rc = -EFAULT; + goto fail; + } + rc = 0; +fail: + kfree(ext_kekls); + return rc; +} + +static int tape_3590_mttell(struct tape_device *device, int mt_count); + +/* + * Set KEKLs + */ +static int tape_3592_kekl_set(struct tape_device *device, + struct tape390_kekl_pair *ext_kekls) +{ + struct tape_request *request; + struct tape3592_kekl_set_order *order; + + DBF_EVENT(6, "tape3592_kekl_set\n"); + if (check_ext_kekl_pair(ext_kekls)) { + DBF_EVENT(6, "invalid kekls\n"); + return -EINVAL; + } + if (tape_3590_mttell(device, 0) != 0) + return -EBADSLT; + request = tape_alloc_request(1, sizeof(*order)); + if (IS_ERR(request)) + return PTR_ERR(request); + order = request->cpdata; + memset(order, 0, sizeof(*order)); + order->code = 0xe3; + order->kekls.count = 2; + ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]); + ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]); + request->op = TO_KEKL_SET; + tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order); + + return tape_do_io_free(device, request); +} + 
+/* + * IOCTL: Set KEKLs + */ +static int tape_3592_ioctl_kekl_set(struct tape_device *device, + unsigned long arg) +{ + int rc; + struct tape390_kekl_pair *ext_kekls; + + DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n"); + if (!crypt_supported(device)) + return -ENOSYS; + if (!crypt_enabled(device)) + return -EUNATCH; + ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL); + if (!ext_kekls) + return -ENOMEM; + if (copy_from_user(ext_kekls, (char __user *)arg, sizeof(*ext_kekls))) { + rc = -EFAULT; + goto out; + } + rc = tape_3592_kekl_set(device, ext_kekls); +out: + kfree(ext_kekls); + return rc; +} + +/* + * Enable encryption + */ +static int tape_3592_enable_crypt(struct tape_device *device) +{ + struct tape_request *request; + char *data; + + DBF_EVENT(6, "tape_3592_enable_crypt\n"); + if (!crypt_supported(device)) + return -ENOSYS; + request = tape_alloc_request(2, 72); + if (IS_ERR(request)) + return PTR_ERR(request); + data = request->cpdata; + memset(data,0,72); + + data[0] = 0x05; + data[36 + 0] = 0x03; + data[36 + 1] = 0x03; + data[36 + 4] = 0x40; + data[36 + 6] = 0x01; + data[36 + 14] = 0x2f; + data[36 + 18] = 0xc3; + data[36 + 35] = 0x72; + request->op = TO_CRYPT_ON; + tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); + tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); + return tape_do_io_free(device, request); +} + +/* + * Disable encryption + */ +static int tape_3592_disable_crypt(struct tape_device *device) +{ + struct tape_request *request; + char *data; + + DBF_EVENT(6, "tape_3592_disable_crypt\n"); + if (!crypt_supported(device)) + return -ENOSYS; + request = tape_alloc_request(2, 72); + if (IS_ERR(request)) + return PTR_ERR(request); + data = request->cpdata; + memset(data,0,72); + + data[0] = 0x05; + data[36 + 0] = 0x03; + data[36 + 1] = 0x03; + data[36 + 35] = 0x32; + + request->op = TO_CRYPT_OFF; + tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); + tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); + + return tape_do_io_free(device, request); +} + +/* + * IOCTL: Set encryption status + */ +static int tape_3592_ioctl_crypt_set(struct tape_device *device, + unsigned long arg) +{ + struct tape390_crypt_info info; + + DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n"); + if (!crypt_supported(device)) + return -ENOSYS; + if (copy_from_user(&info, (char __user *)arg, sizeof(info))) + return -EFAULT; + if (info.status & ~TAPE390_CRYPT_ON_MASK) + return -EINVAL; + if (info.status & TAPE390_CRYPT_ON_MASK) + return tape_3592_enable_crypt(device); + else + return tape_3592_disable_crypt(device); +} + +static int tape_3590_sense_medium(struct tape_device *device); + +/* + * IOCTL: Query enryption status + */ +static int tape_3592_ioctl_crypt_query(struct tape_device *device, + unsigned long arg) +{ + DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n"); + if (!crypt_supported(device)) + return -ENOSYS; + tape_3590_sense_medium(device); + if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device), + sizeof(TAPE_3590_CRYPT_INFO(device)))) + return -EFAULT; + else + return 0; +} + /* * 3590 IOCTL Overload */ @@ -109,6 +436,14 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) return tape_std_display(device, &disp); } + case TAPE390_KEKL_SET: + return tape_3592_ioctl_kekl_set(device, arg); + case TAPE390_KEKL_QUERY: + return tape_3592_ioctl_kekl_query(device, arg); + case TAPE390_CRYPT_SET: + return tape_3592_ioctl_crypt_set(device, arg); + case TAPE390_CRYPT_QUERY: + return tape_3592_ioctl_crypt_query(device, arg); default: return 
-EINVAL; /* no additional ioctls */ } @@ -236,9 +571,10 @@ struct work_handler_data { }; static void -tape_3590_work_handler(void *data) +tape_3590_work_handler(struct work_struct *work) { - struct work_handler_data *p = data; + struct work_handler_data *p = + container_of(work, struct work_handler_data, work); switch (p->op) { case TO_MSEN: @@ -247,6 +583,12 @@ tape_3590_work_handler(void *data) case TO_READ_ATTMSG: tape_3590_read_attmsg(p->device); break; + case TO_CRYPT_ON: + tape_3592_enable_crypt(p->device); + break; + case TO_CRYPT_OFF: + tape_3592_disable_crypt(p->device); + break; default: DBF_EVENT(3, "T3590: work handler undefined for " "operation 0x%02x\n", p->op); @@ -263,7 +605,7 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op) if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL) return -ENOMEM; - INIT_WORK(&p->work, tape_3590_work_handler, p); + INIT_WORK(&p->work, tape_3590_work_handler); p->device = tape_get_device_reference(device); p->op = op; @@ -364,6 +706,33 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request) } #endif +static void tape_3590_med_state_set(struct tape_device *device, + struct tape_3590_med_sense *sense) +{ + struct tape390_crypt_info *c_info; + + c_info = &TAPE_3590_CRYPT_INFO(device); + + if (sense->masst == MSENSE_UNASSOCIATED) { + tape_med_state_set(device, MS_UNLOADED); + TAPE_3590_CRYPT_INFO(device).medium_status = 0; + return; + } + if (sense->masst != MSENSE_ASSOCIATED_MOUNT) { + PRINT_ERR("Unknown medium state: %x\n", sense->masst); + return; + } + tape_med_state_set(device, MS_LOADED); + c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK; + if (sense->flags & MSENSE_CRYPT_MASK) { + PRINT_INFO("Medium is encrypted (%04x)\n", sense->flags); + c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK; + } else { + DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags); + c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK; + } +} + /* * The done handler is called at device/channel end and wakes up the sleeping * process @@ -371,9 +740,10 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request) static int tape_3590_done(struct tape_device *device, struct tape_request *request) { - struct tape_3590_med_sense *sense; + struct tape_3590_disc_data *disc_data; DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); + disc_data = device->discdata; switch (request->op) { case TO_BSB: @@ -393,13 +763,20 @@ tape_3590_done(struct tape_device *device, struct tape_request *request) break; case TO_RUN: tape_med_state_set(device, MS_UNLOADED); + tape_3590_schedule_work(device, TO_CRYPT_OFF); break; case TO_MSEN: - sense = (struct tape_3590_med_sense *) request->cpdata; - if (sense->masst == MSENSE_UNASSOCIATED) - tape_med_state_set(device, MS_UNLOADED); - if (sense->masst == MSENSE_ASSOCIATED_MOUNT) - tape_med_state_set(device, MS_LOADED); + tape_3590_med_state_set(device, request->cpdata); + break; + case TO_CRYPT_ON: + TAPE_3590_CRYPT_INFO(device).status + |= TAPE390_CRYPT_ON_MASK; + *(device->modeset_byte) |= 0x03; + break; + case TO_CRYPT_OFF: + TAPE_3590_CRYPT_INFO(device).status + &= ~TAPE390_CRYPT_ON_MASK; + *(device->modeset_byte) &= ~0x03; break; case TO_RBI: /* RBI seems to succeed even without medium loaded. */ case TO_NOP: /* Same to NOP. 
*/ @@ -408,8 +785,9 @@ tape_3590_done(struct tape_device *device, struct tape_request *request) case TO_DIS: case TO_ASSIGN: case TO_UNASSIGN: - break; case TO_SIZE: + case TO_KEKL_SET: + case TO_KEKL_QUERY: break; } return TAPE_IO_SUCCESS; @@ -539,10 +917,8 @@ static int tape_3590_erp_long_busy(struct tape_device *device, struct tape_request *request, struct irb *irb) { - /* FIXME: how about WAITING for a minute ? */ - PRINT_WARN("(%s): Device is busy! Please wait a minute!\n", - device->cdev->dev.bus_id); - return tape_3590_erp_basic(device, request, irb, -EBUSY); + DBF_EVENT(6, "Device is busy\n"); + return TAPE_IO_LONG_BUSY; } /* @@ -950,6 +1326,34 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb) device->cdev->dev.bus_id, sense->mc); } +static int tape_3590_crypt_error(struct tape_device *device, + struct tape_request *request, struct irb *irb) +{ + u8 cu_rc, ekm_rc1; + u16 ekm_rc2; + u32 drv_rc; + char *bus_id, *sense; + + sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data; + bus_id = device->cdev->dev.bus_id; + cu_rc = sense[0]; + drv_rc = *((u32*) &sense[5]) & 0xffffff; + ekm_rc1 = sense[9]; + ekm_rc2 = *((u16*) &sense[10]); + if ((cu_rc == 0) && (ekm_rc2 == 0xee31)) + /* key not defined on EKM */ + return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED); + if ((cu_rc == 1) || (cu_rc == 2)) + /* No connection to EKM */ + return tape_3590_erp_basic(device, request, irb, -ENOTCONN); + + PRINT_ERR("(%s): Unable to get encryption key from EKM\n", bus_id); + PRINT_ERR("(%s): CU=%02X DRIVE=%06X EKM=%02X:%04X\n", bus_id, cu_rc, + drv_rc, ekm_rc1, ekm_rc2); + + return tape_3590_erp_basic(device, request, irb, -ENOKEY); +} + /* * 3590 error Recovery routine: * If possible, it tries to recover from the error. If this is not possible, @@ -978,6 +1382,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, sense = (struct tape_3590_sense *) irb->ecw; + DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc); + /* * First check all RC-QRCs where we want to do something special * - "break": basic error recovery is done @@ -998,6 +1404,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, case 0x2231: tape_3590_print_era_msg(device, irb); return tape_3590_erp_special_interrupt(device, request, irb); + case 0x2240: + return tape_3590_crypt_error(device, request, irb); case 0x3010: DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n", @@ -1019,6 +1427,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, DBF_EVENT(2, "(%08x): Rewind Unload complete\n", device->cdev_id); tape_med_state_set(device, MS_UNLOADED); + tape_3590_schedule_work(device, TO_CRYPT_OFF); return tape_3590_erp_basic(device, request, irb, 0); case 0x4010: @@ -1029,9 +1438,15 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, PRINT_WARN("(%s): Tape operation when medium not loaded\n", device->cdev->dev.bus_id); tape_med_state_set(device, MS_UNLOADED); + tape_3590_schedule_work(device, TO_CRYPT_OFF); return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); case 0x4012: /* Device Long Busy */ + /* XXX: Also use long busy handling here? 
*/ + DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id); tape_3590_print_era_msg(device, irb); + return tape_3590_erp_basic(device, request, irb, -EBUSY); + case 0x4014: + DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id); return tape_3590_erp_long_busy(device, request, irb); case 0x5010: @@ -1063,6 +1478,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, case 0x5120: case 0x1120: tape_med_state_set(device, MS_UNLOADED); + tape_3590_schedule_work(device, TO_CRYPT_OFF); return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); case 0x6020: @@ -1141,21 +1557,47 @@ tape_3590_setup_device(struct tape_device *device) { int rc; struct tape_3590_disc_data *data; + char *rdc_data; DBF_EVENT(6, "3590 device setup\n"); - data = kmalloc(sizeof(struct tape_3590_disc_data), - GFP_KERNEL | GFP_DMA); + data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA); if (data == NULL) return -ENOMEM; data->read_back_op = READ_PREVIOUS; device->discdata = data; - if ((rc = tape_std_assign(device)) == 0) { - /* Try to find out if medium is loaded */ - if ((rc = tape_3590_sense_medium(device)) != 0) - DBF_LH(3, "3590 medium sense returned %d\n", rc); + rdc_data = kmalloc(64, GFP_KERNEL | GFP_DMA); + if (!rdc_data) { + rc = -ENOMEM; + goto fail_kmalloc; + } + rc = read_dev_chars(device->cdev, (void**)&rdc_data, 64); + if (rc) { + DBF_LH(3, "Read device characteristics failed!\n"); + goto fail_kmalloc; + } + rc = tape_std_assign(device); + if (rc) + goto fail_rdc_data; + if (rdc_data[31] == 0x13) { + PRINT_INFO("Device has crypto support\n"); + data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK; + tape_3592_disable_crypt(device); + } else { + DBF_EVENT(6, "Device has NO crypto support\n"); } + /* Try to find out if medium is loaded */ + rc = tape_3590_sense_medium(device); + if (rc) { + DBF_LH(3, "3590 medium sense returned %d\n", rc); + goto fail_rdc_data; + } + return 0; +fail_rdc_data: + kfree(rdc_data); +fail_kmalloc: + kfree(data); return rc; } diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h index cf274b9445a..aa5138807af 100644 --- a/drivers/s390/char/tape_3590.h +++ b/drivers/s390/char/tape_3590.h @@ -2,7 +2,7 @@ * drivers/s390/char/tape_3590.h * tape device discipline for 3590 tapes. * - * Copyright (C) IBM Corp. 2001,2006 + * Copyright IBM Corp. 
2001,2006 * Author(s): Stefan Bader <shbader@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -38,16 +38,22 @@ #define MSENSE_UNASSOCIATED 0x00 #define MSENSE_ASSOCIATED_MOUNT 0x01 #define MSENSE_ASSOCIATED_UMOUNT 0x02 +#define MSENSE_CRYPT_MASK 0x00000010 #define TAPE_3590_MAX_MSG 0xb0 /* Datatypes */ struct tape_3590_disc_data { - unsigned char modeset_byte; + struct tape390_crypt_info crypt_info; int read_back_op; }; +#define TAPE_3590_CRYPT_INFO(device) \ + ((struct tape_3590_disc_data*)(device->discdata))->crypt_info +#define TAPE_3590_READ_BACK_OP(device) \ + ((struct tape_3590_disc_data*)(device->discdata))->read_back_op + struct tape_3590_sense { unsigned int command_rej:1; @@ -118,7 +124,48 @@ struct tape_3590_sense { struct tape_3590_med_sense { unsigned int macst:4; unsigned int masst:4; - char pad[127]; + char pad1[7]; + unsigned int flags; + char pad2[116]; +} __attribute__ ((packed)); + +/* Datastructures for 3592 encryption support */ + +struct tape3592_kekl { + __u8 flags; + char label[64]; +} __attribute__ ((packed)); + +struct tape3592_kekl_pair { + __u8 count; + struct tape3592_kekl kekl[2]; +} __attribute__ ((packed)); + +struct tape3592_kekl_query_data { + __u16 len; + __u8 fmt; + __u8 mc; + __u32 id; + __u8 flags; + struct tape3592_kekl_pair kekls; + char reserved[116]; +} __attribute__ ((packed)); + +struct tape3592_kekl_query_order { + __u8 code; + __u8 flags; + char reserved1[2]; + __u8 max_count; + char reserved2[35]; +} __attribute__ ((packed)); + +struct tape3592_kekl_set_order { + __u8 code; + __u8 flags; + char reserved1[2]; + __u8 op; + struct tape3592_kekl_pair kekls; + char reserved2[120]; } __attribute__ ((packed)); #endif /* _TAPE_3590_H */ diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 3225fcd1dcb..dd0ecaed592 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c @@ -15,6 +15,7 @@ #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/buffer_head.h> +#include <linux/kernel.h> #include <asm/debug.h> @@ -72,7 +73,7 @@ tapeblock_trigger_requeue(struct tape_device *device) /* * Post finished request. */ -static inline void +static void tapeblock_end_request(struct request *req, int uptodate) { if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) @@ -107,7 +108,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data) /* * Feed the tape device CCW queue with requests supplied in a list. */ -static inline int +static int tapeblock_start_request(struct tape_device *device, struct request *req) { struct tape_request * ccw_req; @@ -143,7 +144,8 @@ tapeblock_start_request(struct tape_device *device, struct request *req) * queue. 
*/ static void -tapeblock_requeue(void *data) { +tapeblock_requeue(struct work_struct *work) { + struct tape_blk_data * blkdat; struct tape_device * device; request_queue_t * queue; int nr_queued; @@ -151,7 +153,8 @@ tapeblock_requeue(void *data) { struct list_head * l; int rc; - device = (struct tape_device *) data; + blkdat = container_of(work, struct tape_blk_data, requeue_task); + device = blkdat->device; if (!device) return; @@ -212,6 +215,7 @@ tapeblock_setup_device(struct tape_device * device) int rc; blkdat = &device->blk_data; + blkdat->device = device; spin_lock_init(&blkdat->request_queue_lock); atomic_set(&blkdat->requeue_scheduled, 0); @@ -255,8 +259,8 @@ tapeblock_setup_device(struct tape_device * device) add_disk(disk); - INIT_WORK(&blkdat->requeue_task, tapeblock_requeue, - tape_get_device_reference(device)); + tape_get_device_reference(device); + INIT_WORK(&blkdat->requeue_task, tapeblock_requeue); return 0; @@ -271,7 +275,7 @@ void tapeblock_cleanup_device(struct tape_device *device) { flush_scheduled_work(); - device->blk_data.requeue_task.data = tape_put_device(device); + tape_put_device(device); if (!device->blk_data.disk) { PRINT_ERR("(%s): No gendisk to clean up!\n", diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index 97f75237bed..9faea04e11e 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c @@ -3,7 +3,7 @@ * character device frontend for tape device driver * * S390 and zSeries version - * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 2001,2006 * Author(s): Carsten Otte <cotte@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> @@ -89,22 +89,7 @@ tapechar_cleanup_device(struct tape_device *device) device->nt = NULL; } -/* - * Terminate write command (we write two TMs and skip backward over last) - * This ensures that the tape is always correctly terminated. - * When the user writes afterwards a new file, he will overwrite the - * second TM and therefore one TM will remain to separate the - * two files on the tape... - */ -static inline void -tapechar_terminate_write(struct tape_device *device) -{ - if (tape_mtop(device, MTWEOF, 1) == 0 && - tape_mtop(device, MTWEOF, 1) == 0) - tape_mtop(device, MTBSR, 1); -} - -static inline int +static int tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) { struct idal_buffer *new; @@ -137,7 +122,7 @@ tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) /* * Tape device read function */ -ssize_t +static ssize_t tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) { struct tape_device *device; @@ -201,7 +186,7 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) /* * Tape device write function */ -ssize_t +static ssize_t tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos) { struct tape_device *device; @@ -291,20 +276,20 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t /* * Character frontend tape device open function. 
*/ -int +static int tapechar_open (struct inode *inode, struct file *filp) { struct tape_device *device; int minor, rc; DBF_EVENT(6, "TCHAR:open: %i:%i\n", - imajor(filp->f_dentry->d_inode), - iminor(filp->f_dentry->d_inode)); + imajor(filp->f_path.dentry->d_inode), + iminor(filp->f_path.dentry->d_inode)); - if (imajor(filp->f_dentry->d_inode) != tapechar_major) + if (imajor(filp->f_path.dentry->d_inode) != tapechar_major) return -ENODEV; - minor = iminor(filp->f_dentry->d_inode); + minor = iminor(filp->f_path.dentry->d_inode); device = tape_get_device(minor / TAPE_MINORS_PER_DEV); if (IS_ERR(device)) { DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n"); @@ -326,7 +311,7 @@ tapechar_open (struct inode *inode, struct file *filp) * Character frontend tape device release function. */ -int +static int tapechar_release(struct inode *inode, struct file *filp) { struct tape_device *device; diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 2826aed9104..e2a8a1a04ba 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c @@ -3,7 +3,7 @@ * basic function of the tape device driver * * S390 and zSeries version - * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 2001,2006 * Author(s): Carsten Otte <cotte@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> @@ -26,9 +26,11 @@ #include "tape_std.h" #define PRINTK_HEADER "TAPE_CORE: " +#define LONG_BUSY_TIMEOUT 180 /* seconds */ static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); -static void tape_delayed_next_request(void * data); +static void tape_delayed_next_request(struct work_struct *); +static void tape_long_busy_timeout(unsigned long data); /* * One list to contain all tape devices of all disciplines, so @@ -69,10 +71,12 @@ const char *tape_op_verbose[TO_SIZE] = [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF", [TO_READ_ATTMSG] = "RAT", [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS", - [TO_UNASSIGN] = "UAS" + [TO_UNASSIGN] = "UAS", [TO_CRYPT_ON] = "CON", + [TO_CRYPT_OFF] = "COF", [TO_KEKL_SET] = "KLS", + [TO_KEKL_QUERY] = "KLQ", }; -static inline int +static int busid_to_int(char *bus_id) { int dec; @@ -252,7 +256,7 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) /* * Stop running ccw. Has to be called with the device lock held. */ -static inline int +static int __tape_cancel_io(struct tape_device *device, struct tape_request *request) { int retries; @@ -272,7 +276,7 @@ __tape_cancel_io(struct tape_device *device, struct tape_request *request) return 0; case -EBUSY: request->status = TAPE_REQUEST_CANCEL; - schedule_work(&device->tape_dnr); + schedule_delayed_work(&device->tape_dnr, 0); return 0; case -ENODEV: DBF_EXCEPTION(2, "device gone, retry\n"); @@ -346,6 +350,9 @@ tape_generic_online(struct tape_device *device, return -EINVAL; } + init_timer(&device->lb_timeout); + device->lb_timeout.function = tape_long_busy_timeout; + /* Let the discipline have a go at the device. 
*/ device->discipline = discipline; if (!try_module_get(discipline->owner)) { @@ -385,7 +392,7 @@ out: return rc; } -static inline void +static void tape_cleanup_device(struct tape_device *device) { tapeblock_cleanup_device(device); @@ -470,7 +477,7 @@ tape_alloc_device(void) *device->modeset_byte = 0; device->first_minor = -1; atomic_set(&device->ref_count, 1); - INIT_WORK(&device->tape_dnr, tape_delayed_next_request, device); + INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request); return device; } @@ -563,7 +570,7 @@ tape_generic_probe(struct ccw_device *cdev) return ret; } -static inline void +static void __tape_discard_requests(struct tape_device *device) { struct tape_request * request; @@ -703,7 +710,7 @@ tape_free_request (struct tape_request * request) kfree(request); } -static inline int +static int __tape_start_io(struct tape_device *device, struct tape_request *request) { int rc; @@ -724,7 +731,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request) } else if (rc == -EBUSY) { /* The common I/O subsystem is currently busy. Retry later. */ request->status = TAPE_REQUEST_QUEUED; - schedule_work(&device->tape_dnr); + schedule_delayed_work(&device->tape_dnr, 0); rc = 0; } else { /* Start failed. Remove request and indicate failure. */ @@ -733,7 +740,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request) return rc; } -static inline void +static void __tape_start_next_request(struct tape_device *device) { struct list_head *l, *n; @@ -790,18 +797,34 @@ __tape_start_next_request(struct tape_device *device) } static void -tape_delayed_next_request(void *data) +tape_delayed_next_request(struct work_struct *work) { - struct tape_device * device; + struct tape_device *device = + container_of(work, struct tape_device, tape_dnr.work); - device = (struct tape_device *) data; DBF_LH(6, "tape_delayed_next_request(%p)\n", device); spin_lock_irq(get_ccwdev_lock(device->cdev)); __tape_start_next_request(device); spin_unlock_irq(get_ccwdev_lock(device->cdev)); } -static inline void +static void tape_long_busy_timeout(unsigned long data) +{ + struct tape_request *request; + struct tape_device *device; + + device = (struct tape_device *) data; + spin_lock_irq(get_ccwdev_lock(device->cdev)); + request = list_entry(device->req_queue.next, struct tape_request, list); + if (request->status != TAPE_REQUEST_LONG_BUSY) + BUG(); + DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id); + __tape_start_next_request(device); + device->lb_timeout.data = (unsigned long) tape_put_device(device); + spin_unlock_irq(get_ccwdev_lock(device->cdev)); +} + +static void __tape_end_request( struct tape_device * device, struct tape_request * request, @@ -878,7 +901,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request, * and starts it if the tape is idle. Has to be called with * the device lock held. */ -static inline int +static int __tape_start_request(struct tape_device *device, struct tape_request *request) { int rc; @@ -1094,7 +1117,22 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) /* May be an unsolicited irq */ if(request != NULL) request->rescnt = irb->scsw.count; - + else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) && + !list_empty(&device->req_queue)) { + /* Not Ready to Ready after long busy ? 
*/ + struct tape_request *req; + req = list_entry(device->req_queue.next, + struct tape_request, list); + if (req->status == TAPE_REQUEST_LONG_BUSY) { + DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id); + if (del_timer(&device->lb_timeout)) { + device->lb_timeout.data = (unsigned long) + tape_put_device(device); + __tape_start_next_request(device); + } + return; + } + } if (irb->scsw.dstat != 0x0c) { /* Set the 'ONLINE' flag depending on sense byte 1 */ if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) @@ -1142,6 +1180,15 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) break; case TAPE_IO_PENDING: break; + case TAPE_IO_LONG_BUSY: + device->lb_timeout.data = + (unsigned long)tape_get_device_reference(device); + device->lb_timeout.expires = jiffies + + LONG_BUSY_TIMEOUT * HZ; + DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id); + add_timer(&device->lb_timeout); + request->status = TAPE_REQUEST_LONG_BUSY; + break; case TAPE_IO_RETRY: rc = __tape_start_io(device, request); if (rc) diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 4717c361160..bc33068b9ce 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -36,7 +36,7 @@ struct tty_driver *tty3270_driver; static int tty3270_max_index; -struct raw3270_fn tty3270_fn; +static struct raw3270_fn tty3270_fn; struct tty3270_cell { unsigned char character; @@ -119,8 +119,7 @@ static void tty3270_update(struct tty3270 *); /* * Setup timeout for a device. On timeout trigger an update. */ -void -tty3270_set_timer(struct tty3270 *tp, int expires) +static void tty3270_set_timer(struct tty3270 *tp, int expires) { if (expires == 0) { if (timer_pending(&tp->timer) && del_timer(&tp->timer)) @@ -841,7 +840,7 @@ tty3270_del_views(void) } } -struct raw3270_fn tty3270_fn = { +static struct raw3270_fn tty3270_fn = { .activate = tty3270_activate, .deactivate = tty3270_deactivate, .intv = (void *) tty3270_irq, @@ -1659,7 +1658,7 @@ tty3270_flush_buffer(struct tty_struct *tty) * Check for visible/invisible input switches */ static void -tty3270_set_termios(struct tty_struct *tty, struct termios *old) +tty3270_set_termios(struct tty_struct *tty, struct ktermios *old) { struct tty3270 *tp; int new; @@ -1754,8 +1753,7 @@ static const struct tty_operations tty3270_ops = { .set_termios = tty3270_set_termios }; -void -tty3270_notifier(int index, int active) +static void tty3270_notifier(int index, int active) { if (active) tty_register_device(tty3270_driver, index, NULL); @@ -1767,8 +1765,7 @@ tty3270_notifier(int index, int active) * 3270 tty registration code called from tty_init(). * Most kernel services (incl. kmalloc) are available at this poimt. 
*/ -int __init -tty3270_init(void) +static int __init tty3270_init(void) { struct tty_driver *driver; int ret; diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 1678b6c757e..a420cd09904 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -117,7 +117,7 @@ vmcp_write(struct file *file, const char __user * buff, size_t count, return -ENOMEM; } debug_text_event(vmcp_debug, 1, cmd); - session->resp_size = __cpcmd(cmd, session->response, + session->resp_size = cpcmd(cmd, session->response, session->bufsize, &session->resp_code); up(&session->mutex); diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 6cb23040954..8432a76b961 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -3,7 +3,7 @@ * character device driver for reading z/VM system service records * * - * Copyright (C) 2004 IBM Corporation + * Copyright 2004 IBM Corporation * character device driver for reading z/VM system service records, * Version 1.0 * Author(s): Xenia Tkatschow <xenia@us.ibm.com> @@ -21,7 +21,7 @@ #include <asm/cpcmd.h> #include <asm/debug.h> #include <asm/ebcdic.h> -#include "../net/iucv.h" +#include <net/iucv/iucv.h> #include <linux/kmod.h> #include <linux/cdev.h> #include <linux/device.h> @@ -60,12 +60,11 @@ struct vmlogrdr_priv_t { char system_service[8]; char internal_name[8]; char recording_name[8]; - u16 pathid; + struct iucv_path *path; int connection_established; int iucv_path_severed; - iucv_MessagePending local_interrupt_buffer; + struct iucv_message local_interrupt_buffer; atomic_t receive_ready; - iucv_handle_t iucv_handle; int minor_num; char * buffer; char * current_position; @@ -97,40 +96,21 @@ static struct file_operations vmlogrdr_fops = { }; -static u8 iucvMagic[16] = { - 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, - 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 -}; +static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]); +static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]); +static void vmlogrdr_iucv_message_pending(struct iucv_path *, + struct iucv_message *); -static u8 mask[] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +static struct iucv_handler vmlogrdr_iucv_handler = { + .path_complete = vmlogrdr_iucv_path_complete, + .path_severed = vmlogrdr_iucv_path_severed, + .message_pending = vmlogrdr_iucv_message_pending, }; -static u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; - - -static void -vmlogrdr_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data); -static void -vmlogrdr_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data); -static void -vmlogrdr_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data); - - -static iucv_interrupt_ops_t vmlogrdr_iucvops = { - .ConnectionComplete = vmlogrdr_iucv_ConnectionComplete, - .ConnectionSevered = vmlogrdr_iucv_ConnectionSevered, - .MessagePending = vmlogrdr_iucv_MessagePending, -}; - - -DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue); -DECLARE_WAIT_QUEUE_HEAD(read_wait_queue); +static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue); +static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue); /* * pointer to system service private structure @@ -177,28 +157,29 @@ static struct cdev *vmlogrdr_cdev = NULL; static int recording_class_AB; -static void -vmlogrdr_iucv_ConnectionComplete (iucv_ConnectionComplete * eib, - void * 
pgm_data) +static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16]) { - struct vmlogrdr_priv_t * logptr = pgm_data; + struct vmlogrdr_priv_t * logptr = path->private; + spin_lock(&logptr->priv_lock); logptr->connection_established = 1; spin_unlock(&logptr->priv_lock); wake_up(&conn_wait_queue); - return; } -static void -vmlogrdr_iucv_ConnectionSevered (iucv_ConnectionSevered * eib, void * pgm_data) +static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) { - u8 reason = (u8) eib->ipuser[8]; - struct vmlogrdr_priv_t * logptr = pgm_data; + struct vmlogrdr_priv_t * logptr = path->private; + u8 reason = (u8) ipuser[8]; printk (KERN_ERR "vmlogrdr: connection severed with" " reason %i\n", reason); + iucv_path_sever(path, NULL); + kfree(path); + logptr->path = NULL; + spin_lock(&logptr->priv_lock); logptr->connection_established = 0; logptr->iucv_path_severed = 1; @@ -210,10 +191,10 @@ vmlogrdr_iucv_ConnectionSevered (iucv_ConnectionSevered * eib, void * pgm_data) } -static void -vmlogrdr_iucv_MessagePending (iucv_MessagePending * eib, void * pgm_data) +static void vmlogrdr_iucv_message_pending(struct iucv_path *path, + struct iucv_message *msg) { - struct vmlogrdr_priv_t * logptr = pgm_data; + struct vmlogrdr_priv_t * logptr = path->private; /* * This function is the bottom half so it should be quick. @@ -221,15 +202,15 @@ vmlogrdr_iucv_MessagePending (iucv_MessagePending * eib, void * pgm_data) * the usage count */ spin_lock(&logptr->priv_lock); - memcpy(&(logptr->local_interrupt_buffer), eib, sizeof(*eib)); + memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg)); atomic_inc(&logptr->receive_ready); spin_unlock(&logptr->priv_lock); wake_up_interruptible(&read_wait_queue); } -static int -vmlogrdr_get_recording_class_AB(void) { +static int vmlogrdr_get_recording_class_AB(void) +{ char cp_command[]="QUERY COMMAND RECORDING "; char cp_response[80]; char *tail; @@ -259,8 +240,9 @@ vmlogrdr_get_recording_class_AB(void) { } -static int -vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, int action, int purge) { +static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, + int action, int purge) +{ char cp_command[80]; char cp_response[160]; @@ -318,8 +300,7 @@ vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, int action, int purge) { } -static int -vmlogrdr_open (struct inode *inode, struct file *filp) +static int vmlogrdr_open (struct inode *inode, struct file *filp) { int dev_num = 0; struct vmlogrdr_priv_t * logptr = NULL; @@ -329,10 +310,7 @@ vmlogrdr_open (struct inode *inode, struct file *filp) dev_num = iminor(inode); if (dev_num > MAXMINOR) return -ENODEV; - logptr = &sys_ser[dev_num]; - if (logptr == NULL) - return -ENODEV; /* * only allow for blocking reads to be open @@ -345,52 +323,38 @@ vmlogrdr_open (struct inode *inode, struct file *filp) if (logptr->dev_in_use) { spin_unlock_bh(&logptr->priv_lock); return -EBUSY; - } else { - logptr->dev_in_use = 1; - spin_unlock_bh(&logptr->priv_lock); } - + logptr->dev_in_use = 1; + logptr->connection_established = 0; + logptr->iucv_path_severed = 0; atomic_set(&logptr->receive_ready, 0); logptr->buffer_free = 1; + spin_unlock_bh(&logptr->priv_lock); /* set the file options */ filp->private_data = logptr; filp->f_op = &vmlogrdr_fops; /* start recording for this service*/ - ret=0; - if (logptr->autorecording) + if (logptr->autorecording) { ret = vmlogrdr_recording(logptr,1,logptr->autopurge); - if (ret) - printk (KERN_WARNING "vmlogrdr: failed to start " - "recording automatically\n"); - - /* 
Register with iucv driver */ - logptr->iucv_handle = iucv_register_program(iucvMagic, - logptr->system_service, mask, &vmlogrdr_iucvops, - logptr); - - if (logptr->iucv_handle == NULL) { - printk (KERN_ERR "vmlogrdr: failed to register with" - "iucv driver\n"); - goto not_registered; + if (ret) + printk (KERN_WARNING "vmlogrdr: failed to start " + "recording automatically\n"); } /* create connection to the system service */ - spin_lock_bh(&logptr->priv_lock); - logptr->connection_established = 0; - logptr->iucv_path_severed = 0; - spin_unlock_bh(&logptr->priv_lock); - - connect_rc = iucv_connect (&(logptr->pathid), 10, iucvMagic, - logptr->system_service, iucv_host, 0, - NULL, NULL, - logptr->iucv_handle, NULL); + logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL); + if (!logptr->path) + goto out_dev; + connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler, + logptr->system_service, NULL, NULL, + logptr); if (connect_rc) { printk (KERN_ERR "vmlogrdr: iucv connection to %s " "failed with rc %i \n", logptr->system_service, connect_rc); - goto not_connected; + goto out_path; } /* We've issued the connect and now we must wait for a @@ -399,35 +363,28 @@ vmlogrdr_open (struct inode *inode, struct file *filp) */ wait_event(conn_wait_queue, (logptr->connection_established) || (logptr->iucv_path_severed)); - if (logptr->iucv_path_severed) { - goto not_connected; - } - + if (logptr->iucv_path_severed) + goto out_record; return nonseekable_open(inode, filp); -not_connected: - iucv_unregister_program(logptr->iucv_handle); - logptr->iucv_handle = NULL; -not_registered: +out_record: if (logptr->autorecording) vmlogrdr_recording(logptr,0,logptr->autopurge); +out_path: + kfree(logptr->path); /* kfree(NULL) is ok. */ + logptr->path = NULL; +out_dev: logptr->dev_in_use = 0; return -EIO; - - } -static int -vmlogrdr_release (struct inode *inode, struct file *filp) +static int vmlogrdr_release (struct inode *inode, struct file *filp) { int ret; struct vmlogrdr_priv_t * logptr = filp->private_data; - iucv_unregister_program(logptr->iucv_handle); - logptr->iucv_handle = NULL; - if (logptr->autorecording) { ret = vmlogrdr_recording(logptr,0,logptr->autopurge); if (ret) @@ -440,8 +397,8 @@ vmlogrdr_release (struct inode *inode, struct file *filp) } -static int -vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) { +static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) +{ int rc, *temp; /* we need to keep track of two data sizes here: * The number of bytes we need to receive from iucv and @@ -462,8 +419,7 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) { * We need to return the total length of the record * + size of FENCE in the first 4 bytes of the buffer. */ - iucv_data_count = - priv->local_interrupt_buffer.ln1msg2.ipbfln1f; + iucv_data_count = priv->local_interrupt_buffer.length; user_data_count = sizeof(int); temp = (int*)priv->buffer; *temp= iucv_data_count + sizeof(FENCE); @@ -475,14 +431,10 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) { */ if (iucv_data_count > NET_BUFFER_SIZE) iucv_data_count = NET_BUFFER_SIZE; - rc = iucv_receive(priv->pathid, - priv->local_interrupt_buffer.ipmsgid, - priv->local_interrupt_buffer.iptrgcls, - buffer, - iucv_data_count, - NULL, - NULL, - &priv->residual_length); + rc = iucv_message_receive(priv->path, + &priv->local_interrupt_buffer, + 0, buffer, iucv_data_count, + &priv->residual_length); spin_unlock_bh(&priv->priv_lock); /* An rc of 5 indicates that the record was bigger then * the buffer, which is OK for us. 
A 9 indicates that the @@ -514,8 +466,8 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) { } -static ssize_t -vmlogrdr_read(struct file *filp, char __user *data, size_t count, loff_t * ppos) +static ssize_t vmlogrdr_read(struct file *filp, char __user *data, + size_t count, loff_t * ppos) { int rc; struct vmlogrdr_priv_t * priv = filp->private_data; @@ -547,8 +499,10 @@ vmlogrdr_read(struct file *filp, char __user *data, size_t count, loff_t * ppos) return count; } -static ssize_t -vmlogrdr_autopurge_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { +static ssize_t vmlogrdr_autopurge_store(struct device * dev, + struct device_attribute *attr, + const char * buf, size_t count) +{ struct vmlogrdr_priv_t *priv = dev->driver_data; ssize_t ret = count; @@ -566,8 +520,10 @@ vmlogrdr_autopurge_store(struct device * dev, struct device_attribute *attr, con } -static ssize_t -vmlogrdr_autopurge_show(struct device *dev, struct device_attribute *attr, char *buf) { +static ssize_t vmlogrdr_autopurge_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ struct vmlogrdr_priv_t *priv = dev->driver_data; return sprintf(buf, "%u\n", priv->autopurge); } @@ -577,8 +533,10 @@ static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show, vmlogrdr_autopurge_store); -static ssize_t -vmlogrdr_purge_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { +static ssize_t vmlogrdr_purge_store(struct device * dev, + struct device_attribute *attr, + const char * buf, size_t count) +{ char cp_command[80]; char cp_response[80]; @@ -618,9 +576,10 @@ vmlogrdr_purge_store(struct device * dev, struct device_attribute *attr, const c static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store); -static ssize_t -vmlogrdr_autorecording_store(struct device *dev, struct device_attribute *attr, const char *buf, - size_t count) { +static ssize_t vmlogrdr_autorecording_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ struct vmlogrdr_priv_t *priv = dev->driver_data; ssize_t ret = count; @@ -638,8 +597,10 @@ vmlogrdr_autorecording_store(struct device *dev, struct device_attribute *attr, } -static ssize_t -vmlogrdr_autorecording_show(struct device *dev, struct device_attribute *attr, char *buf) { +static ssize_t vmlogrdr_autorecording_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ struct vmlogrdr_priv_t *priv = dev->driver_data; return sprintf(buf, "%u\n", priv->autorecording); } @@ -649,9 +610,10 @@ static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show, vmlogrdr_autorecording_store); -static ssize_t -vmlogrdr_recording_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { - +static ssize_t vmlogrdr_recording_store(struct device * dev, + struct device_attribute *attr, + const char * buf, size_t count) +{ struct vmlogrdr_priv_t *priv = dev->driver_data; ssize_t ret; @@ -676,8 +638,9 @@ vmlogrdr_recording_store(struct device * dev, struct device_attribute *attr, con static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store); -static ssize_t -vmlogrdr_recording_status_show(struct device_driver *driver, char *buf) { +static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver, + char *buf) +{ char cp_command[] = "QUERY RECORDING "; int len; @@ -710,52 +673,63 @@ static struct device_driver vmlogrdr_driver = { }; -static int -vmlogrdr_register_driver(void) { +static int 
vmlogrdr_register_driver(void) +{ int ret; + /* Register with iucv driver */ + ret = iucv_register(&vmlogrdr_iucv_handler, 1); + if (ret) { + printk (KERN_ERR "vmlogrdr: failed to register with" + "iucv driver\n"); + goto out; + } + ret = driver_register(&vmlogrdr_driver); if (ret) { printk(KERN_ERR "vmlogrdr: failed to register driver.\n"); - return ret; + goto out_iucv; } ret = driver_create_file(&vmlogrdr_driver, &driver_attr_recording_status); if (ret) { printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n"); - goto unregdriver; + goto out_driver; } vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); if (IS_ERR(vmlogrdr_class)) { printk(KERN_ERR "vmlogrdr: failed to create class.\n"); - ret=PTR_ERR(vmlogrdr_class); - vmlogrdr_class=NULL; - goto unregattr; + ret = PTR_ERR(vmlogrdr_class); + vmlogrdr_class = NULL; + goto out_attr; } return 0; -unregattr: +out_attr: driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status); -unregdriver: +out_driver: driver_unregister(&vmlogrdr_driver); +out_iucv: + iucv_unregister(&vmlogrdr_iucv_handler, 1); +out: return ret; } -static void -vmlogrdr_unregister_driver(void) { +static void vmlogrdr_unregister_driver(void) +{ class_destroy(vmlogrdr_class); vmlogrdr_class = NULL; driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status); driver_unregister(&vmlogrdr_driver); - return; + iucv_unregister(&vmlogrdr_iucv_handler, 1); } -static int -vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) { +static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) +{ struct device *dev; int ret; @@ -804,9 +778,10 @@ vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) { } -static int -vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv ) { - class_device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num)); +static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv) +{ + class_device_destroy(vmlogrdr_class, + MKDEV(vmlogrdr_major, priv->minor_num)); if (priv->device != NULL) { sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group); device_unregister(priv->device); @@ -816,8 +791,8 @@ vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv ) { } -static int -vmlogrdr_register_cdev(dev_t dev) { +static int vmlogrdr_register_cdev(dev_t dev) +{ int rc = 0; vmlogrdr_cdev = cdev_alloc(); if (!vmlogrdr_cdev) { @@ -837,9 +812,10 @@ vmlogrdr_register_cdev(dev_t dev) { } -static void -vmlogrdr_cleanup(void) { +static void vmlogrdr_cleanup(void) +{ int i; + if (vmlogrdr_cdev) { cdev_del(vmlogrdr_cdev); vmlogrdr_cdev=NULL; @@ -856,8 +832,7 @@ vmlogrdr_cleanup(void) { } -static int -vmlogrdr_init(void) +static int vmlogrdr_init(void) { int rc; int i; @@ -907,8 +882,7 @@ cleanup: } -static void -vmlogrdr_exit(void) +static void vmlogrdr_exit(void) { vmlogrdr_cleanup(); printk (KERN_INFO "vmlogrdr: driver unloaded\n"); diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 12c2d6b746e..aa65df4dfce 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -43,7 +43,7 @@ typedef enum {add, free} range_action; * Function: blacklist_range * (Un-)blacklist the devices from-to */ -static inline void +static void blacklist_range (range_action action, unsigned int from, unsigned int to, unsigned int ssid) { @@ -69,7 +69,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to, * Get devno/busid from given string. * Shamelessly grabbed from dasd_devmap.c. 
*/ -static inline int +static int blacklist_busid(char **str, int *id0, int *ssid, int *devno) { int val, old_style; @@ -123,10 +123,10 @@ confused: return 1; } -static inline int +static int blacklist_parse_parameters (char *str, range_action action) { - unsigned int from, to, from_id0, to_id0, from_ssid, to_ssid; + int from, to, from_id0, to_id0, from_ssid, to_ssid; while (*str != 0 && *str != '\n') { range_action ra = action; @@ -227,7 +227,7 @@ is_blacklisted (int ssid, int devno) * Function: blacklist_parse_proc_parameters * parse the stuff which is piped to /proc/cio_ignore */ -static inline void +static void blacklist_parse_proc_parameters (char *buf) { if (strncmp (buf, "free ", 5) == 0) { diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 38954f5cd14..d48e3ca4752 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -53,7 +53,7 @@ ccwgroup_uevent (struct device *dev, char **envp, int num_envp, char *buffer, static struct bus_type ccwgroup_bus_type; -static inline void +static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) { int i; @@ -104,7 +104,7 @@ ccwgroup_release (struct device *dev) kfree(gdev); } -static inline int +static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) { char str[8]; @@ -424,7 +424,7 @@ ccwgroup_probe_ccwdev(struct ccw_device *cdev) return 0; } -static inline struct ccwgroup_device * +static struct ccwgroup_device * __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) { struct ccwgroup_device *gdev; diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index dbfb77b0392..6f05a44e381 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -93,7 +93,7 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) u16 sch; /* subchannel */ u8 chpid[8]; /* chpids 0-7 */ u16 fla[8]; /* full link addresses 0-7 */ - } *ssd_area; + } __attribute__ ((packed)) *ssd_area; ssd_area = page; @@ -183,7 +183,7 @@ css_get_ssd_info(struct subchannel *sch) page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!page) return -ENOMEM; - spin_lock_irq(&sch->lock); + spin_lock_irq(sch->lock); ret = chsc_get_sch_desc_irq(sch, page); if (ret) { static int cio_chsc_err_msg; @@ -197,7 +197,7 @@ css_get_ssd_info(struct subchannel *sch) cio_chsc_err_msg = 1; } } - spin_unlock_irq(&sch->lock); + spin_unlock_irq(sch->lock); free_page((unsigned long)page); if (!ret) { int j, chpid, mask; @@ -233,7 +233,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) if (j >= 8) return 0; - spin_lock_irq(&sch->lock); + spin_lock_irq(sch->lock); stsch(sch->schid, &schib); if (!schib.pmcw.dnv) @@ -265,10 +265,10 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) else if (sch->lpm == mask) goto out_unreg; out_unlock: - spin_unlock_irq(&sch->lock); + spin_unlock_irq(sch->lock); return 0; out_unreg: - spin_unlock_irq(&sch->lock); + spin_unlock_irq(sch->lock); sch->lpm = 0; if (css_enqueue_subchannel_slow(sch->schid)) { css_clear_subchannel_slow_list(); @@ -277,7 +277,7 @@ out_unreg: return 0; } -static inline void +static void s390_set_chpid_offline( __u8 chpid) { char dbf_txt[15]; @@ -338,7 +338,7 @@ s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch) return 0x80 >> chp; } -static inline int +static int s390_process_res_acc_new_sch(struct subchannel_id schid) { struct schib schib; @@ -378,12 +378,12 @@ __s390_process_res_acc(struct subchannel_id schid, void *data) /* Check if a subchannel is newly available. 
*/ return s390_process_res_acc_new_sch(schid); - spin_lock_irq(&sch->lock); + spin_lock_irq(sch->lock); chp_mask = s390_process_res_acc_sch(res_data, sch); if (chp_mask == 0) { - spin_unlock_irq(&sch->lock); + spin_unlock_irq(sch->lock); put_device(&sch->dev); return 0; } @@ -397,7 +397,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data) else if (sch->driver && sch->driver->verify) sch->driver->verify(&sch->dev); - spin_unlock_irq(&sch->lock); + spin_unlock_irq(sch->lock); put_device(&sch->dev); return 0; } @@ -444,7 +444,7 @@ __get_chpid_from_lir(void *data) u32 andesc[28]; /* incident-specific information */ u32 isinfo[28]; - } *lir; + } __attribute__ ((packed)) *lir; lir = data; if (!(lir->iq&0x80)) @@ -461,154 +461,146 @@ __get_chpid_from_lir(void *data) return (u16) (lir->indesc[0]&0x000000ff); } -int -chsc_process_crw(void) +struct chsc_sei_area { + struct chsc_header request; + u32 reserved1; + u32 reserved2; + u32 reserved3; + struct chsc_header response; + u32 reserved4; + u8 flags; + u8 vf; /* validity flags */ + u8 rs; /* reporting source */ + u8 cc; /* content code */ + u16 fla; /* full link address */ + u16 rsid; /* reporting source id */ + u32 reserved5; + u32 reserved6; + u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */ + /* ccdf has to be big enough for a link-incident record */ +} __attribute__ ((packed)); + +static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) +{ + int chpid; + + CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", + sei_area->rs, sei_area->rsid); + if (sei_area->rs != 4) + return 0; + chpid = __get_chpid_from_lir(sei_area->ccdf); + if (chpid < 0) + CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); + else + s390_set_chpid_offline(chpid); + + return 0; +} + +static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) { - int chpid, ret; struct res_acc_data res_data; - struct { - struct chsc_header request; - u32 reserved1; - u32 reserved2; - u32 reserved3; - struct chsc_header response; - u32 reserved4; - u8 flags; - u8 vf; /* validity flags */ - u8 rs; /* reporting source */ - u8 cc; /* content code */ - u16 fla; /* full link address */ - u16 rsid; /* reporting source id */ - u32 reserved5; - u32 reserved6; - u32 ccdf[96]; /* content-code dependent field */ - /* ccdf has to be big enough for a link-incident record */ - } *sei_area; + struct device *dev; + int status; + int rc; + + CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " + "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); + if (sei_area->rs != 4) + return 0; + /* allocate a new channel path structure, if needed */ + status = get_chp_status(sei_area->rsid); + if (status < 0) + new_channel_path(sei_area->rsid); + else if (!status) + return 0; + dev = get_device(&css[0]->chps[sei_area->rsid]->dev); + memset(&res_data, 0, sizeof(struct res_acc_data)); + res_data.chp = to_channelpath(dev); + if ((sei_area->vf & 0xc0) != 0) { + res_data.fla = sei_area->fla; + if ((sei_area->vf & 0xc0) == 0xc0) + /* full link address */ + res_data.fla_mask = 0xffff; + else + /* link address */ + res_data.fla_mask = 0xff00; + } + rc = s390_process_res_acc(&res_data); + put_device(dev); + + return rc; +} + +static int chsc_process_sei(struct chsc_sei_area *sei_area) +{ + int rc; + + /* Check if we might have lost some information. */ + if (sei_area->flags & 0x40) + CIO_CRW_EVENT(2, "chsc: event overflow\n"); + /* which kind of information was stored? 
*/ + rc = 0; + switch (sei_area->cc) { + case 1: /* link incident*/ + rc = chsc_process_sei_link_incident(sei_area); + break; + case 2: /* i/o resource accessibiliy */ + rc = chsc_process_sei_res_acc(sei_area); + break; + default: /* other stuff */ + CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", + sei_area->cc); + break; + } + + return rc; +} + +int chsc_process_crw(void) +{ + struct chsc_sei_area *sei_area; + int ret; + int rc; if (!sei_page) return 0; - /* - * build the chsc request block for store event information - * and do the call - * This function is only called by the machine check handler thread, - * so we don't need locking for the sei_page. - */ + /* Access to sei_page is serialized through machine check handler + * thread, so no need for locking. */ sei_area = sei_page; CIO_TRACE_EVENT( 2, "prcss"); ret = 0; do { - int ccode, status; - struct device *dev; memset(sei_area, 0, sizeof(*sei_area)); - memset(&res_data, 0, sizeof(struct res_acc_data)); sei_area->request.length = 0x0010; sei_area->request.code = 0x000e; + if (chsc(sei_area)) + break; - ccode = chsc(sei_area); - if (ccode > 0) - return 0; - - switch (sei_area->response.code) { - /* for debug purposes, check for problems */ - case 0x0001: - CIO_CRW_EVENT(4, "chsc_process_crw: event information " - "successfully stored\n"); - break; /* everything ok */ - case 0x0002: - CIO_CRW_EVENT(2, - "chsc_process_crw: invalid command!\n"); - return 0; - case 0x0003: - CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc " - "request block!\n"); - return 0; - case 0x0005: - CIO_CRW_EVENT(2, "chsc_process_crw: no event " - "information stored\n"); - return 0; - default: - CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n", + if (sei_area->response.code == 0x0001) { + CIO_CRW_EVENT(4, "chsc: sei successful\n"); + rc = chsc_process_sei(sei_area); + if (rc) + ret = rc; + } else { + CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", sei_area->response.code); - return 0; - } - - /* Check if we might have lost some information. */ - if (sei_area->flags & 0x40) - CIO_CRW_EVENT(2, "chsc_process_crw: Event information " - "has been lost due to overflow!\n"); - - if (sei_area->rs != 4) { - CIO_CRW_EVENT(2, "chsc_process_crw: reporting source " - "(%04X) isn't a chpid!\n", - sei_area->rsid); - continue; - } - - /* which kind of information was stored? 
*/ - switch (sei_area->cc) { - case 1: /* link incident*/ - CIO_CRW_EVENT(4, "chsc_process_crw: " - "channel subsystem reports link incident," - " reporting source is chpid %x\n", - sei_area->rsid); - chpid = __get_chpid_from_lir(sei_area->ccdf); - if (chpid < 0) - CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n", - __FUNCTION__); - else - s390_set_chpid_offline(chpid); - break; - - case 2: /* i/o resource accessibiliy */ - CIO_CRW_EVENT(4, "chsc_process_crw: " - "channel subsystem reports some I/O " - "devices may have become accessible\n"); - pr_debug("Data received after sei: \n"); - pr_debug("Validity flags: %x\n", sei_area->vf); - - /* allocate a new channel path structure, if needed */ - status = get_chp_status(sei_area->rsid); - if (status < 0) - new_channel_path(sei_area->rsid); - else if (!status) - break; - dev = get_device(&css[0]->chps[sei_area->rsid]->dev); - res_data.chp = to_channelpath(dev); - pr_debug("chpid: %x", sei_area->rsid); - if ((sei_area->vf & 0xc0) != 0) { - res_data.fla = sei_area->fla; - if ((sei_area->vf & 0xc0) == 0xc0) { - pr_debug(" full link addr: %x", - sei_area->fla); - res_data.fla_mask = 0xffff; - } else { - pr_debug(" link addr: %x", - sei_area->fla); - res_data.fla_mask = 0xff00; - } - } - ret = s390_process_res_acc(&res_data); - pr_debug("\n\n"); - put_device(dev); - break; - - default: /* other stuff */ - CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n", - sei_area->cc); + ret = 0; break; } } while (sei_area->flags & 0x80); + return ret; } -static inline int +static int __chp_add_new_sch(struct subchannel_id schid) { struct schib schib; int ret; - if (stsch(schid, &schib)) + if (stsch_err(schid, &schib)) /* We're through */ return need_rescan ? -EAGAIN : -ENXIO; @@ -635,21 +627,21 @@ __chp_add(struct subchannel_id schid, void *data) if (!sch) /* Check if the subchannel is now available. */ return __chp_add_new_sch(schid); - spin_lock_irq(&sch->lock); + spin_lock_irq(sch->lock); for (i=0; i<8; i++) { mask = 0x80 >> i; if ((sch->schib.pmcw.pim & mask) && (sch->schib.pmcw.chpid[i] == chp->id)) { if (stsch(sch->schid, &sch->schib) != 0) { /* Endgame. 
*/ - spin_unlock_irq(&sch->lock); + spin_unlock_irq(sch->lock); return -ENXIO; } break; } } if (i==8) { - spin_unlock_irq(&sch->lock); + spin_unlock_irq(sch->lock); return 0; } sch->lpm = ((sch->schib.pmcw.pim & @@ -660,7 +652,7 @@ __chp_add(struct subchannel_id schid, void *data) if (sch->driver && sch->driver->verify) sch->driver->verify(&sch->dev); - spin_unlock_irq(&sch->lock); + spin_unlock_irq(sch->lock); put_device(&sch->dev); return 0; } @@ -709,7 +701,7 @@ chp_process_crw(int chpid, int on) return chp_add(chpid); } -static inline int check_for_io_on_path(struct subchannel *sch, int index) +static int check_for_io_on_path(struct subchannel *sch, int index) { int cc; @@ -741,7 +733,7 @@ static void terminate_internal_io(struct subchannel *sch) sch->driver->termination(&sch->dev); } -static inline void +static void __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) { int chp, old_lpm; @@ -750,7 +742,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) if (!sch->ssd_info.valid) return; - spin_lock_irqsave(&sch->lock, flags); + spin_lock_irqsave(sch->lock, flags); old_lpm = sch->lpm; for (chp = 0; chp < 8; chp++) { if (sch->ssd_info.chpid[chp] != chpid) @@ -785,7 +777,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) sch->driver->verify(&sch->dev); break; } - spin_unlock_irqrestore(&sch->lock, flags); + spin_unlock_irqrestore(sch->lock, flags); } static int @@ -967,8 +959,8 @@ static struct bin_attribute chp_measurement_attr = { static void chsc_remove_chp_cmg_attr(struct channel_path *chp) { - sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr); - sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr); + device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); + device_remove_bin_file(&chp->dev, &chp_measurement_attr); } static int @@ -976,14 +968,12 @@ chsc_add_chp_cmg_attr(struct channel_path *chp) { int ret; - ret = sysfs_create_bin_file(&chp->dev.kobj, - &chp_measurement_chars_attr); + ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr); if (ret) return ret; - ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr); + ret = device_create_bin_file(&chp->dev, &chp_measurement_attr); if (ret) - sysfs_remove_bin_file(&chp->dev.kobj, - &chp_measurement_chars_attr); + device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); return ret; } @@ -1042,7 +1032,7 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) u32 : 4; u32 fmt : 4; u32 : 16; - } *secm_area; + } __attribute__ ((packed)) *secm_area; int ret, ccode; secm_area = page; @@ -1253,7 +1243,7 @@ chsc_determine_channel_path_description(int chpid, struct chsc_header response; u32 zeroes2; struct channel_path_desc desc; - } *scpd_area; + } __attribute__ ((packed)) *scpd_area; scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!scpd_area) @@ -1350,7 +1340,7 @@ chsc_get_channel_measurement_chars(struct channel_path *chp) u32 cmg : 8; u32 zeroes3; u32 data[NR_MEASUREMENT_CHARS]; - } *scmc_area; + } __attribute__ ((packed)) *scmc_area; scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!scmc_area) @@ -1517,7 +1507,7 @@ chsc_enable_facility(int operation_code) u32 reserved5:4; u32 format2:4; u32 reserved6:24; - } *sda_area; + } __attribute__ ((packed)) *sda_area; sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); if (!sda_area) @@ -1569,7 +1559,7 @@ chsc_determine_css_characteristics(void) u32 reserved4; u32 general_char[510]; u32 chsc_char[518]; - } 
*scsc_area; + } __attribute__ ((packed)) *scsc_area; scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!scsc_area) { diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index a259245780a..0fb2b024208 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -10,17 +10,17 @@ struct chsc_header { u16 length; u16 code; -}; +} __attribute__ ((packed)); #define NR_MEASUREMENT_CHARS 5 struct cmg_chars { u32 values[NR_MEASUREMENT_CHARS]; -}; +} __attribute__ ((packed)); #define NR_MEASUREMENT_ENTRIES 8 struct cmg_entry { u32 values[NR_MEASUREMENT_ENTRIES]; -}; +} __attribute__ ((packed)); struct channel_path_desc { u8 flags; @@ -31,7 +31,7 @@ struct channel_path_desc { u8 zeroes; u8 chla; u8 chpp; -}; +} __attribute__ ((packed)); struct channel_path { int id; @@ -47,6 +47,9 @@ struct channel_path { extern void s390_process_css( void ); extern void chsc_validate_chpids(struct subchannel *); extern void chpid_is_actually_online(int); +extern int css_get_ssd_info(struct subchannel *); +extern int chsc_process_crw(void); +extern int chp_process_crw(int, int); struct css_general_char { u64 : 41; diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 20aee278384..b3a56dc5f68 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -2,8 +2,7 @@ * drivers/s390/cio/cio.c * S/390 common I/O routines -- low level i/o calls * - * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright (C) IBM Corp. 1999,2006 * Author(s): Ingo Adlung (adlung@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) * Arnd Bergmann (arndb@de.ibm.com) @@ -123,7 +122,7 @@ cio_get_options (struct subchannel *sch) * Use tpi to get a pending interrupt, call the interrupt handler and * return a pointer to the subchannel structure. */ -static inline int +static int cio_tpi(void) { struct tpi_info *tpi_info; @@ -143,17 +142,17 @@ cio_tpi(void) return 1; local_bh_disable(); irq_enter (); - spin_lock(&sch->lock); + spin_lock(sch->lock); memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw)); if (sch->driver && sch->driver->irq) sch->driver->irq(&sch->dev); - spin_unlock(&sch->lock); + spin_unlock(sch->lock); irq_exit (); _local_bh_enable(); return 1; } -static inline int +static int cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) { char dbf_text[15]; @@ -415,6 +414,8 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc) CIO_TRACE_EVENT (2, "ensch"); CIO_TRACE_EVENT (2, sch->dev.bus_id); + if (sch_is_pseudo_sch(sch)) + return -EINVAL; ccode = stsch (sch->schid, &sch->schib); if (ccode) return -ENODEV; @@ -462,6 +463,8 @@ cio_disable_subchannel (struct subchannel *sch) CIO_TRACE_EVENT (2, "dissch"); CIO_TRACE_EVENT (2, sch->dev.bus_id); + if (sch_is_pseudo_sch(sch)) + return 0; ccode = stsch (sch->schid, &sch->schib); if (ccode == 3) /* Not operational. */ return -ENODEV; @@ -496,6 +499,15 @@ cio_disable_subchannel (struct subchannel *sch) return ret; } +int cio_create_sch_lock(struct subchannel *sch) +{ + sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL); + if (!sch->lock) + return -ENOMEM; + spin_lock_init(sch->lock); + return 0; +} + /* * cio_validate_subchannel() * @@ -513,6 +525,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) { char dbf_txt[15]; int ccode; + int err; sprintf (dbf_txt, "valsch%x", schid.sch_no); CIO_TRACE_EVENT (4, dbf_txt); @@ -520,9 +533,15 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) /* Nuke all fields. 
*/ memset(sch, 0, sizeof(struct subchannel)); - spin_lock_init(&sch->lock); + sch->schid = schid; + if (cio_is_console(schid)) { + sch->lock = cio_get_console_lock(); + } else { + err = cio_create_sch_lock(sch); + if (err) + goto out; + } mutex_init(&sch->reg_mutex); - /* Set a name for the subchannel */ snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid, schid.sch_no); @@ -534,10 +553,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) * is not valid. */ ccode = stsch_err (schid, &sch->schib); - if (ccode) - return (ccode == 3) ? -ENXIO : ccode; - - sch->schid = schid; + if (ccode) { + err = (ccode == 3) ? -ENXIO : ccode; + goto out; + } /* Copy subchannel type from path management control word. */ sch->st = sch->schib.pmcw.st; @@ -550,24 +569,27 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) "non-I/O subchannel type %04X\n", sch->schid.ssid, sch->schid.sch_no, sch->st); /* We stop here for non-io subchannels. */ - return sch->st; + err = sch->st; + goto out; } /* Initialization for io subchannels. */ - if (!sch->schib.pmcw.dnv) + if (!sch->schib.pmcw.dnv) { /* io subchannel but device number is invalid. */ - return -ENODEV; - + err = -ENODEV; + goto out; + } /* Devno is valid. */ if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) { /* * This device must not be known to Linux. So we simply * say that there is no device and return ENODEV. */ - CIO_MSG_EVENT(0, "Blacklisted device detected " + CIO_MSG_EVENT(4, "Blacklisted device detected " "at devno %04X, subchannel set %x\n", sch->schib.pmcw.dev, sch->schid.ssid); - return -ENODEV; + err = -ENODEV; + goto out; } sch->opm = 0xff; if (!cio_is_console(sch->schid)) @@ -595,6 +617,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) if ((sch->lpm & (sch->lpm - 1)) != 0) sch->schib.pmcw.mp = 1; /* multipath mode */ return 0; +out: + if (!cio_is_console(schid)) + kfree(sch->lock); + sch->lock = NULL; + return err; } /* @@ -619,7 +646,7 @@ do_IRQ (struct pt_regs *regs) * Make sure that the i/o interrupt did not "overtake" * the last HZ timer interrupt. */ - account_ticks(); + account_ticks(S390_lowcore.int_clock); /* * Get interrupt information from lowcore */ @@ -637,7 +664,7 @@ do_IRQ (struct pt_regs *regs) } sch = (struct subchannel *)(unsigned long)tpi_info->intparm; if (sch) - spin_lock(&sch->lock); + spin_lock(sch->lock); /* Store interrupt response block to lowcore. */ if (tsch (tpi_info->schid, irb) == 0 && sch) { /* Keep subchannel information word up to date. */ @@ -648,7 +675,7 @@ do_IRQ (struct pt_regs *regs) sch->driver->irq(&sch->dev); } if (sch) - spin_unlock(&sch->lock); + spin_unlock(sch->lock); /* * Are more interrupts pending? * If so, the tpi instruction will update the lowcore @@ -687,10 +714,10 @@ wait_cons_dev (void) __ctl_load (cr6, 6, 6); do { - spin_unlock(&console_subchannel.lock); + spin_unlock(console_subchannel.lock); if (!cio_tpi()) cpu_relax(); - spin_lock(&console_subchannel.lock); + spin_lock(console_subchannel.lock); } while (console_subchannel.schib.scsw.actl != 0); /* * restore previous isc value @@ -805,7 +832,7 @@ cio_get_console_subchannel(void) } #endif -static inline int +static int __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) { int retry, cc; @@ -823,7 +850,20 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) return -EBUSY; /* uhm... 
*/ } -static inline int +/* we can't use the normal udelay here, since it enables external interrupts */ + +static void udelay_reset(unsigned long usecs) +{ + uint64_t start_cc, end_cc; + + asm volatile ("STCK %0" : "=m" (start_cc)); + do { + cpu_relax(); + asm volatile ("STCK %0" : "=m" (end_cc)); + } while (((end_cc - start_cc)/4096) < usecs); +} + +static int __clear_subchannel_easy(struct subchannel_id schid) { int retry; @@ -838,16 +878,41 @@ __clear_subchannel_easy(struct subchannel_id schid) if (schid_equal(&ti.schid, &schid)) return 0; } - udelay(100); + udelay_reset(100); } return -EBUSY; } +static int pgm_check_occured; + +static void cio_reset_pgm_check_handler(void) +{ + pgm_check_occured = 1; +} + +static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr) +{ + int rc; + + pgm_check_occured = 0; + s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; + rc = stsch(schid, addr); + s390_base_pgm_handler_fn = NULL; + + /* The program check handler could have changed pgm_check_occured. */ + barrier(); + + if (pgm_check_occured) + return -EIO; + else + return rc; +} + static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) { struct schib schib; - if (stsch_err(schid, &schib)) + if (stsch_reset(schid, &schib)) return -ENXIO; if (!schib.pmcw.ena) return 0; @@ -892,7 +957,7 @@ static void css_reset(void) /* Reset subchannels. */ for_each_subchannel(__shutdown_subchannel_easy, NULL); /* Reset channel paths. */ - s390_reset_mcck_handler = s390_reset_chpids_mcck_handler; + s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler; /* Enable channel report machine checks. */ __ctl_set_bit(14, 28); /* Temporarily reenable machine checks. */ @@ -917,7 +982,7 @@ static void css_reset(void) local_mcck_disable(); /* Disable channel report machine checks. */ __ctl_clear_bit(14, 28); - s390_reset_mcck_handler = NULL; + s390_base_mcck_handler_fn = NULL; } static struct reset_call css_reset_call = { @@ -944,7 +1009,7 @@ static int __reipl_subchannel_match(struct subchannel_id schid, void *data) struct schib schib; struct sch_match_id *match_id = data; - if (stsch_err(schid, &schib)) + if (stsch_reset(schid, &schib)) return -ENXIO; if (schib.pmcw.dnv && (schib.pmcw.dev == match_id->devid.devno) && diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 4541c1af4b6..35154a21035 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -87,7 +87,7 @@ struct orb { /* subchannel data structure used by I/O subroutines */ struct subchannel { struct subchannel_id schid; - spinlock_t lock; /* subchannel lock */ + spinlock_t *lock; /* subchannel lock */ struct mutex reg_mutex; enum { SUBCHANNEL_TYPE_IO = 0, @@ -131,15 +131,19 @@ extern int cio_set_options (struct subchannel *, int); extern int cio_get_options (struct subchannel *); extern int cio_modify (struct subchannel *); +int cio_create_sch_lock(struct subchannel *); + /* Use with care. 
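
The subchannel lock is no longer embedded in struct subchannel: cio_create_sch_lock() allocates it separately, while the console subchannel borrows the statically allocated lock returned by cio_get_console_lock() and must never free it. A minimal in-kernel sketch of that ownership rule follows; all demo_* names are hypothetical.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_sch {
	spinlock_t *lock;	/* owned or borrowed, see below */
	int is_console;
};

static DEFINE_SPINLOCK(demo_console_lock);

static int demo_attach_lock(struct demo_sch *sch)
{
	if (sch->is_console) {
		sch->lock = &demo_console_lock;	/* borrowed, statically allocated */
		return 0;
	}
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;
	spin_lock_init(sch->lock);		/* owned, freed on teardown */
	return 0;
}

static void demo_detach_lock(struct demo_sch *sch)
{
	if (!sch->is_console)
		kfree(sch->lock);		/* never free the console lock */
	sch->lock = NULL;
}

Turning the lock into a pointer is also what lets cdev->ccwlock simply follow whichever parent the ccw device currently has: both sch->lock and css->pseudo_subchannel->lock can be assigned to it without copying a lock.
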
*/ #ifdef CONFIG_CCW_CONSOLE extern struct subchannel *cio_probe_console(void); extern void cio_release_console(void); extern int cio_is_console(struct subchannel_id); extern struct subchannel *cio_get_console_subchannel(void); +extern spinlock_t * cio_get_console_lock(void); #else #define cio_is_console(schid) 0 #define cio_get_console_subchannel() NULL +#define cio_get_console_lock() NULL; #endif extern int cio_show_msg; diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 828b2d334f0..90b22faabbf 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -519,8 +519,8 @@ struct cmb { /* insert a single device into the cmb_area list * called with cmb_area.lock held from alloc_cmb */ -static inline int alloc_cmb_single (struct ccw_device *cdev, - struct cmb_data *cmb_data) +static int alloc_cmb_single(struct ccw_device *cdev, + struct cmb_data *cmb_data) { struct cmb *cmb; struct ccw_device_private *node; diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 26cf2f5ae2e..fe0ace7aece 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -30,7 +30,7 @@ struct channel_subsystem *css[__MAX_CSSID + 1]; int css_characteristics_avail = 0; -inline int +int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) { struct subchannel_id schid; @@ -91,9 +91,9 @@ css_free_subchannel(struct subchannel *sch) /* Reset intparm to zeroes. */ sch->schib.pmcw.intparm = 0; cio_modify(sch); + kfree(sch->lock); kfree(sch); } - } static void @@ -102,13 +102,12 @@ css_subchannel_release(struct device *dev) struct subchannel *sch; sch = to_subchannel(dev); - if (!cio_is_console(sch->schid)) + if (!cio_is_console(sch->schid)) { + kfree(sch->lock); kfree(sch); + } } -extern int css_get_ssd_info(struct subchannel *sch); - - int css_sch_device_register(struct subchannel *sch) { int ret; @@ -135,14 +134,17 @@ css_register_subchannel(struct subchannel *sch) sch->dev.parent = &css[0]->device; sch->dev.bus = &css_bus_type; sch->dev.release = &css_subchannel_release; - + sch->dev.groups = subch_attr_groups; + + css_get_ssd_info(sch); + /* make it known to the system */ ret = css_sch_device_register(sch); - if (ret) + if (ret) { printk (KERN_WARNING "%s: could not register %s\n", __func__, sch->dev.bus_id); - else - css_get_ssd_info(sch); + return ret; + } return ret; } @@ -182,7 +184,7 @@ get_subchannel_by_schid(struct subchannel_id schid) return dev ? to_subchannel(dev) : NULL; } -static inline int css_get_subchannel_status(struct subchannel *sch) +static int css_get_subchannel_status(struct subchannel *sch) { struct schib schib; @@ -201,18 +203,18 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) unsigned long flags; enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action; - spin_lock_irqsave(&sch->lock, flags); + spin_lock_irqsave(sch->lock, flags); disc = device_is_disconnected(sch); if (disc && slow) { /* Disconnected devices are evaluated directly only.*/ - spin_unlock_irqrestore(&sch->lock, flags); + spin_unlock_irqrestore(sch->lock, flags); return 0; } /* No interrupt after machine check - kill pending timers. */ device_kill_pending_timer(sch); if (!disc && !slow) { /* Non-disconnected devices are evaluated on the slow path. */ - spin_unlock_irqrestore(&sch->lock, flags); + spin_unlock_irqrestore(sch->lock, flags); return -EAGAIN; } event = css_get_subchannel_status(sch); @@ -237,9 +239,9 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) /* Ask driver what to do with device. 
*/ action = UNREGISTER; if (sch->driver && sch->driver->notify) { - spin_unlock_irqrestore(&sch->lock, flags); + spin_unlock_irqrestore(sch->lock, flags); ret = sch->driver->notify(&sch->dev, event); - spin_lock_irqsave(&sch->lock, flags); + spin_lock_irqsave(sch->lock, flags); if (ret) action = NONE; } @@ -264,9 +266,9 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) case UNREGISTER: case UNREGISTER_PROBE: /* Unregister device (will use subchannel lock). */ - spin_unlock_irqrestore(&sch->lock, flags); + spin_unlock_irqrestore(sch->lock, flags); css_sch_device_unregister(sch); - spin_lock_irqsave(&sch->lock, flags); + spin_lock_irqsave(sch->lock, flags); /* Reset intparm to zeroes. */ sch->schib.pmcw.intparm = 0; @@ -278,7 +280,7 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) default: break; } - spin_unlock_irqrestore(&sch->lock, flags); + spin_unlock_irqrestore(sch->lock, flags); /* Probe if necessary. */ if (action == UNREGISTER_PROBE) ret = css_probe_device(sch->schid); @@ -294,7 +296,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) /* Will be done on the slow path. */ return -EAGAIN; } - if (stsch(schid, &schib) || !schib.pmcw.dnv) { + if (stsch_err(schid, &schib) || !schib.pmcw.dnv) { /* Unusable - ignore. */ return 0; } @@ -412,7 +414,7 @@ static void reprobe_all(struct work_struct *unused) need_reprobe); } -DECLARE_WORK(css_reprobe_work, reprobe_all); +static DECLARE_WORK(css_reprobe_work, reprobe_all); /* Schedule reprobing of all unregistered subchannels. */ void css_schedule_reprobe(void) @@ -573,12 +575,24 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); -static inline void __init -setup_css(int nr) +static int __init setup_css(int nr) { u32 tod_high; + int ret; memset(css[nr], 0, sizeof(struct channel_subsystem)); + css[nr]->pseudo_subchannel = + kzalloc(sizeof(*css[nr]->pseudo_subchannel), GFP_KERNEL); + if (!css[nr]->pseudo_subchannel) + return -ENOMEM; + css[nr]->pseudo_subchannel->dev.parent = &css[nr]->device; + css[nr]->pseudo_subchannel->dev.release = css_subchannel_release; + sprintf(css[nr]->pseudo_subchannel->dev.bus_id, "defunct"); + ret = cio_create_sch_lock(css[nr]->pseudo_subchannel); + if (ret) { + kfree(css[nr]->pseudo_subchannel); + return ret; + } mutex_init(&css[nr]->mutex); css[nr]->valid = 1; css[nr]->cssid = nr; @@ -586,6 +600,7 @@ setup_css(int nr) css[nr]->device.release = channel_subsystem_release; tod_high = (u32) (get_clock() >> 32); css_generate_pgid(css[nr], tod_high); + return 0; } /* @@ -622,10 +637,12 @@ init_channel_subsystem (void) ret = -ENOMEM; goto out_unregister; } - setup_css(i); - ret = device_register(&css[i]->device); + ret = setup_css(i); if (ret) goto out_free; + ret = device_register(&css[i]->device); + if (ret) + goto out_free_all; if (css_characteristics_avail && css_chsc_characteristics.secm) { ret = device_create_file(&css[i]->device, @@ -633,6 +650,9 @@ init_channel_subsystem (void) if (ret) goto out_device; } + ret = device_register(&css[i]->pseudo_subchannel->dev); + if (ret) + goto out_file; } css_init_done = 1; @@ -640,13 +660,19 @@ init_channel_subsystem (void) for_each_subchannel(__init_channel_subsystem, NULL); return 0; +out_file: + device_remove_file(&css[i]->device, &dev_attr_cm_enable); out_device: device_unregister(&css[i]->device); +out_free_all: + kfree(css[i]->pseudo_subchannel->lock); + kfree(css[i]->pseudo_subchannel); 
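
Both the reworked vmlogrdr_register_driver() above and this init_channel_subsystem() error path use the same unwind shape: every failure point jumps to the label that releases exactly what has been set up so far, in reverse order of acquisition. A small self-contained sketch of that shape, with hypothetical setup and undo steps:

#include <stdio.h>

static int setup_a(void) { puts("setup a"); return 0; }
static int setup_b(void) { puts("setup b"); return 0; }
static int setup_c(void) { puts("setup c (fails)"); return -1; }
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int demo_init(void)
{
	int rc;

	rc = setup_a();
	if (rc)
		goto out;
	rc = setup_b();
	if (rc)
		goto out_a;	/* only step a needs undoing */
	rc = setup_c();
	if (rc)
		goto out_b;	/* undo b first, then a */
	return 0;
out_b:
	undo_b();
out_a:
	undo_a();
out:
	return rc;
}

int main(void)
{
	return demo_init() ? 1 : 0;
}

Adding a later setup step only requires one new label and one new goto; none of the existing error paths have to be revisited.
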
out_free: kfree(css[i]); out_unregister: while (i > 0) { i--; + device_unregister(&css[i]->pseudo_subchannel->dev); if (css_characteristics_avail && css_chsc_characteristics.secm) device_remove_file(&css[i]->device, &dev_attr_cm_enable); @@ -658,6 +684,11 @@ out: return ret; } +int sch_is_pseudo_sch(struct subchannel *sch) +{ + return sch == to_css(sch->dev.parent)->pseudo_subchannel; +} + /* * find a driver for a subchannel. They identify by the subchannel * type with the exception that the console subchannel driver has its own diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 9ff064e7176..ca2bab932a8 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -73,6 +73,8 @@ struct senseid { } __attribute__ ((packed,aligned(4))); struct ccw_device_private { + struct ccw_device *cdev; + struct subchannel *sch; int state; /* device state */ atomic_t onoff; unsigned long registered; @@ -141,6 +143,8 @@ extern void css_sch_device_unregister(struct subchannel *); extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); extern int css_init_done; extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); +extern int css_process_crw(int, int); +extern void css_reiterate_subchannels(void); #define __MAX_SUBCHANNEL 65535 #define __MAX_SSID 3 @@ -158,6 +162,8 @@ struct channel_subsystem { int cm_enabled; void *cub_addr1; void *cub_addr2; + /* for orphaned ccw devices */ + struct subchannel *pseudo_subchannel; }; #define to_css(dev) container_of(dev, struct channel_subsystem, device) @@ -185,6 +191,11 @@ void css_clear_subchannel_slow_list(void); int css_slow_subchannels_exist(void); extern int need_rescan; +int sch_is_pseudo_sch(struct subchannel *); + extern struct workqueue_struct *slow_path_wq; extern struct work_struct slow_path_work; + +int subchannel_add_files (struct device *); +extern struct attribute_group *subch_attr_groups[]; #endif diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index d3d3716ff84..e322111fb36 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -23,6 +23,7 @@ #include <asm/param.h> /* HZ */ #include "cio.h" +#include "cio_debug.h" #include "css.h" #include "device.h" #include "ioasm.h" @@ -137,7 +138,6 @@ struct bus_type ccw_bus_type; static int io_subchannel_probe (struct subchannel *); static int io_subchannel_remove (struct subchannel *); -void io_subchannel_irq (struct device *); static int io_subchannel_notify(struct device *, int); static void io_subchannel_verify(struct device *); static void io_subchannel_ioterm(struct device *); @@ -236,7 +236,6 @@ chpids_show (struct device * dev, struct device_attribute *attr, char * buf) for (chp = 0; chp < 8; chp++) ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); - ret += sprintf (buf+ret, "\n"); return min((ssize_t)PAGE_SIZE, ret); } @@ -294,14 +293,44 @@ online_show (struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, cdev->online ? 
"1\n" : "0\n"); } +int ccw_device_is_orphan(struct ccw_device *cdev) +{ + return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent)); +} + +static void ccw_device_unregister(struct work_struct *work) +{ + struct ccw_device_private *priv; + struct ccw_device *cdev; + + priv = container_of(work, struct ccw_device_private, kick_work); + cdev = priv->cdev; + if (test_and_clear_bit(1, &cdev->private->registered)) + device_unregister(&cdev->dev); + put_device(&cdev->dev); +} + static void ccw_device_remove_disconnected(struct ccw_device *cdev) { struct subchannel *sch; + unsigned long flags; /* * Forced offline in disconnected state means * 'throw away device'. */ + if (ccw_device_is_orphan(cdev)) { + /* Deregister ccw device. */ + spin_lock_irqsave(cdev->ccwlock, flags); + cdev->private->state = DEV_STATE_NOT_OPER; + spin_unlock_irqrestore(cdev->ccwlock, flags); + if (get_device(&cdev->dev)) { + PREPARE_WORK(&cdev->private->kick_work, + ccw_device_unregister); + queue_work(ccw_device_work, &cdev->private->kick_work); + } + return ; + } sch = to_subchannel(cdev->dev.parent); css_sch_device_unregister(sch); /* Reset intparm to zeroes. */ @@ -462,6 +491,8 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf) struct ccw_device *cdev = to_ccwdev(dev); struct subchannel *sch; + if (ccw_device_is_orphan(cdev)) + return sprintf(buf, "no device\n"); switch (cdev->private->state) { case DEV_STATE_BOXED: return sprintf(buf, "boxed\n"); @@ -498,11 +529,10 @@ static struct attribute_group subch_attr_group = { .attrs = subch_attrs, }; -static inline int -subchannel_add_files (struct device *dev) -{ - return sysfs_create_group(&dev->kobj, &subch_attr_group); -} +struct attribute_group *subch_attr_groups[] = { + &subch_attr_group, + NULL, +}; static struct attribute * ccwdev_attrs[] = { &dev_attr_devtype.attr, @@ -518,13 +548,13 @@ static struct attribute_group ccwdev_attr_group = { .attrs = ccwdev_attrs, }; -static inline int +static int device_add_files (struct device *dev) { return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); } -static inline void +static void device_remove_files(struct device *dev) { sysfs_remove_group(&dev->kobj, &ccwdev_attr_group); @@ -563,11 +593,10 @@ match_devno(struct device * dev, void * data) cdev = to_ccwdev(dev); if ((cdev->private->state == DEV_STATE_DISCONNECTED) && + !ccw_device_is_orphan(cdev) && ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) && - (cdev != d->sibling)) { - cdev->private->state = DEV_STATE_NOT_OPER; + (cdev != d->sibling)) return 1; - } return 0; } @@ -584,13 +613,36 @@ static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id, return dev ? to_ccwdev(dev) : NULL; } -static void -ccw_device_add_changed(void *data) +static int match_orphan(struct device *dev, void *data) +{ + struct ccw_dev_id *dev_id; + struct ccw_device *cdev; + + dev_id = data; + cdev = to_ccwdev(dev); + return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id); +} + +static struct ccw_device * +get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css, + struct ccw_dev_id *dev_id) { + struct device *dev; + dev = device_find_child(&css->pseudo_subchannel->dev, dev_id, + match_orphan); + + return dev ? 
to_ccwdev(dev) : NULL; +} + +static void +ccw_device_add_changed(struct work_struct *work) +{ + struct ccw_device_private *priv; struct ccw_device *cdev; - cdev = data; + priv = container_of(work, struct ccw_device_private, kick_work); + cdev = priv->cdev; if (device_add(&cdev->dev)) { put_device(&cdev->dev); return; @@ -602,64 +654,21 @@ ccw_device_add_changed(void *data) } } -extern int css_get_ssd_info(struct subchannel *sch); - -void -ccw_device_do_unreg_rereg(void *data) +void ccw_device_do_unreg_rereg(struct work_struct *work) { + struct ccw_device_private *priv; struct ccw_device *cdev; struct subchannel *sch; - int need_rename; - cdev = data; + priv = container_of(work, struct ccw_device_private, kick_work); + cdev = priv->cdev; sch = to_subchannel(cdev->dev.parent); - if (cdev->private->dev_id.devno != sch->schib.pmcw.dev) { - /* - * The device number has changed. This is usually only when - * a device has been detached under VM and then re-appeared - * on another subchannel because of a different attachment - * order than before. Ideally, we should should just switch - * subchannels, but unfortunately, this is not possible with - * the current implementation. - * Instead, we search for the old subchannel for this device - * number and deregister so there are no collisions with the - * newly registered ccw_device. - * FIXME: Find another solution so the block layer doesn't - * get possibly sick... - */ - struct ccw_device *other_cdev; - struct ccw_dev_id dev_id; - - need_rename = 1; - dev_id.devno = sch->schib.pmcw.dev; - dev_id.ssid = sch->schid.ssid; - other_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev); - if (other_cdev) { - struct subchannel *other_sch; - - other_sch = to_subchannel(other_cdev->dev.parent); - if (get_device(&other_sch->dev)) { - stsch(other_sch->schid, &other_sch->schib); - if (other_sch->schib.pmcw.dnv) { - other_sch->schib.pmcw.intparm = 0; - cio_modify(other_sch); - } - css_sch_device_unregister(other_sch); - } - } - /* Update ssd info here. */ - css_get_ssd_info(sch); - cdev->private->dev_id.devno = sch->schib.pmcw.dev; - } else - need_rename = 0; + device_remove_files(&cdev->dev); if (test_and_clear_bit(1, &cdev->private->registered)) device_del(&cdev->dev); - if (need_rename) - snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", - sch->schid.ssid, sch->schib.pmcw.dev); PREPARE_WORK(&cdev->private->kick_work, - ccw_device_add_changed, cdev); + ccw_device_add_changed); queue_work(ccw_device_work, &cdev->private->kick_work); } @@ -673,18 +682,194 @@ ccw_device_release(struct device *dev) kfree(cdev); } +static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch) +{ + struct ccw_device *cdev; + + cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); + if (cdev) { + cdev->private = kzalloc(sizeof(struct ccw_device_private), + GFP_KERNEL | GFP_DMA); + if (cdev->private) + return cdev; + } + kfree(cdev); + return ERR_PTR(-ENOMEM); +} + +static int io_subchannel_initialize_dev(struct subchannel *sch, + struct ccw_device *cdev) +{ + cdev->private->cdev = cdev; + atomic_set(&cdev->private->onoff, 0); + cdev->dev.parent = &sch->dev; + cdev->dev.release = ccw_device_release; + INIT_LIST_HEAD(&cdev->private->kick_work.entry); + /* Do first half of device_register. 
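
device_register() is documented as device_initialize() followed by device_add(); splitting it, as io_subchannel_initialize_dev() does here, lets the ccw layer take and drop references on a device that is not yet visible in sysfs and publish it only once recognition succeeds. A minimal in-kernel sketch of the same two-phase pattern; demo_* names are hypothetical, and device naming and bus wiring are omitted.

#include <linux/device.h>
#include <linux/slab.h>

struct demo_dev {
	struct device dev;
};

static void demo_release(struct device *dev)
{
	kfree(container_of(dev, struct demo_dev, dev));
}

static struct demo_dev *demo_alloc(struct device *parent)
{
	struct demo_dev *d;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return NULL;
	d->dev.parent = parent;
	d->dev.release = demo_release;
	device_initialize(&d->dev);	/* phase one: refcounting works now */
	return d;			/* not yet visible anywhere */
}

static int demo_publish(struct demo_dev *d)
{
	int rc;

	/* a real driver would also set the device name before this call */
	rc = device_add(&d->dev);	/* phase two: visible to user space */
	if (rc)
		put_device(&d->dev);	/* drops the ref; release() frees d */
	return rc;
}

If device_add() fails, that final put_device() is what frees the structure through the release() callback, which is why demo_alloc() wires up dev.release before handing the device out.
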
*/ + device_initialize(&cdev->dev); + if (!get_device(&sch->dev)) { + if (cdev->dev.release) + cdev->dev.release(&cdev->dev); + return -ENODEV; + } + return 0; +} + +static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch) +{ + struct ccw_device *cdev; + int ret; + + cdev = io_subchannel_allocate_dev(sch); + if (!IS_ERR(cdev)) { + ret = io_subchannel_initialize_dev(sch, cdev); + if (ret) { + kfree(cdev); + cdev = ERR_PTR(ret); + } + } + return cdev; +} + +static int io_subchannel_recog(struct ccw_device *, struct subchannel *); + +static void sch_attach_device(struct subchannel *sch, + struct ccw_device *cdev) +{ + spin_lock_irq(sch->lock); + sch->dev.driver_data = cdev; + cdev->private->schid = sch->schid; + cdev->ccwlock = sch->lock; + device_trigger_reprobe(sch); + spin_unlock_irq(sch->lock); +} + +static void sch_attach_disconnected_device(struct subchannel *sch, + struct ccw_device *cdev) +{ + struct subchannel *other_sch; + int ret; + + other_sch = to_subchannel(get_device(cdev->dev.parent)); + ret = device_move(&cdev->dev, &sch->dev); + if (ret) { + CIO_MSG_EVENT(2, "Moving disconnected device 0.%x.%04x failed " + "(ret=%d)!\n", cdev->private->dev_id.ssid, + cdev->private->dev_id.devno, ret); + put_device(&other_sch->dev); + return; + } + other_sch->dev.driver_data = NULL; + /* No need to keep a subchannel without ccw device around. */ + css_sch_device_unregister(other_sch); + put_device(&other_sch->dev); + sch_attach_device(sch, cdev); +} + +static void sch_attach_orphaned_device(struct subchannel *sch, + struct ccw_device *cdev) +{ + int ret; + + /* Try to move the ccw device to its new subchannel. */ + ret = device_move(&cdev->dev, &sch->dev); + if (ret) { + CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage " + "failed (ret=%d)!\n", + cdev->private->dev_id.ssid, + cdev->private->dev_id.devno, ret); + return; + } + sch_attach_device(sch, cdev); +} + +static void sch_create_and_recog_new_device(struct subchannel *sch) +{ + struct ccw_device *cdev; + + /* Need to allocate a new ccw device. */ + cdev = io_subchannel_create_ccwdev(sch); + if (IS_ERR(cdev)) { + /* OK, we did everything we could... */ + css_sch_device_unregister(sch); + return; + } + spin_lock_irq(sch->lock); + sch->dev.driver_data = cdev; + spin_unlock_irq(sch->lock); + /* Start recognition for the new ccw device. */ + if (io_subchannel_recog(cdev, sch)) { + spin_lock_irq(sch->lock); + sch->dev.driver_data = NULL; + spin_unlock_irq(sch->lock); + if (cdev->dev.release) + cdev->dev.release(&cdev->dev); + css_sch_device_unregister(sch); + } +} + + +void ccw_device_move_to_orphanage(struct work_struct *work) +{ + struct ccw_device_private *priv; + struct ccw_device *cdev; + struct ccw_device *replacing_cdev; + struct subchannel *sch; + int ret; + struct channel_subsystem *css; + struct ccw_dev_id dev_id; + + priv = container_of(work, struct ccw_device_private, kick_work); + cdev = priv->cdev; + sch = to_subchannel(cdev->dev.parent); + css = to_css(sch->dev.parent); + dev_id.devno = sch->schib.pmcw.dev; + dev_id.ssid = sch->schid.ssid; + + /* + * Move the orphaned ccw device to the orphanage so the replacing + * ccw device can take its place on the subchannel. 
+ */ + ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev); + if (ret) { + CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed " + "(ret=%d)!\n", cdev->private->dev_id.ssid, + cdev->private->dev_id.devno, ret); + return; + } + cdev->ccwlock = css->pseudo_subchannel->lock; + /* + * Search for the replacing ccw device + * - among the disconnected devices + * - in the orphanage + */ + replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev); + if (replacing_cdev) { + sch_attach_disconnected_device(sch, replacing_cdev); + return; + } + replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id); + if (replacing_cdev) { + sch_attach_orphaned_device(sch, replacing_cdev); + return; + } + sch_create_and_recog_new_device(sch); +} + /* * Register recognized device. */ static void -io_subchannel_register(void *data) +io_subchannel_register(struct work_struct *work) { + struct ccw_device_private *priv; struct ccw_device *cdev; struct subchannel *sch; int ret; unsigned long flags; - cdev = data; + priv = container_of(work, struct ccw_device_private, kick_work); + cdev = priv->cdev; sch = to_subchannel(cdev->dev.parent); /* @@ -709,9 +894,9 @@ io_subchannel_register(void *data) printk (KERN_WARNING "%s: could not register %s\n", __func__, cdev->dev.bus_id); put_device(&cdev->dev); - spin_lock_irqsave(&sch->lock, flags); + spin_lock_irqsave(sch->lock, flags); sch->dev.driver_data = NULL; - spin_unlock_irqrestore(&sch->lock, flags); + spin_unlock_irqrestore(sch->lock, flags); kfree (cdev->private); kfree (cdev); put_device(&sch->dev); @@ -719,11 +904,6 @@ io_subchannel_register(void *data) wake_up(&ccw_device_init_wq); return; } - - ret = subchannel_add_files(cdev->dev.parent); - if (ret) - printk(KERN_WARNING "%s: could not add attributes to %s\n", - __func__, sch->dev.bus_id); put_device(&cdev->dev); out: cdev->private->flags.recog_done = 1; @@ -734,11 +914,14 @@ out: } void -ccw_device_call_sch_unregister(void *data) +ccw_device_call_sch_unregister(struct work_struct *work) { - struct ccw_device *cdev = data; + struct ccw_device_private *priv; + struct ccw_device *cdev; struct subchannel *sch; + priv = container_of(work, struct ccw_device_private, kick_work); + cdev = priv->cdev; sch = to_subchannel(cdev->dev.parent); css_sch_device_unregister(sch); /* Reset intparm to zeroes. */ @@ -768,7 +951,7 @@ io_subchannel_recog_done(struct ccw_device *cdev) break; sch = to_subchannel(cdev->dev.parent); PREPARE_WORK(&cdev->private->kick_work, - ccw_device_call_sch_unregister, cdev); + ccw_device_call_sch_unregister); queue_work(slow_path_wq, &cdev->private->kick_work); if (atomic_dec_and_test(&ccw_device_init_count)) wake_up(&ccw_device_init_wq); @@ -783,7 +966,7 @@ io_subchannel_recog_done(struct ccw_device *cdev) if (!get_device(&cdev->dev)) break; PREPARE_WORK(&cdev->private->kick_work, - io_subchannel_register, cdev); + io_subchannel_register); queue_work(slow_path_wq, &cdev->private->kick_work); break; } @@ -797,7 +980,7 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) sch->dev.driver_data = cdev; sch->driver = &io_subchannel_driver; - cdev->ccwlock = &sch->lock; + cdev->ccwlock = sch->lock; /* Init private data. */ priv = cdev->private; @@ -817,9 +1000,9 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) atomic_inc(&ccw_device_init_count); /* Start async. device sensing. 
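
The kick_work handlers converted in this file (io_subchannel_register, ccw_device_call_sch_unregister, ccw_device_move_to_orphanage and, below, ccw_device_move_to_sch) no longer receive a void * context; they are handed the work_struct itself and recover the owning ccw_device_private with container_of(). A small user-space sketch of that recovery step, with work_item and the demo_* names as stand-ins for the kernel types and container_of() written out:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {			/* stand-in for struct work_struct */
	void (*func)(struct work_item *);
};

struct demo_private {			/* stand-in for ccw_device_private */
	unsigned int devno;
	struct work_item kick_work;	/* embedded, not a pointer */
};

static void demo_handler(struct work_item *work)
{
	/* recover the owning structure from the embedded member */
	struct demo_private *priv =
		container_of(work, struct demo_private, kick_work);

	printf("handling device %04x\n", priv->devno);
}

int main(void)
{
	struct demo_private priv = { .devno = 0x1234 };

	priv.kick_work.func = demo_handler;	/* PREPARE_WORK analogue */
	priv.kick_work.func(&priv.kick_work);	/* queue_work analogue */
	return 0;
}

Because the work item is embedded in ccw_device_private rather than passed as a separate pointer, the handler always finds its context; the get_device()/put_device() pairs around queue_work() in this file keep the ccw_device pinned while the work is pending.
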
*/ - spin_lock_irq(&sch->lock); + spin_lock_irq(sch->lock); rc = ccw_device_recognition(cdev); - spin_unlock_irq(&sch->lock); + spin_unlock_irq(sch->lock); if (rc) { if (atomic_dec_and_test(&ccw_device_init_count)) wake_up(&ccw_device_init_wq); @@ -827,12 +1010,55 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) return rc; } +static void ccw_device_move_to_sch(struct work_struct *work) +{ + struct ccw_device_private *priv; + int rc; + struct subchannel *sch; + struct ccw_device *cdev; + struct subchannel *former_parent; + + priv = container_of(work, struct ccw_device_private, kick_work); + sch = priv->sch; + cdev = priv->cdev; + former_parent = ccw_device_is_orphan(cdev) ? + NULL : to_subchannel(get_device(cdev->dev.parent)); + mutex_lock(&sch->reg_mutex); + /* Try to move the ccw device to its new subchannel. */ + rc = device_move(&cdev->dev, &sch->dev); + mutex_unlock(&sch->reg_mutex); + if (rc) { + CIO_MSG_EVENT(2, "Moving device 0.%x.%04x to subchannel " + "0.%x.%04x failed (ret=%d)!\n", + cdev->private->dev_id.ssid, + cdev->private->dev_id.devno, sch->schid.ssid, + sch->schid.sch_no, rc); + css_sch_device_unregister(sch); + goto out; + } + if (former_parent) { + spin_lock_irq(former_parent->lock); + former_parent->dev.driver_data = NULL; + spin_unlock_irq(former_parent->lock); + css_sch_device_unregister(former_parent); + /* Reset intparm to zeroes. */ + former_parent->schib.pmcw.intparm = 0; + cio_modify(former_parent); + } + sch_attach_device(sch, cdev); +out: + if (former_parent) + put_device(&former_parent->dev); + put_device(&cdev->dev); +} + static int io_subchannel_probe (struct subchannel *sch) { struct ccw_device *cdev; int rc; unsigned long flags; + struct ccw_dev_id dev_id; if (sch->dev.driver_data) { /* @@ -843,7 +1069,6 @@ io_subchannel_probe (struct subchannel *sch) cdev = sch->dev.driver_data; device_initialize(&cdev->dev); ccw_device_register(cdev); - subchannel_add_files(&sch->dev); /* * Check if the device is already online. If it is * the reference count needs to be corrected @@ -856,33 +1081,37 @@ io_subchannel_probe (struct subchannel *sch) get_device(&cdev->dev); return 0; } - cdev = kzalloc (sizeof(*cdev), GFP_KERNEL); + /* + * First check if a fitting device may be found amongst the + * disconnected devices or in the orphanage. + */ + dev_id.devno = sch->schib.pmcw.dev; + dev_id.ssid = sch->schid.ssid; + cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); if (!cdev) - return -ENOMEM; - cdev->private = kzalloc(sizeof(struct ccw_device_private), - GFP_KERNEL | GFP_DMA); - if (!cdev->private) { - kfree(cdev); - return -ENOMEM; - } - atomic_set(&cdev->private->onoff, 0); - cdev->dev.parent = &sch->dev; - cdev->dev.release = ccw_device_release; - INIT_LIST_HEAD(&cdev->private->kick_work.entry); - /* Do first half of device_register. */ - device_initialize(&cdev->dev); - - if (!get_device(&sch->dev)) { - if (cdev->dev.release) - cdev->dev.release(&cdev->dev); - return -ENODEV; + cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), + &dev_id); + if (cdev) { + /* + * Schedule moving the device until when we have a registered + * subchannel to move to and succeed the probe. We can + * unregister later again, when the probe is through. 
+ */ + cdev->private->sch = sch; + PREPARE_WORK(&cdev->private->kick_work, + ccw_device_move_to_sch); + queue_work(slow_path_wq, &cdev->private->kick_work); + return 0; } + cdev = io_subchannel_create_ccwdev(sch); + if (IS_ERR(cdev)) + return PTR_ERR(cdev); rc = io_subchannel_recog(cdev, sch); if (rc) { - spin_lock_irqsave(&sch->lock, flags); + spin_lock_irqsave(sch->lock, flags); sch->dev.driver_data = NULL; - spin_unlock_irqrestore(&sch->lock, flags); + spin_unlock_irqrestore(sch->lock, flags); if (cdev->dev.release) cdev->dev.release(&cdev->dev); } @@ -890,17 +1119,6 @@ io_subchannel_probe (struct subchannel *sch) return rc; } -static void -ccw_device_unregister(void *data) -{ - struct ccw_device *cdev; - - cdev = (struct ccw_device *)data; - if (test_and_clear_bit(1, &cdev->private->registered)) - device_unregister(&cdev->dev); - put_device(&cdev->dev); -} - static int io_subchannel_remove (struct subchannel *sch) { @@ -921,7 +1139,7 @@ io_subchannel_remove (struct subchannel *sch) */ if (get_device(&cdev->dev)) { PREPARE_WORK(&cdev->private->kick_work, - ccw_device_unregister, cdev); + ccw_device_unregister); queue_work(ccw_device_work, &cdev->private->kick_work); } return 0; @@ -1003,6 +1221,13 @@ static struct ccw_device console_cdev; static struct ccw_device_private console_private; static int console_cdev_in_use; +static DEFINE_SPINLOCK(ccw_console_lock); + +spinlock_t * cio_get_console_lock(void) +{ + return &ccw_console_lock; +} + static int ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) { @@ -1048,6 +1273,7 @@ ccw_device_probe_console(void) memset(&console_cdev, 0, sizeof(struct ccw_device)); memset(&console_private, 0, sizeof(struct ccw_device_private)); console_cdev.private = &console_private; + console_private.cdev = &console_cdev; ret = ccw_device_console_enable(&console_cdev, sch); if (ret) { cio_release_console(); diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 9233b5c0bcc..b66338b7657 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -74,12 +74,15 @@ extern struct workqueue_struct *ccw_device_notify_work; extern wait_queue_head_t ccw_device_init_wq; extern atomic_t ccw_device_init_count; +void io_subchannel_irq (struct device *pdev); void io_subchannel_recog_done(struct ccw_device *cdev); int ccw_device_cancel_halt_clear(struct ccw_device *); -void ccw_device_do_unreg_rereg(void *); -void ccw_device_call_sch_unregister(void *); +void ccw_device_do_unreg_rereg(struct work_struct *); +void ccw_device_call_sch_unregister(struct work_struct *); +void ccw_device_move_to_orphanage(struct work_struct *); +int ccw_device_is_orphan(struct ccw_device *); int ccw_device_recognition(struct ccw_device *); int ccw_device_online(struct ccw_device *); @@ -116,6 +119,7 @@ int ccw_device_stlck(struct ccw_device *); /* qdio needs this. */ void ccw_device_set_timeout(struct ccw_device *, int); extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); +extern struct bus_type ccw_bus_type; /* Channel measurement facility related */ void retry_set_schib(struct ccw_device *cdev); diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 09c7672eb3f..51238e7555b 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -186,15 +186,14 @@ ccw_device_handle_oper(struct ccw_device *cdev) /* * Check if cu type and device type still match. If * not, it is certainly another device and we have to - * de- and re-register. 
Also check here for non-matching devno. + * de- and re-register. */ if (cdev->id.cu_type != cdev->private->senseid.cu_type || cdev->id.cu_model != cdev->private->senseid.cu_model || cdev->id.dev_type != cdev->private->senseid.dev_type || - cdev->id.dev_model != cdev->private->senseid.dev_model || - cdev->private->dev_id.devno != sch->schib.pmcw.dev) { + cdev->id.dev_model != cdev->private->senseid.dev_model) { PREPARE_WORK(&cdev->private->kick_work, - ccw_device_do_unreg_rereg, cdev); + ccw_device_do_unreg_rereg); queue_work(ccw_device_work, &cdev->private->kick_work); return 0; } @@ -207,7 +206,7 @@ ccw_device_handle_oper(struct ccw_device *cdev) * been varied online on the SE so we have to find out by magic (i. e. driving * the channel subsystem to device selection and updating our path masks). */ -static inline void +static void __recover_lost_chpids(struct subchannel *sch, int old_lpm) { int mask, i; @@ -329,19 +328,21 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err) } static void -ccw_device_oper_notify(void *data) +ccw_device_oper_notify(struct work_struct *work) { + struct ccw_device_private *priv; struct ccw_device *cdev; struct subchannel *sch; int ret; - cdev = data; + priv = container_of(work, struct ccw_device_private, kick_work); + cdev = priv->cdev; sch = to_subchannel(cdev->dev.parent); ret = (sch->driver && sch->driver->notify) ? sch->driver->notify(&sch->dev, CIO_OPER) : 0; if (!ret) /* Driver doesn't want device back. */ - ccw_device_do_unreg_rereg(cdev); + ccw_device_do_unreg_rereg(work); else { /* Reenable channel measurements, if needed. */ cmf_reenable(cdev); @@ -377,8 +378,7 @@ ccw_device_done(struct ccw_device *cdev, int state) if (cdev->private->flags.donotify) { cdev->private->flags.donotify = 0; - PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify, - cdev); + PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify); queue_work(ccw_device_notify_work, &cdev->private->kick_work); } wake_up(&cdev->private->wait_q); @@ -387,7 +387,7 @@ ccw_device_done(struct ccw_device *cdev, int state) put_device (&cdev->dev); } -static inline int cmp_pgid(struct pgid *p1, struct pgid *p2) +static int cmp_pgid(struct pgid *p1, struct pgid *p2) { char *c1; char *c2; @@ -528,13 +528,15 @@ ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event) static void -ccw_device_nopath_notify(void *data) +ccw_device_nopath_notify(struct work_struct *work) { + struct ccw_device_private *priv; struct ccw_device *cdev; struct subchannel *sch; int ret; - cdev = data; + priv = container_of(work, struct ccw_device_private, kick_work); + cdev = priv->cdev; sch = to_subchannel(cdev->dev.parent); /* Extra sanity. */ if (sch->lpm) @@ -547,8 +549,7 @@ ccw_device_nopath_notify(void *data) cio_disable_subchannel(sch); if (get_device(&cdev->dev)) { PREPARE_WORK(&cdev->private->kick_work, - ccw_device_call_sch_unregister, - cdev); + ccw_device_call_sch_unregister); queue_work(ccw_device_work, &cdev->private->kick_work); } else @@ -607,7 +608,7 @@ ccw_device_verify_done(struct ccw_device *cdev, int err) /* Reset oper notify indication after verify error. 
*/ cdev->private->flags.donotify = 0; PREPARE_WORK(&cdev->private->kick_work, - ccw_device_nopath_notify, cdev); + ccw_device_nopath_notify); queue_work(ccw_device_notify_work, &cdev->private->kick_work); ccw_device_done(cdev, DEV_STATE_NOT_OPER); break; @@ -674,6 +675,10 @@ ccw_device_offline(struct ccw_device *cdev) { struct subchannel *sch; + if (ccw_device_is_orphan(cdev)) { + ccw_device_done(cdev, DEV_STATE_OFFLINE); + return 0; + } sch = to_subchannel(cdev->dev.parent); if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) return -ENODEV; @@ -738,7 +743,7 @@ ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event) sch = to_subchannel(cdev->dev.parent); if (get_device(&cdev->dev)) { PREPARE_WORK(&cdev->private->kick_work, - ccw_device_call_sch_unregister, cdev); + ccw_device_call_sch_unregister); queue_work(ccw_device_work, &cdev->private->kick_work); } wake_up(&cdev->private->wait_q); @@ -769,7 +774,7 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event) } if (get_device(&cdev->dev)) { PREPARE_WORK(&cdev->private->kick_work, - ccw_device_call_sch_unregister, cdev); + ccw_device_call_sch_unregister); queue_work(ccw_device_work, &cdev->private->kick_work); } wake_up(&cdev->private->wait_q); @@ -837,6 +842,8 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) call_handler_unsol: if (cdev->handler) cdev->handler (cdev, 0, irb); + if (cdev->private->flags.doverify) + ccw_device_online_verify(cdev, 0); return; } /* Accumulate status and find out if a basic sense is needed. */ @@ -874,7 +881,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) sch = to_subchannel(cdev->dev.parent); if (!sch->lpm) { PREPARE_WORK(&cdev->private->kick_work, - ccw_device_nopath_notify, cdev); + ccw_device_nopath_notify); queue_work(ccw_device_notify_work, &cdev->private->kick_work); } else @@ -887,7 +894,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) /* * Got an interrupt for a basic sense. */ -void +static void ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) { struct irb *irb; @@ -969,7 +976,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) ERR_PTR(-EIO)); if (!sch->lpm) { PREPARE_WORK(&cdev->private->kick_work, - ccw_device_nopath_notify, cdev); + ccw_device_nopath_notify); queue_work(ccw_device_notify_work, &cdev->private->kick_work); } else if (cdev->private->flags.doverify) /* Start delayed path verification. */ @@ -992,7 +999,7 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event) sch = to_subchannel(cdev->dev.parent); if (!sch->lpm) { PREPARE_WORK(&cdev->private->kick_work, - ccw_device_nopath_notify, cdev); + ccw_device_nopath_notify); queue_work(ccw_device_notify_work, &cdev->private->kick_work); } else @@ -1021,7 +1028,7 @@ void device_kill_io(struct subchannel *sch) if (ret == -ENODEV) { if (!sch->lpm) { PREPARE_WORK(&cdev->private->kick_work, - ccw_device_nopath_notify, cdev); + ccw_device_nopath_notify); queue_work(ccw_device_notify_work, &cdev->private->kick_work); } else @@ -1033,7 +1040,7 @@ void device_kill_io(struct subchannel *sch) ERR_PTR(-EIO)); if (!sch->lpm) { PREPARE_WORK(&cdev->private->kick_work, - ccw_device_nopath_notify, cdev); + ccw_device_nopath_notify); queue_work(ccw_device_notify_work, &cdev->private->kick_work); } else /* Start delayed path verification. */ @@ -1104,7 +1111,8 @@ device_trigger_reprobe(struct subchannel *sch) /* Update some values. 
*/ if (stsch(sch->schid, &sch->schib)) return; - + if (!sch->schib.pmcw.dnv) + return; /* * The pim, pam, pom values may not be accurate, but they are the best * we have before performing device selection :/ @@ -1118,7 +1126,13 @@ device_trigger_reprobe(struct subchannel *sch) sch->schib.pmcw.mp = 1; sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; /* We should also udate ssd info, but this has to wait. */ - ccw_device_start_id(cdev, 0); + /* Check if this is another device which appeared on the same sch. */ + if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { + PREPARE_WORK(&cdev->private->kick_work, + ccw_device_move_to_orphanage); + queue_work(ccw_device_work, &cdev->private->kick_work); + } else + ccw_device_start_id(cdev, 0); } static void diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index b39c1fa48ac..d7b25b8f71d 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -302,7 +302,7 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb) wake_up(&cdev->private->wait_q); } -static inline int +static int __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm) { int ret; @@ -316,9 +316,9 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _ ccw_device_set_timeout(cdev, 0); if (ret == -EBUSY) { /* Try again later. */ - spin_unlock_irq(&sch->lock); + spin_unlock_irq(sch->lock); msleep(10); - spin_lock_irq(&sch->lock); + spin_lock_irq(sch->lock); continue; } if (ret != 0) @@ -326,12 +326,12 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _ break; /* Wait for end of request. */ cdev->private->intparm = magic; - spin_unlock_irq(&sch->lock); + spin_unlock_irq(sch->lock); wait_event(cdev->private->wait_q, (cdev->private->intparm == -EIO) || (cdev->private->intparm == -EAGAIN) || (cdev->private->intparm == 0)); - spin_lock_irq(&sch->lock); + spin_lock_irq(sch->lock); /* Check at least for channel end / device end */ if (cdev->private->intparm == -EIO) { /* Non-retryable error. */ @@ -342,9 +342,9 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _ /* Success. */ break; /* Try again later. */ - spin_unlock_irq(&sch->lock); + spin_unlock_irq(sch->lock); msleep(10); - spin_lock_irq(&sch->lock); + spin_lock_irq(sch->lock); } while (1); return ret; @@ -389,7 +389,7 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length) return ret; } - spin_lock_irq(&sch->lock); + spin_lock_irq(sch->lock); /* Save interrupt handler. */ handler = cdev->handler; /* Temporarily install own handler. */ @@ -406,7 +406,7 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length) /* Restore interrupt handler. */ cdev->handler = handler; - spin_unlock_irq(&sch->lock); + spin_unlock_irq(sch->lock); clear_normalized_cda (rdc_ccw); kfree(rdc_ccw); @@ -463,7 +463,7 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp rcd_ccw->count = ciw->count; rcd_ccw->flags = CCW_FLAG_SLI; - spin_lock_irq(&sch->lock); + spin_lock_irq(sch->lock); /* Save interrupt handler. */ handler = cdev->handler; /* Temporarily install own handler. */ @@ -480,7 +480,7 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp /* Restore interrupt handler. 
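The spin_lock_irq(&sch->lock) to spin_lock_irq(sch->lock) changes running through device_ops.c reflect that the subchannel's lock is now carried as a spinlock_t pointer rather than an embedded lock, so callers pass the pointer directly. A toy sketch of the shape of that change (the allocation details here are illustrative, not the real cio code):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_subchannel {
        spinlock_t *lock;               /* was: spinlock_t lock; */
};

static int my_subchannel_init(struct my_subchannel *sch)
{
        sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
        if (!sch->lock)
                return -ENOMEM;
        spin_lock_init(sch->lock);
        return 0;
}

static void my_subchannel_touch(struct my_subchannel *sch)
{
        unsigned long flags;

        /* Callers now pass the pointer itself, not &sch->lock. */
        spin_lock_irqsave(sch->lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(sch->lock, flags);
}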
*/ cdev->handler = handler; - spin_unlock_irq(&sch->lock); + spin_unlock_irq(sch->lock); /* * on success we update the user input parms @@ -537,7 +537,7 @@ ccw_device_stlck(struct ccw_device *cdev) kfree(buf); return -ENOMEM; } - spin_lock_irqsave(&sch->lock, flags); + spin_lock_irqsave(sch->lock, flags); ret = cio_enable_subchannel(sch, 3); if (ret) goto out_unlock; @@ -559,9 +559,9 @@ ccw_device_stlck(struct ccw_device *cdev) goto out_unlock; } cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND; - spin_unlock_irqrestore(&sch->lock, flags); + spin_unlock_irqrestore(sch->lock, flags); wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0); - spin_lock_irqsave(&sch->lock, flags); + spin_lock_irqsave(sch->lock, flags); cio_disable_subchannel(sch); //FIXME: return code? if ((cdev->private->irb.scsw.dstat != (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || @@ -572,7 +572,7 @@ ccw_device_stlck(struct ccw_device *cdev) out_unlock: kfree(buf); kfree(buf2); - spin_unlock_irqrestore(&sch->lock, flags); + spin_unlock_irqrestore(sch->lock, flags); return ret; } diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index bdcf930f7be..6b1caea622e 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c @@ -25,7 +25,7 @@ * Check for any kind of channel or interface control check but don't * issue the message for the console device */ -static inline void +static void ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) { if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | @@ -72,7 +72,7 @@ ccw_device_path_notoper(struct ccw_device *cdev) /* * Copy valid bits from the extended control word to device irb. */ -static inline void +static void ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) { /* @@ -94,7 +94,7 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) /* * Check if extended status word is valid. */ -static inline int +static int ccw_device_accumulate_esw_valid(struct irb *irb) { if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) @@ -109,7 +109,7 @@ ccw_device_accumulate_esw_valid(struct irb *irb) /* * Copy valid bits from the extended status word to device irb. 
*/ -static inline void +static void ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) { struct irb *cdev_irb; diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 8d5fa1b4d11..d726cd5777d 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c @@ -46,6 +46,7 @@ #include <asm/timex.h> #include <asm/debug.h> +#include <asm/s390_rdev.h> #include <asm/qdio.h> #include "cio.h" @@ -66,11 +67,10 @@ MODULE_LICENSE("GPL"); static const char version[] = "QDIO base support version 2"; -#ifdef QDIO_PERFORMANCE_STATS +static int qdio_performance_stats = 0; static int proc_perf_file_registration; static unsigned long i_p_c, i_p_nc, o_p_c, o_p_nc, ii_p_c, ii_p_nc; static struct qdio_perf_stats perf_stats; -#endif /* QDIO_PERFORMANCE_STATS */ static int hydra_thinints; static int is_passthrough = 0; @@ -137,7 +137,7 @@ qdio_release_q(struct qdio_q *q) } /*check ccq */ -static inline int +static int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) { char dbf_text[15]; @@ -152,7 +152,7 @@ qdio_check_ccq(struct qdio_q *q, unsigned int ccq) return -EIO; } /* EQBS: extract buffer states */ -static inline int +static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, unsigned int *start, unsigned int *cnt) { @@ -187,7 +187,7 @@ again: } /* SQBS: set buffer states */ -static inline int +static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, unsigned int *start, unsigned int *cnt) { @@ -275,9 +275,8 @@ qdio_siga_sync(struct qdio_q *q, unsigned int gpr2, QDIO_DBF_TEXT4(0,trace,"sigasync"); QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.siga_syncs++; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) + perf_stats.siga_syncs++; cc = do_siga_sync(q->schid, gpr2, gpr3); if (cc) @@ -315,16 +314,15 @@ __do_siga_output(struct qdio_q *q, unsigned int *busy_bit) * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns * an access exception */ -static inline int +static int qdio_siga_output(struct qdio_q *q) { int cc; __u32 busy_bit; __u64 start_time=0; -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.siga_outs++; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) + perf_stats.siga_outs++; QDIO_DBF_TEXT4(0,trace,"sigaout"); QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); @@ -350,7 +348,7 @@ qdio_siga_output(struct qdio_q *q) return cc; } -static inline int +static int qdio_siga_input(struct qdio_q *q) { int cc; @@ -358,9 +356,8 @@ qdio_siga_input(struct qdio_q *q) QDIO_DBF_TEXT4(0,trace,"sigain"); QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.siga_ins++; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) + perf_stats.siga_ins++; cc = do_siga_input(q->schid, q->mask); @@ -423,7 +420,7 @@ tiqdio_sched_tl(void) tasklet_hi_schedule(&tiqdio_tasklet); } -static inline void +static void qdio_mark_tiq(struct qdio_q *q) { unsigned long flags; @@ -473,7 +470,7 @@ qdio_mark_q(struct qdio_q *q) tasklet_schedule(&q->tasklet); } -static inline int +static int qdio_stop_polling(struct qdio_q *q) { #ifdef QDIO_USE_PROCESSING_STATE @@ -527,7 +524,7 @@ qdio_stop_polling(struct qdio_q *q) * sophisticated locking outside of unmark_q, so that we don't need to * disable the interrupts :-) */ -static inline void +static void qdio_unmark_q(struct qdio_q *q) { unsigned long flags; @@ -693,7 +690,7 @@ qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q) return q->first_to_check; } -static inline int +static int qdio_get_outbound_buffer_frontier(struct qdio_q *q) { 
struct qdio_irq *irq; @@ -776,7 +773,7 @@ out: } /* all buffers are processed */ -static inline int +static int qdio_is_outbound_q_done(struct qdio_q *q) { int no_used; @@ -798,7 +795,7 @@ qdio_is_outbound_q_done(struct qdio_q *q) return (no_used==0); } -static inline int +static int qdio_has_outbound_q_moved(struct qdio_q *q) { int i; @@ -818,7 +815,7 @@ qdio_has_outbound_q_moved(struct qdio_q *q) } } -static inline void +static void qdio_kick_outbound_q(struct qdio_q *q) { int result; @@ -907,7 +904,7 @@ qdio_kick_outbound_q(struct qdio_q *q) } } -static inline void +static void qdio_kick_outbound_handler(struct qdio_q *q) { int start, end, real_end, count; @@ -944,7 +941,7 @@ qdio_kick_outbound_handler(struct qdio_q *q) q->error_status_flags=0; } -static inline void +static void __qdio_outbound_processing(struct qdio_q *q) { int siga_attempts; @@ -954,9 +951,8 @@ __qdio_outbound_processing(struct qdio_q *q) if (unlikely(qdio_reserve_q(q))) { qdio_release_q(q); -#ifdef QDIO_PERFORMANCE_STATS - o_p_c++; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) + o_p_c++; /* as we're sissies, we'll check next time */ if (likely(!atomic_read(&q->is_in_shutdown))) { qdio_mark_q(q); @@ -964,10 +960,10 @@ __qdio_outbound_processing(struct qdio_q *q) } return; } -#ifdef QDIO_PERFORMANCE_STATS - o_p_nc++; - perf_stats.tl_runs++; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) { + o_p_nc++; + perf_stats.tl_runs++; + } /* see comment in qdio_kick_outbound_q */ siga_attempts=atomic_read(&q->busy_siga_counter); @@ -982,12 +978,11 @@ __qdio_outbound_processing(struct qdio_q *q) if (q->is_iqdio_q) { /* - * for asynchronous queues, we better check, if the fill - * level is too high. for synchronous queues, the fill - * level will never be that high. + * for asynchronous queues, we better check, if the sent + * buffer is already switched from PRIMED to EMPTY. 
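All the '#ifdef QDIO_PERFORMANCE_STATS' blocks in qdio.c are being replaced by a test of the new qdio_performance_stats integer, so the counters are always compiled in but only updated when the flag is switched on at run time. A condensed sketch of the idiom with hypothetical counter names (the real layout is struct qdio_perf_stats):

#include <linux/types.h>

static int stats_enabled;               /* flipped at run time, e.g. via sysfs */

struct my_perf_stats {
        unsigned long siga_syncs;       /* unsigned long, matching the %lu procfs output */
        unsigned long tl_runs;
};
static struct my_perf_stats my_stats;

static inline void count_siga_sync(void)
{
        /* One predictable branch on the hot path instead of a compile-time
         * #ifdef; gathering can be enabled without rebuilding the module. */
        if (stats_enabled)
                my_stats.siga_syncs++;
}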
*/ - if (atomic_read(&q->number_of_buffers_used)> - IQDIO_FILL_LEVEL_TO_POLL) + if ((q->queue_type == QDIO_IQDIO_QFMT_ASYNCH) && + !qdio_is_outbound_q_done(q)) qdio_mark_q(q); } else if (!q->hydra_gives_outbound_pcis) @@ -1006,7 +1001,7 @@ qdio_outbound_processing(struct qdio_q *q) /************************* INBOUND ROUTINES *******************************/ -static inline int +static int qdio_get_inbound_buffer_frontier(struct qdio_q *q) { struct qdio_irq *irq; @@ -1137,20 +1132,21 @@ out: return q->first_to_check; } -static inline int +static int qdio_has_inbound_q_moved(struct qdio_q *q) { int i; -#ifdef QDIO_PERFORMANCE_STATS static int old_pcis=0; static int old_thinints=0; - if ((old_pcis==perf_stats.pcis)&&(old_thinints==perf_stats.thinints)) - perf_stats.start_time_inbound=NOW; - else - old_pcis=perf_stats.pcis; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) { + if ((old_pcis==perf_stats.pcis)&& + (old_thinints==perf_stats.thinints)) + perf_stats.start_time_inbound=NOW; + else + old_pcis=perf_stats.pcis; + } i=qdio_get_inbound_buffer_frontier(q); if ( (i!=GET_SAVED_FRONTIER(q)) || @@ -1170,7 +1166,7 @@ qdio_has_inbound_q_moved(struct qdio_q *q) } /* means, no more buffers to be filled */ -static inline int +static int tiqdio_is_inbound_q_done(struct qdio_q *q) { int no_used; @@ -1231,7 +1227,7 @@ tiqdio_is_inbound_q_done(struct qdio_q *q) return 0; } -static inline int +static int qdio_is_inbound_q_done(struct qdio_q *q) { int no_used; @@ -1299,7 +1295,7 @@ qdio_is_inbound_q_done(struct qdio_q *q) } } -static inline void +static void qdio_kick_inbound_handler(struct qdio_q *q) { int count, start, end, real_end, i; @@ -1340,13 +1336,13 @@ qdio_kick_inbound_handler(struct qdio_q *q) q->siga_error=0; q->error_status_flags=0; -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound; - perf_stats.inbound_cnt++; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) { + perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound; + perf_stats.inbound_cnt++; + } } -static inline void +static void __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) { struct qdio_irq *irq_ptr; @@ -1363,9 +1359,8 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) */ if (unlikely(qdio_reserve_q(q))) { qdio_release_q(q); -#ifdef QDIO_PERFORMANCE_STATS - ii_p_c++; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) + ii_p_c++; /* * as we might just be about to stop polling, we make * sure that we check again at least once more @@ -1373,9 +1368,8 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) tiqdio_sched_tl(); return; } -#ifdef QDIO_PERFORMANCE_STATS - ii_p_nc++; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) + ii_p_nc++; if (unlikely(atomic_read(&q->is_in_shutdown))) { qdio_unmark_q(q); goto out; @@ -1416,11 +1410,11 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) irq_ptr = (struct qdio_irq*)q->irq_ptr; for (i=0;i<irq_ptr->no_output_qs;i++) { oq = irq_ptr->output_qs[i]; -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.tl_runs--; -#endif /* QDIO_PERFORMANCE_STATS */ - if (!qdio_is_outbound_q_done(oq)) + if (!qdio_is_outbound_q_done(oq)) { + if (qdio_performance_stats) + perf_stats.tl_runs--; __qdio_outbound_processing(oq); + } } } @@ -1447,7 +1441,7 @@ tiqdio_inbound_processing(struct qdio_q *q) __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount)); } -static inline void +static void __qdio_inbound_processing(struct 
qdio_q *q) { int q_laps=0; @@ -1457,9 +1451,8 @@ __qdio_inbound_processing(struct qdio_q *q) if (unlikely(qdio_reserve_q(q))) { qdio_release_q(q); -#ifdef QDIO_PERFORMANCE_STATS - i_p_c++; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) + i_p_c++; /* as we're sissies, we'll check next time */ if (likely(!atomic_read(&q->is_in_shutdown))) { qdio_mark_q(q); @@ -1467,10 +1460,10 @@ __qdio_inbound_processing(struct qdio_q *q) } return; } -#ifdef QDIO_PERFORMANCE_STATS - i_p_nc++; - perf_stats.tl_runs++; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) { + i_p_nc++; + perf_stats.tl_runs++; + } again: if (qdio_has_inbound_q_moved(q)) { @@ -1499,7 +1492,7 @@ qdio_inbound_processing(struct qdio_q *q) /************************* MAIN ROUTINES *******************************/ #ifdef QDIO_USE_PROCESSING_STATE -static inline int +static int tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) { if (!q) { @@ -1516,9 +1509,8 @@ tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) if (unlikely(qdio_reserve_q(q))) { qdio_release_q(q); -#ifdef QDIO_PERFORMANCE_STATS - ii_p_c++; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) + ii_p_c++; /* * as we might just be about to stop polling, we make * sure that we check again at least once more @@ -1552,7 +1544,7 @@ tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) } #endif /* QDIO_USE_PROCESSING_STATE */ -static inline void +static void tiqdio_inbound_checks(void) { struct qdio_q *q; @@ -1609,9 +1601,8 @@ tiqdio_tl(unsigned long data) { QDIO_DBF_TEXT4(0,trace,"iqdio_tl"); -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.tl_runs++; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) + perf_stats.tl_runs++; tiqdio_inbound_checks(); } @@ -1832,6 +1823,10 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev, q->sbal[j]=*(outbound_sbals_array++); q->queue_type=q_format; + if ((q->queue_type == QDIO_IQDIO_QFMT) && + (no_output_qs > 1) && + (i == no_output_qs-1)) + q->queue_type = QDIO_IQDIO_QFMT_ASYNCH; q->int_parm=int_parm; q->is_input_q=0; q->schid = irq_ptr->schid; @@ -1918,10 +1913,10 @@ tiqdio_thinint_handler(void) { QDIO_DBF_TEXT4(0,trace,"thin_int"); -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.thinints++; - perf_stats.start_time_inbound=NOW; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) { + perf_stats.thinints++; + perf_stats.start_time_inbound=NOW; + } /* SVS only when needed: * issue SVS to benefit from iqdio interrupt avoidance @@ -1953,7 +1948,7 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) mb(); } -static inline void +static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) { char dbf_text[15]; @@ -1970,24 +1965,23 @@ qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) } -static inline void +static void qdio_handle_pci(struct qdio_irq *irq_ptr) { int i; struct qdio_q *q; -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.pcis++; - perf_stats.start_time_inbound=NOW; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) { + perf_stats.pcis++; + perf_stats.start_time_inbound=NOW; + } for (i=0;i<irq_ptr->no_input_qs;i++) { q=irq_ptr->input_qs[i]; if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) qdio_mark_q(q); else { -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.tl_runs--; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) + perf_stats.tl_runs--; __qdio_inbound_processing(q); } } @@ -1995,11 +1989,10 @@ qdio_handle_pci(struct qdio_irq *irq_ptr) return; 
for (i=0;i<irq_ptr->no_output_qs;i++) { q=irq_ptr->output_qs[i]; -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.tl_runs--; -#endif /* QDIO_PERFORMANCE_STATS */ if (qdio_is_outbound_q_done(q)) continue; + if (qdio_performance_stats) + perf_stats.tl_runs--; if (!irq_ptr->sync_done_on_outb_pcis) SYNC_MEMORY; __qdio_outbound_processing(q); @@ -2008,7 +2001,7 @@ qdio_handle_pci(struct qdio_irq *irq_ptr) static void qdio_establish_handle_irq(struct ccw_device*, int, int); -static inline void +static void qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm, int cstat, int dstat) { @@ -2045,11 +2038,13 @@ omit_handler_call: } static void -qdio_call_shutdown(void *data) +qdio_call_shutdown(struct work_struct *work) { + struct ccw_device_private *priv; struct ccw_device *cdev; - cdev = (struct ccw_device *)data; + priv = container_of(work, struct ccw_device_private, kick_work); + cdev = priv->cdev; qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); put_device(&cdev->dev); } @@ -2091,7 +2086,7 @@ qdio_timeout_handler(struct ccw_device *cdev) if (get_device(&cdev->dev)) { /* Can't call shutdown from interrupt context. */ PREPARE_WORK(&cdev->private->kick_work, - qdio_call_shutdown, (void *)cdev); + qdio_call_shutdown); queue_work(ccw_device_work, &cdev->private->kick_work); } break; @@ -2233,7 +2228,7 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags, return cc; } -static inline void +static void qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac, unsigned long token) { @@ -2744,7 +2739,7 @@ qdio_free(struct ccw_device *cdev) return 0; } -static inline void +static void qdio_allocate_do_dbf(struct qdio_initialize *init_data) { char dbf_text[20]; /* if a printf printed out more than 8 chars */ @@ -2777,7 +2772,7 @@ qdio_allocate_do_dbf(struct qdio_initialize *init_data) QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*)); } -static inline void +static void qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) { irq_ptr->input_qs[i]->is_iqdio_q = iqfmt; @@ -2796,7 +2791,7 @@ qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY; } -static inline void +static void qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, int j, int iqfmt) { @@ -2817,7 +2812,7 @@ qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, } -static inline void +static void qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) { int i; @@ -2843,7 +2838,7 @@ qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) } } -static inline void +static void qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) { int i; @@ -2869,7 +2864,7 @@ qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) } } -static inline int +static int qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, int dstat) { @@ -3018,7 +3013,7 @@ qdio_allocate(struct qdio_initialize *init_data) return 0; } -int qdio_fill_irq(struct qdio_initialize *init_data) +static int qdio_fill_irq(struct qdio_initialize *init_data) { int i; char dbf_text[15]; @@ -3371,7 +3366,7 @@ qdio_activate(struct ccw_device *cdev, int flags) } /* buffers filled forwards again to make Rick happy */ -static inline void +static void qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, unsigned int count, struct qdio_buffer *buffers) { @@ -3390,7 +3385,7 @@ qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, } } -static inline void +static void 
qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, unsigned int count, struct qdio_buffer *buffers) { @@ -3411,7 +3406,7 @@ qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, } } -static inline void +static void do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, unsigned int qidx, unsigned int count, struct qdio_buffer *buffers) @@ -3447,7 +3442,7 @@ do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, qdio_mark_q(q); } -static inline void +static void do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, unsigned int qidx, unsigned int count, struct qdio_buffer *buffers) @@ -3458,19 +3453,18 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; /* This is the outbound handling of queues */ -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.start_time_outbound=NOW; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) + perf_stats.start_time_outbound=NOW; qdio_do_qdio_fill_output(q,qidx,count,buffers); used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count; if (callflags&QDIO_FLAG_DONT_SIGA) { -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound; - perf_stats.outbound_cnt++; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) { + perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound; + perf_stats.outbound_cnt++; + } return; } if (q->is_iqdio_q) { @@ -3500,9 +3494,8 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, qdio_kick_outbound_q(q); } else { QDIO_DBF_TEXT3(0,trace, "fast-req"); -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.fast_reqs++; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) + perf_stats.fast_reqs++; } } /* @@ -3513,10 +3506,10 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, __qdio_outbound_processing(q); } -#ifdef QDIO_PERFORMANCE_STATS - perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound; - perf_stats.outbound_cnt++; -#endif /* QDIO_PERFORMANCE_STATS */ + if (qdio_performance_stats) { + perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound; + perf_stats.outbound_cnt++; + } } /* count must be 1 in iqdio */ @@ -3574,7 +3567,6 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags, return 0; } -#ifdef QDIO_PERFORMANCE_STATS static int qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset, int buffer_length, int *eof, void *data) @@ -3590,29 +3582,29 @@ qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset, _OUTP_IT("i_p_nc/c=%lu/%lu\n",i_p_nc,i_p_c); _OUTP_IT("ii_p_nc/c=%lu/%lu\n",ii_p_nc,ii_p_c); _OUTP_IT("o_p_nc/c=%lu/%lu\n",o_p_nc,o_p_c); - _OUTP_IT("Number of tasklet runs (total) : %u\n", + _OUTP_IT("Number of tasklet runs (total) : %lu\n", perf_stats.tl_runs); _OUTP_IT("\n"); - _OUTP_IT("Number of SIGA sync's issued : %u\n", + _OUTP_IT("Number of SIGA sync's issued : %lu\n", perf_stats.siga_syncs); - _OUTP_IT("Number of SIGA in's issued : %u\n", + _OUTP_IT("Number of SIGA in's issued : %lu\n", perf_stats.siga_ins); - _OUTP_IT("Number of SIGA out's issued : %u\n", + _OUTP_IT("Number of SIGA out's issued : %lu\n", perf_stats.siga_outs); - _OUTP_IT("Number of PCIs caught : %u\n", + _OUTP_IT("Number of PCIs caught : %lu\n", perf_stats.pcis); - _OUTP_IT("Number of adapter interrupts caught : %u\n", + _OUTP_IT("Number of adapter interrupts caught : %lu\n", perf_stats.thinints); - _OUTP_IT("Number of fast requeues (outg. 
SBALs w/o SIGA) : %u\n", + _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %lu\n", perf_stats.fast_reqs); _OUTP_IT("\n"); - _OUTP_IT("Total time of all inbound actions (us) incl. UL : %u\n", + _OUTP_IT("Total time of all inbound actions (us) incl. UL : %lu\n", perf_stats.inbound_time); - _OUTP_IT("Number of inbound transfers : %u\n", + _OUTP_IT("Number of inbound transfers : %lu\n", perf_stats.inbound_cnt); - _OUTP_IT("Total time of all outbound do_QDIOs (us) : %u\n", + _OUTP_IT("Total time of all outbound do_QDIOs (us) : %lu\n", perf_stats.outbound_time); - _OUTP_IT("Number of do_QDIOs outbound : %u\n", + _OUTP_IT("Number of do_QDIOs outbound : %lu\n", perf_stats.outbound_cnt); _OUTP_IT("\n"); @@ -3620,12 +3612,10 @@ qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset, } static struct proc_dir_entry *qdio_perf_proc_file; -#endif /* QDIO_PERFORMANCE_STATS */ static void qdio_add_procfs_entry(void) { -#ifdef QDIO_PERFORMANCE_STATS proc_perf_file_registration=0; qdio_perf_proc_file=create_proc_entry(QDIO_PERF, S_IFREG|0444,&proc_root); @@ -3637,20 +3627,58 @@ qdio_add_procfs_entry(void) QDIO_PRINT_WARN("was not able to register perf. " \ "proc-file (%i).\n", proc_perf_file_registration); -#endif /* QDIO_PERFORMANCE_STATS */ } static void qdio_remove_procfs_entry(void) { -#ifdef QDIO_PERFORMANCE_STATS perf_stats.tl_runs=0; if (!proc_perf_file_registration) /* means if it went ok earlier */ remove_proc_entry(QDIO_PERF,&proc_root); -#endif /* QDIO_PERFORMANCE_STATS */ } +/** + * attributes in sysfs + *****************************************************************************/ + +static ssize_t +qdio_performance_stats_show(struct bus_type *bus, char *buf) +{ + return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0); +} + +static ssize_t +qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count) +{ + char *tmp; + int i; + + i = simple_strtoul(buf, &tmp, 16); + if ((i == 0) || (i == 1)) { + if (i == qdio_performance_stats) + return count; + qdio_performance_stats = i; + if (i==0) { + /* reset perf. stat. 
info */ + i_p_nc = 0; + i_p_c = 0; + ii_p_nc = 0; + ii_p_c = 0; + o_p_nc = 0; + o_p_c = 0; + memset(&perf_stats, 0, sizeof(struct qdio_perf_stats)); + } + } else { + QDIO_PRINT_WARN("QDIO performance_stats: write 0 or 1 to this file!\n"); + return -EINVAL; + } + return count; +} + +static BUS_ATTR(qdio_performance_stats, 0644, qdio_performance_stats_show, + qdio_performance_stats_store); + static void tiqdio_register_thinints(void) { @@ -3695,6 +3723,7 @@ qdio_release_qdio_memory(void) kfree(indicators); } + static void qdio_unregister_dbf_views(void) { @@ -3796,9 +3825,7 @@ static int __init init_QDIO(void) { int res; -#ifdef QDIO_PERFORMANCE_STATS void *ptr; -#endif /* QDIO_PERFORMANCE_STATS */ printk("qdio: loading %s\n",version); @@ -3811,13 +3838,12 @@ init_QDIO(void) return res; QDIO_DBF_TEXT0(0,setup,"initQDIO"); + res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); -#ifdef QDIO_PERFORMANCE_STATS - memset((void*)&perf_stats,0,sizeof(perf_stats)); + memset((void*)&perf_stats,0,sizeof(perf_stats)); QDIO_DBF_TEXT0(0,setup,"perfstat"); ptr=&perf_stats; QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*)); -#endif /* QDIO_PERFORMANCE_STATS */ qdio_add_procfs_entry(); @@ -3841,7 +3867,7 @@ cleanup_QDIO(void) qdio_release_qdio_memory(); qdio_unregister_dbf_views(); mempool_destroy(qdio_mempool_scssc); - + bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); printk("qdio: %s: module removed\n",version); } diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 42927c1b745..ec9af72b2af 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -12,10 +12,6 @@ #endif /* CONFIG_QDIO_DEBUG */ #define QDIO_USE_PROCESSING_STATE -#ifdef CONFIG_QDIO_PERF_STATS -#define QDIO_PERFORMANCE_STATS -#endif /* CONFIG_QDIO_PERF_STATS */ - #define QDIO_MINIMAL_BH_RELIEF_TIME 16 #define QDIO_TIMER_POLL_VALUE 1 #define IQDIO_TIMER_POLL_VALUE 1 @@ -409,25 +405,23 @@ do_clear_global_summary(void) #define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08 #define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04 -#ifdef QDIO_PERFORMANCE_STATS struct qdio_perf_stats { - unsigned int tl_runs; + unsigned long tl_runs; - unsigned int siga_outs; - unsigned int siga_ins; - unsigned int siga_syncs; - unsigned int pcis; - unsigned int thinints; - unsigned int fast_reqs; + unsigned long siga_outs; + unsigned long siga_ins; + unsigned long siga_syncs; + unsigned long pcis; + unsigned long thinints; + unsigned long fast_reqs; __u64 start_time_outbound; - unsigned int outbound_cnt; - unsigned int outbound_time; + unsigned long outbound_cnt; + unsigned long outbound_time; __u64 start_time_inbound; - unsigned int inbound_cnt; - unsigned int inbound_time; + unsigned long inbound_cnt; + unsigned long inbound_time; }; -#endif /* QDIO_PERFORMANCE_STATS */ /* unlikely as the later the better */ #define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q) diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index e4dc947e74e..c7d1355237b 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -33,6 +33,7 @@ #include <linux/kthread.h> #include <linux/mutex.h> #include <asm/s390_rdev.h> +#include <asm/reset.h> #include "ap_bus.h" @@ -464,7 +465,7 @@ static int ap_device_probe(struct device *dev) * Flush all requests from the request/pending queue of an AP device. * @ap_dev: pointer to the AP device. 
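The qdio sysfs toggle added a little further up is registered on ccw_bus_type with bus_create_file() in init_QDIO(), so it should surface as a bus-level attribute (presumably /sys/bus/ccw/qdio_performance_stats, writable with 0 or 1; writing 0 also clears the counters). A minimal sketch of the same bus-attribute pattern against that era's driver-core API, with a hypothetical my_bus_type and the bus_create_file() return value actually checked:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>

extern struct bus_type my_bus_type;     /* hypothetical bus */
static int stats_enabled;               /* mirrors the flag from the sketch above */

static ssize_t my_stats_show(struct bus_type *bus, char *buf)
{
        return sprintf(buf, "%i\n", stats_enabled ? 1 : 0);
}

static ssize_t my_stats_store(struct bus_type *bus, const char *buf, size_t count)
{
        if (count && buf[0] == '1')
                stats_enabled = 1;
        else if (count && buf[0] == '0')
                stats_enabled = 0;
        else
                return -EINVAL;
        return count;
}

static BUS_ATTR(my_stats, 0644, my_stats_show, my_stats_store);

static int __init my_init(void)
{
        /* Unlike init_QDIO(), fail loudly if the attribute cannot be created. */
        return bus_create_file(&my_bus_type, &bus_attr_my_stats);
}

static void __exit my_exit(void)
{
        bus_remove_file(&my_bus_type, &bus_attr_my_stats);
}

module_init(my_init);
module_exit(my_exit);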
*/ -static inline void __ap_flush_queue(struct ap_device *ap_dev) +static void __ap_flush_queue(struct ap_device *ap_dev) { struct ap_message *ap_msg, *next; @@ -586,7 +587,7 @@ static struct bus_attribute *const ap_bus_attrs[] = { /** * Pick one of the 16 ap domains. */ -static inline int ap_select_domain(void) +static int ap_select_domain(void) { int queue_depth, device_type, count, max_count, best_domain; int rc, i, j; @@ -824,7 +825,7 @@ static inline void ap_schedule_poll_timer(void) * required, bit 2^1 is set if the poll timer needs to get armed * Returns 0 if the device is still present, -ENODEV if not. */ -static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) +static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) { struct ap_queue_status status; struct ap_message *ap_msg; @@ -871,7 +872,7 @@ static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) * required, bit 2^1 is set if the poll timer needs to get armed * Returns 0 if the device is still present, -ENODEV if not. */ -static inline int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) +static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) { struct ap_queue_status status; struct ap_message *ap_msg; @@ -1128,6 +1129,27 @@ static void ap_poll_thread_stop(void) mutex_unlock(&ap_poll_thread_mutex); } +static void ap_reset_domain(void) +{ + int i; + + for (i = 0; i < AP_DEVICES; i++) + ap_reset_queue(AP_MKQID(i, ap_domain_index)); +} + +static void ap_reset_all(void) +{ + int i, j; + + for (i = 0; i < AP_DOMAINS; i++) + for (j = 0; j < AP_DEVICES; j++) + ap_reset_queue(AP_MKQID(j, i)); +} + +static struct reset_call ap_reset_call = { + .fn = ap_reset_all, +}; + /** * The module initialization code. */ @@ -1144,6 +1166,7 @@ int __init ap_module_init(void) printk(KERN_WARNING "AP instructions not installed.\n"); return -ENODEV; } + register_reset_call(&ap_reset_call); /* Create /sys/bus/ap. 
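register_reset_call() above hooks ap_reset_all() into the s390 reset path via <asm/reset.h>, so every AP queue is reset when the machine goes through a reset or re-IPL; the matching unregister_reset_call() runs on module exit and on the error path. A minimal sketch of the same hook for a hypothetical quiesce routine:

#include <linux/module.h>
#include <asm/reset.h>

static void my_quiesce_hw(void)
{
        /* Put the hypothetical adapter into a clean state; this is
         * invoked from the s390 reset path, e.g. before a re-IPL. */
}

static struct reset_call my_reset_call = {
        .fn = my_quiesce_hw,
};

static int __init my_init(void)
{
        register_reset_call(&my_reset_call);
        return 0;
}

static void __exit my_exit(void)
{
        unregister_reset_call(&my_reset_call);
}

module_init(my_init);
module_exit(my_exit);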
*/ rc = bus_register(&ap_bus_type); @@ -1197,6 +1220,7 @@ out_bus: bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); bus_unregister(&ap_bus_type); out: + unregister_reset_call(&ap_reset_call); return rc; } @@ -1213,10 +1237,12 @@ void ap_module_exit(void) int i; struct device *dev; + ap_reset_domain(); ap_poll_thread_stop(); del_timer_sync(&ap_config_timer); del_timer_sync(&ap_poll_timer); destroy_workqueue(ap_work_queue); + tasklet_kill(&ap_tasklet); s390_root_dev_unregister(ap_root_device); while ((dev = bus_find_device(&ap_bus_type, NULL, NULL, __ap_match_all))) @@ -1227,6 +1253,7 @@ void ap_module_exit(void) for (i = 0; ap_bus_attrs[i]; i++) bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); bus_unregister(&ap_bus_type); + unregister_reset_call(&ap_reset_call); } #ifndef CONFIG_ZCRYPT_MONOLITHIC diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 1edc10a7a6f..b9e59bc9435 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -791,7 +791,7 @@ static long trans_xcRB32(struct file *filp, unsigned int cmd, return rc; } -long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, +static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { if (cmd == ICARSAMODEXPO) @@ -833,8 +833,8 @@ static struct miscdevice zcrypt_misc_device = { */ static struct proc_dir_entry *zcrypt_entry; -static inline int sprintcl(unsigned char *outaddr, unsigned char *addr, - unsigned int len) +static int sprintcl(unsigned char *outaddr, unsigned char *addr, + unsigned int len) { int hl, i; @@ -845,8 +845,8 @@ static inline int sprintcl(unsigned char *outaddr, unsigned char *addr, return hl; } -static inline int sprintrw(unsigned char *outaddr, unsigned char *addr, - unsigned int len) +static int sprintrw(unsigned char *outaddr, unsigned char *addr, + unsigned int len) { int hl, inl, c, cx; @@ -865,8 +865,8 @@ static inline int sprintrw(unsigned char *outaddr, unsigned char *addr, return hl; } -static inline int sprinthx(unsigned char *title, unsigned char *outaddr, - unsigned char *addr, unsigned int len) +static int sprinthx(unsigned char *title, unsigned char *outaddr, + unsigned char *addr, unsigned int len) { int hl, inl, r, rx; @@ -885,8 +885,8 @@ static inline int sprinthx(unsigned char *title, unsigned char *outaddr, return hl; } -static inline int sprinthx4(unsigned char *title, unsigned char *outaddr, - unsigned int *array, unsigned int len) +static int sprinthx4(unsigned char *title, unsigned char *outaddr, + unsigned int *array, unsigned int len) { int hl, r; @@ -943,7 +943,7 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset, zcrypt_qdepth_mask(workarea); len += sprinthx("Waiting work element counts", resp_buff+len, workarea, AP_DEVICES); - zcrypt_perdev_reqcnt((unsigned int *) workarea); + zcrypt_perdev_reqcnt((int *) workarea); len += sprinthx4("Per-device successfully completed request counts", resp_buff+len,(unsigned int *) workarea, AP_DEVICES); *eof = 1; diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index a62b00083d0..5bb13a9d089 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -295,7 +295,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev, struct completion work; int rc; - ap_msg.message = (void *) kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); + ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); if (!ap_msg.message) return -ENOMEM; ap_msg.psmid = (((unsigned long long) 
current->pid) << 32) + @@ -337,7 +337,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev, struct completion work; int rc; - ap_msg.message = (void *) kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); + ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); if (!ap_msg.message) return -ENOMEM; ap_msg.psmid = (((unsigned long long) current->pid) << 32) + diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c index b6a4ecdc802..818ffe05ac0 100644 --- a/drivers/s390/crypto/zcrypt_pcica.c +++ b/drivers/s390/crypto/zcrypt_pcica.c @@ -191,10 +191,10 @@ static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev, * * Returns 0 on success or -EFAULT. */ -static inline int convert_type84(struct zcrypt_device *zdev, - struct ap_message *reply, - char __user *outputdata, - unsigned int outputdatalength) +static int convert_type84(struct zcrypt_device *zdev, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) { struct type84_hdr *t84h = reply->message; char *data; @@ -279,7 +279,7 @@ static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev, struct completion work; int rc; - ap_msg.message = (void *) kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); + ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); if (!ap_msg.message) return -ENOMEM; ap_msg.psmid = (((unsigned long long) current->pid) << 32) + @@ -321,7 +321,7 @@ static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev, struct completion work; int rc; - ap_msg.message = (void *) kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); + ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); if (!ap_msg.message) return -ENOMEM; ap_msg.psmid = (((unsigned long long) current->pid) << 32) + diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index 2da8b938140..252443b6bd1 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c @@ -709,7 +709,8 @@ out_free: * PCIXCC/CEX2C device to the request distributor * @xcRB: pointer to the send_cprb request buffer */ -long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB) +static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, + struct ica_xcRB *xcRB) { struct ap_message ap_msg; struct response_type resp_type = { @@ -717,7 +718,7 @@ long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB) }; int rc; - ap_msg.message = (void *) kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL); + ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL); if (!ap_msg.message) return -ENOMEM; ap_msg.psmid = (((unsigned long long) current->pid) << 32) + diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index 1a93fa684e9..f98fa465df0 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -22,16 +22,6 @@ config CTC available. This option is also available as a module which will be called ctc.ko. If you do not know what it is, it's safe to say "Y". -config IUCV - tristate "IUCV support (VM only)" - help - Select this option if you want to use inter-user communication - under VM or VIF. If unsure, say "Y" to enable a fast communication - link between VM guests. At boot time the user ID of the guest needs - to be passed to the kernel. Note that both kernels need to be - compiled with this option and both need to be booted with the user ID - of the other VM guest. 
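The kmalloc() changes in the zcrypt drivers above (and in ctcmain.c further down) only drop redundant casts: kmalloc() returns void *, which C converts implicitly to any object pointer, so the cast adds nothing and can hide a missing <linux/slab.h> include. A tiny illustration with a hypothetical message struct:

#include <linux/slab.h>

struct my_msg {
        char buf[128];
};

static struct my_msg *alloc_msg(void)
{
        /* No cast needed on the void * returned by kmalloc(). */
        return kmalloc(sizeof(struct my_msg), GFP_KERNEL);
}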
- config NETIUCV tristate "IUCV network device support (VM only)" depends on IUCV && NETDEVICES diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile index 4777e36a922..bbe3ab2e93d 100644 --- a/drivers/s390/net/Makefile +++ b/drivers/s390/net/Makefile @@ -4,7 +4,6 @@ ctc-objs := ctcmain.o ctcdbug.o -obj-$(CONFIG_IUCV) += iucv.o obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o obj-$(CONFIG_SMSGIUCV) += smsgiucv.o obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 95f4e105cb9..7809a79feec 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c @@ -121,7 +121,7 @@ MODULE_LICENSE("GPL"); #define DEBUG #endif - char debug_buffer[255]; +static char debug_buffer[255]; /** * Debug Facility Stuff */ @@ -223,16 +223,14 @@ static void claw_timer ( struct chbk * p_ch ); /* Functions */ static int add_claw_reads(struct net_device *dev, struct ccwbk* p_first, struct ccwbk* p_last); -static void inline ccw_check_return_code (struct ccw_device *cdev, - int return_code); -static void inline ccw_check_unit_check (struct chbk * p_ch, - unsigned char sense ); +static void ccw_check_return_code (struct ccw_device *cdev, int return_code); +static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense ); static int find_link(struct net_device *dev, char *host_name, char *ws_name ); static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid); static int init_ccw_bk(struct net_device *dev); static void probe_error( struct ccwgroup_device *cgdev); static struct net_device_stats *claw_stats(struct net_device *dev); -static int inline pages_to_order_of_mag(int num_of_pages); +static int pages_to_order_of_mag(int num_of_pages); static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); #ifdef DEBUG static void dumpit (char *buf, int len); @@ -1310,7 +1308,7 @@ claw_timer ( struct chbk * p_ch ) * of magnitude get_free_pages() has an upper order of 9 * *--------------------------------------------------------------------*/ -static int inline +static int pages_to_order_of_mag(int num_of_pages) { int order_of_mag=1; /* assume 2 pages */ @@ -1482,7 +1480,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first, * * *-------------------------------------------------------------------*/ -static void inline +static void ccw_check_return_code(struct ccw_device *cdev, int return_code) { #ifdef FUNCTRACE @@ -1529,7 +1527,7 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code) * ccw_check_unit_check * *--------------------------------------------------------------------*/ -static void inline +static void ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) { struct net_device *dev = p_ch->ndev; diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c index 3257c22dd79..5a84fbbc661 100644 --- a/drivers/s390/net/ctcmain.c +++ b/drivers/s390/net/ctcmain.c @@ -369,7 +369,7 @@ ctc_dump_skb(struct sk_buff *skb, int offset) * @param ch The channel where this skb has been received. * @param pskb The received skb. */ -static __inline__ void +static void ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) { struct net_device *dev = ch->netdev; @@ -512,7 +512,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) * @param ch The channel, the error belongs to. * @param return_code The error code to inspect. 
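The recurring removal of 'inline' from static functions (here in claw.c, and throughout the cio and zcrypt hunks) drops explicit inlining hints and leaves the decision to the compiler: gcc will still inline small static functions on its own at -O2, while large ones stop being forced inline at every call site. A trivial, hypothetical self-contained example:

/* No 'inline' keyword: the compiler may still inline this static
 * helper where it pays off, but is not forced to. */
static int order_of_pages(int num_of_pages)
{
        int order = 0;

        while ((1 << order) < num_of_pages)
                order++;
        return order;
}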
*/ -static void inline +static void ccw_check_return_code(struct channel *ch, int return_code, char *msg) { DBF_TEXT(trace, 5, __FUNCTION__); @@ -547,7 +547,7 @@ ccw_check_return_code(struct channel *ch, int return_code, char *msg) * @param ch The channel, the sense code belongs to. * @param sense The sense code to inspect. */ -static void inline +static void ccw_unit_check(struct channel *ch, unsigned char sense) { DBF_TEXT(trace, 5, __FUNCTION__); @@ -603,7 +603,7 @@ ctc_purge_skb_queue(struct sk_buff_head *q) } } -static __inline__ int +static int ctc_checkalloc_buffer(struct channel *ch, int warn) { DBF_TEXT(trace, 5, __FUNCTION__); @@ -1646,7 +1646,7 @@ add_channel(struct ccw_device *cdev, enum channel_types type) return -1; } memset(ch, 0, sizeof (struct channel)); - if ((ch->ccw = (struct ccw1 *) kmalloc(8*sizeof(struct ccw1), + if ((ch->ccw = kmalloc(8*sizeof(struct ccw1), GFP_KERNEL | GFP_DMA)) == NULL) { kfree(ch); ctc_pr_warn("ctc: Out of memory in add_channel\n"); @@ -1693,7 +1693,7 @@ add_channel(struct ccw_device *cdev, enum channel_types type) return -1; } fsm_newstate(ch->fsm, CH_STATE_IDLE); - if ((ch->irb = (struct irb *) kmalloc(sizeof (struct irb), + if ((ch->irb = kmalloc(sizeof (struct irb), GFP_KERNEL)) == NULL) { ctc_pr_warn("ctc: Out of memory in add_channel\n"); kfree_fsm(ch->fsm); @@ -2535,7 +2535,7 @@ ctc_print_statistics(struct ctc_priv *priv) DBF_TEXT(trace, 4, __FUNCTION__); if (!priv) return; - sbuf = (char *)kmalloc(2048, GFP_KERNEL); + sbuf = kmalloc(2048, GFP_KERNEL); if (sbuf == NULL) return; p = sbuf; diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c index e965f03a729..76728ae4b84 100644 --- a/drivers/s390/net/cu3088.c +++ b/drivers/s390/net/cu3088.c @@ -57,7 +57,7 @@ static struct ccw_device_id cu3088_ids[] = { static struct ccw_driver cu3088_driver; -struct device *cu3088_root_dev; +static struct device *cu3088_root_dev; static ssize_t group_write(struct device_driver *drv, const char *buf, size_t count) diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c deleted file mode 100644 index 1476ce2b437..00000000000 --- a/drivers/s390/net/iucv.c +++ /dev/null @@ -1,2540 +0,0 @@ -/* - * IUCV network driver - * - * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): - * Original source: - * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 - * Xenia Tkatschow (xenia@us.ibm.com) - * 2Gb awareness and general cleanup: - * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) - * - * Documentation used: - * The original source - * CP Programming Service, IBM document # SC24-5760 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- * - */ - -/* #define DEBUG */ - -#include <linux/module.h> -#include <linux/moduleparam.h> - -#include <linux/spinlock.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/list.h> -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/device.h> -#include <asm/atomic.h> -#include "iucv.h" -#include <asm/io.h> -#include <asm/s390_ext.h> -#include <asm/ebcdic.h> -#include <asm/smp.h> -#include <asm/s390_rdev.h> - -/* FLAGS: - * All flags are defined in the field IPFLAGS1 of each function - * and can be found in CP Programming Services. - * IPSRCCLS - Indicates you have specified a source class - * IPFGMCL - Indicates you have specified a target class - * IPFGPID - Indicates you have specified a pathid - * IPFGMID - Indicates you have specified a message ID - * IPANSLST - Indicates that you are using an address list for - * reply data - * IPBUFLST - Indicates that you are using an address list for - * message data - */ - -#define IPSRCCLS 0x01 -#define IPFGMCL 0x01 -#define IPFGPID 0x02 -#define IPFGMID 0x04 -#define IPANSLST 0x08 -#define IPBUFLST 0x40 - -static int -iucv_bus_match (struct device *dev, struct device_driver *drv) -{ - return 0; -} - -struct bus_type iucv_bus = { - .name = "iucv", - .match = iucv_bus_match, -}; - -struct device *iucv_root; - -/* General IUCV interrupt structure */ -typedef struct { - __u16 ippathid; - __u8 res1; - __u8 iptype; - __u32 res2; - __u8 ipvmid[8]; - __u8 res3[24]; -} iucv_GeneralInterrupt; - -static iucv_GeneralInterrupt *iucv_external_int_buffer = NULL; - -/* Spin Lock declaration */ - -static DEFINE_SPINLOCK(iucv_lock); - -static int messagesDisabled = 0; - -/***************INTERRUPT HANDLING ***************/ - -typedef struct { - struct list_head queue; - iucv_GeneralInterrupt data; -} iucv_irqdata; - -static struct list_head iucv_irq_queue; -static DEFINE_SPINLOCK(iucv_irq_queue_lock); - -/* - *Internal function prototypes - */ -static void iucv_tasklet_handler(unsigned long); -static void iucv_irq_handler(__u16); - -static DECLARE_TASKLET(iucv_tasklet,iucv_tasklet_handler,0); - -/************ FUNCTION ID'S ****************************/ - -#define ACCEPT 10 -#define CONNECT 11 -#define DECLARE_BUFFER 12 -#define PURGE 9 -#define QUERY 0 -#define QUIESCE 13 -#define RECEIVE 5 -#define REJECT 8 -#define REPLY 6 -#define RESUME 14 -#define RETRIEVE_BUFFER 2 -#define SEND 4 -#define SETMASK 16 -#define SEVER 15 - -/** - * Structure: handler - * members: list - list management. - * structure: id - * userid - 8 char array of machine identification - * user_data - 16 char array for user identification - * mask - 24 char array used to compare the 2 previous - * interrupt_table - vector of interrupt functions. - * pgm_data - ulong, application data that is passed - * to the interrupt handlers -*/ -typedef struct handler_t { - struct list_head list; - struct { - __u8 userid[8]; - __u8 user_data[16]; - __u8 mask[24]; - } id; - iucv_interrupt_ops_t *interrupt_table; - void *pgm_data; -} handler; - -/** - * iucv_handler_table: List of registered handlers. - */ -static struct list_head iucv_handler_table; - -/** - * iucv_pathid_table: an array of *handler pointing into - * iucv_handler_table for fast indexing by pathid; - */ -static handler **iucv_pathid_table; - -static unsigned long max_connections; - -/** - * iucv_cpuid: contains the logical cpu number of the cpu which - * has declared the iucv buffer by issuing DECLARE_BUFFER. 
- * If no cpu has done the initialization iucv_cpuid contains -1. - */ -static int iucv_cpuid = -1; -/** - * register_flag: is 0 when external interrupt has not been registered - */ -static int register_flag; - -/****************FIVE 40-BYTE PARAMETER STRUCTURES******************/ -/* Data struct 1: iparml_control - * Used for iucv_accept - * iucv_connect - * iucv_quiesce - * iucv_resume - * iucv_sever - * iucv_retrieve_buffer - * Data struct 2: iparml_dpl (data in parameter list) - * Used for iucv_send_prmmsg - * iucv_send2way_prmmsg - * iucv_send2way_prmmsg_array - * iucv_reply_prmmsg - * Data struct 3: iparml_db (data in a buffer) - * Used for iucv_receive - * iucv_receive_array - * iucv_reject - * iucv_reply - * iucv_reply_array - * iucv_send - * iucv_send_array - * iucv_send2way - * iucv_send2way_array - * iucv_declare_buffer - * Data struct 4: iparml_purge - * Used for iucv_purge - * iucv_query - * Data struct 5: iparml_set_mask - * Used for iucv_set_mask - */ - -typedef struct { - __u16 ippathid; - __u8 ipflags1; - __u8 iprcode; - __u16 ipmsglim; - __u16 res1; - __u8 ipvmid[8]; - __u8 ipuser[16]; - __u8 iptarget[8]; -} iparml_control; - -typedef struct { - __u16 ippathid; - __u8 ipflags1; - __u8 iprcode; - __u32 ipmsgid; - __u32 iptrgcls; - __u8 iprmmsg[8]; - __u32 ipsrccls; - __u32 ipmsgtag; - __u32 ipbfadr2; - __u32 ipbfln2f; - __u32 res; -} iparml_dpl; - -typedef struct { - __u16 ippathid; - __u8 ipflags1; - __u8 iprcode; - __u32 ipmsgid; - __u32 iptrgcls; - __u32 ipbfadr1; - __u32 ipbfln1f; - __u32 ipsrccls; - __u32 ipmsgtag; - __u32 ipbfadr2; - __u32 ipbfln2f; - __u32 res; -} iparml_db; - -typedef struct { - __u16 ippathid; - __u8 ipflags1; - __u8 iprcode; - __u32 ipmsgid; - __u8 ipaudit[3]; - __u8 res1[5]; - __u32 res2; - __u32 ipsrccls; - __u32 ipmsgtag; - __u32 res3[3]; -} iparml_purge; - -typedef struct { - __u8 ipmask; - __u8 res1[2]; - __u8 iprcode; - __u32 res2[9]; -} iparml_set_mask; - -typedef struct { - union { - iparml_control p_ctrl; - iparml_dpl p_dpl; - iparml_db p_db; - iparml_purge p_purge; - iparml_set_mask p_set_mask; - } param; - atomic_t in_use; - __u32 res; -} __attribute__ ((aligned(8))) iucv_param; -#define PARAM_POOL_SIZE (PAGE_SIZE / sizeof(iucv_param)) - -static iucv_param * iucv_param_pool; - -MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)"); -MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver"); -MODULE_LICENSE("GPL"); - -/* - * Debugging stuff - *******************************************************************************/ - - -#ifdef DEBUG -static int debuglevel = 0; - -module_param(debuglevel, int, 0); -MODULE_PARM_DESC(debuglevel, - "Specifies the debug level (0=off ... 3=all)"); - -static void -iucv_dumpit(char *title, void *buf, int len) -{ - int i; - __u8 *p = (__u8 *)buf; - - if (debuglevel < 3) - return; - - printk(KERN_DEBUG "%s\n", title); - printk(" "); - for (i = 0; i < len; i++) { - if (!(i % 16) && i != 0) - printk ("\n "); - else if (!(i % 4) && i != 0) - printk(" "); - printk("%02X", *p++); - } - if (len % 16) - printk ("\n"); - return; -} -#define iucv_debug(lvl, fmt, args...) \ -do { \ - if (debuglevel >= lvl) \ - printk(KERN_DEBUG "%s: " fmt "\n", __FUNCTION__ , ## args); \ -} while (0) - -#else - -#define iucv_debug(lvl, fmt, args...) 
do { } while (0) -#define iucv_dumpit(title, buf, len) do { } while (0) - -#endif - -/* - * Internal functions - *******************************************************************************/ - -/** - * print start banner - */ -static void -iucv_banner(void) -{ - printk(KERN_INFO "IUCV lowlevel driver initialized\n"); -} - -/** - * iucv_init - Initialization - * - * Allocates and initializes various data structures. - */ -static int -iucv_init(void) -{ - int ret; - - if (iucv_external_int_buffer) - return 0; - - if (!MACHINE_IS_VM) { - printk(KERN_ERR "IUCV: IUCV connection needs VM as base\n"); - return -EPROTONOSUPPORT; - } - - ret = bus_register(&iucv_bus); - if (ret) { - printk(KERN_ERR "IUCV: failed to register bus.\n"); - return ret; - } - - iucv_root = s390_root_dev_register("iucv"); - if (IS_ERR(iucv_root)) { - printk(KERN_ERR "IUCV: failed to register iucv root.\n"); - bus_unregister(&iucv_bus); - return PTR_ERR(iucv_root); - } - - /* Note: GFP_DMA used used to get memory below 2G */ - iucv_external_int_buffer = kzalloc(sizeof(iucv_GeneralInterrupt), - GFP_KERNEL|GFP_DMA); - if (!iucv_external_int_buffer) { - printk(KERN_WARNING - "%s: Could not allocate external interrupt buffer\n", - __FUNCTION__); - s390_root_dev_unregister(iucv_root); - bus_unregister(&iucv_bus); - return -ENOMEM; - } - - /* Initialize parameter pool */ - iucv_param_pool = kzalloc(sizeof(iucv_param) * PARAM_POOL_SIZE, - GFP_KERNEL|GFP_DMA); - if (!iucv_param_pool) { - printk(KERN_WARNING "%s: Could not allocate param pool\n", - __FUNCTION__); - kfree(iucv_external_int_buffer); - iucv_external_int_buffer = NULL; - s390_root_dev_unregister(iucv_root); - bus_unregister(&iucv_bus); - return -ENOMEM; - } - - /* Initialize irq queue */ - INIT_LIST_HEAD(&iucv_irq_queue); - - /* Initialize handler table */ - INIT_LIST_HEAD(&iucv_handler_table); - - iucv_banner(); - return 0; -} - -/** - * iucv_exit - De-Initialization - * - * Frees everything allocated from iucv_init. - */ -static int iucv_retrieve_buffer (void); - -static void -iucv_exit(void) -{ - iucv_retrieve_buffer(); - kfree(iucv_external_int_buffer); - iucv_external_int_buffer = NULL; - kfree(iucv_param_pool); - iucv_param_pool = NULL; - s390_root_dev_unregister(iucv_root); - bus_unregister(&iucv_bus); - printk(KERN_INFO "IUCV lowlevel driver unloaded\n"); -} - -/** - * grab_param: - Get a parameter buffer from the pre-allocated pool. - * - * This function searches for an unused element in the pre-allocated pool - * of parameter buffers. If one is found, it marks it "in use" and returns - * a pointer to it. The calling function is responsible for releasing it - * when it has finished its usage. - * - * Returns: A pointer to iucv_param. - */ -static __inline__ iucv_param * -grab_param(void) -{ - iucv_param *ptr; - static int hint = 0; - - ptr = iucv_param_pool + hint; - do { - ptr++; - if (ptr >= iucv_param_pool + PARAM_POOL_SIZE) - ptr = iucv_param_pool; - } while (atomic_cmpxchg(&ptr->in_use, 0, 1) != 0); - hint = ptr - iucv_param_pool; - - memset(&ptr->param, 0, sizeof(ptr->param)); - return ptr; -} - -/** - * release_param - Release a parameter buffer. - * @p: A pointer to a struct iucv_param, previously obtained by calling - * grab_param(). - * - * This function marks the specified parameter buffer "unused". - */ -static __inline__ void -release_param(void *p) -{ - atomic_set(&((iucv_param *)p)->in_use, 0); -} - -/** - * iucv_add_handler: - Add a new handler - * @new_handler: handle that is being entered into chain. 
- * - * Places new handle on iucv_handler_table, if identical handler is not - * found. - * - * Returns: 0 on success, !0 on failure (handler already in chain). - */ -static int -iucv_add_handler (handler *new) -{ - ulong flags; - - iucv_debug(1, "entering"); - iucv_dumpit("handler:", new, sizeof(handler)); - - spin_lock_irqsave (&iucv_lock, flags); - if (!list_empty(&iucv_handler_table)) { - struct list_head *lh; - - /** - * Search list for handler with identical id. If one - * is found, the new handler is _not_ added. - */ - list_for_each(lh, &iucv_handler_table) { - handler *h = list_entry(lh, handler, list); - if (!memcmp(&new->id, &h->id, sizeof(h->id))) { - iucv_debug(1, "ret 1"); - spin_unlock_irqrestore (&iucv_lock, flags); - return 1; - } - } - } - /** - * If we get here, no handler was found. - */ - INIT_LIST_HEAD(&new->list); - list_add(&new->list, &iucv_handler_table); - spin_unlock_irqrestore (&iucv_lock, flags); - - iucv_debug(1, "exiting"); - return 0; -} - -/** - * b2f0: - * @code: identifier of IUCV call to CP. - * @parm: pointer to 40 byte iparml area passed to CP - * - * Calls CP to execute IUCV commands. - * - * Returns: return code from CP's IUCV call - */ -static inline ulong b2f0(__u32 code, void *parm) -{ - register unsigned long reg0 asm ("0"); - register unsigned long reg1 asm ("1"); - iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param)); - - reg0 = code; - reg1 = virt_to_phys(parm); - asm volatile(".long 0xb2f01000" : : "d" (reg0), "a" (reg1)); - - iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param)); - - return (unsigned long)*((__u8 *)(parm + 3)); -} - -/* - * Name: iucv_add_pathid - * Purpose: Adds a path id to the system. - * Input: pathid - pathid that is going to be entered into system - * handle - address of handler that the pathid will be associated - * with. - * pgm_data - token passed in by application. - * Output: 0: successful addition of pathid - * - EINVAL - pathid entry is being used by another application - * - ENOMEM - storage allocation for a new pathid table failed -*/ -static int -__iucv_add_pathid(__u16 pathid, handler *handler) -{ - - iucv_debug(1, "entering"); - - iucv_debug(1, "handler is pointing to %p", handler); - - if (pathid > (max_connections - 1)) - return -EINVAL; - - if (iucv_pathid_table[pathid]) { - iucv_debug(1, "pathid entry is %p", iucv_pathid_table[pathid]); - printk(KERN_WARNING - "%s: Pathid being used, error.\n", __FUNCTION__); - return -EINVAL; - } - iucv_pathid_table[pathid] = handler; - - iucv_debug(1, "exiting"); - return 0; -} /* end of add_pathid function */ - -static int -iucv_add_pathid(__u16 pathid, handler *handler) -{ - ulong flags; - int rc; - - spin_lock_irqsave (&iucv_lock, flags); - rc = __iucv_add_pathid(pathid, handler); - spin_unlock_irqrestore (&iucv_lock, flags); - return rc; -} - -static void -iucv_remove_pathid(__u16 pathid) -{ - ulong flags; - - if (pathid > (max_connections - 1)) - return; - - spin_lock_irqsave (&iucv_lock, flags); - iucv_pathid_table[pathid] = NULL; - spin_unlock_irqrestore (&iucv_lock, flags); -} - -/** - * iucv_declare_buffer_cpuid - * Register at VM for subsequent IUCV operations. This is executed - * on the reserved CPU iucv_cpuid. Called from iucv_declare_buffer(). 
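/*
 * An illustrative sketch, not taken from the file being removed: every
 * IUCV primitive further down follows the same sequence around b2f0() -
 * take a block from the parameter pool, fill the 40-byte iparml area,
 * issue the B2F0 instruction and use CP's iprcode as the return value,
 * then hand the block back.  example_sever_path() is a hypothetical
 * name; compare the real iucv_sever() later in this file.
 */
static int
example_sever_path(__u16 pathid, __u8 user_data[16])
{
	iparml_control *parm = (iparml_control *) grab_param();
	ulong rc;

	parm->ippathid = pathid;
	memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
	rc = b2f0(SEVER, parm);		/* iprcode from CP, 0 on success */
	release_param(parm);
	return rc;
}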
- */ -static void -iucv_declare_buffer_cpuid (void *result) -{ - iparml_db *parm; - - parm = (iparml_db *)grab_param(); - parm->ipbfadr1 = virt_to_phys(iucv_external_int_buffer); - if ((*((ulong *)result) = b2f0(DECLARE_BUFFER, parm)) == 1) - *((ulong *)result) = parm->iprcode; - release_param(parm); -} - -/** - * iucv_retrieve_buffer_cpuid: - * Unregister IUCV usage at VM. This is always executed on the same - * cpu that registered the buffer to VM. - * Called from iucv_retrieve_buffer(). - */ -static void -iucv_retrieve_buffer_cpuid (void *cpu) -{ - iparml_control *parm; - - parm = (iparml_control *)grab_param(); - b2f0(RETRIEVE_BUFFER, parm); - release_param(parm); -} - -/** - * Name: iucv_declare_buffer - * Purpose: Specifies the guests real address of an external - * interrupt. - * Input: void - * Output: iprcode - return code from b2f0 call - */ -static int -iucv_declare_buffer (void) -{ - unsigned long flags; - ulong b2f0_result; - - iucv_debug(1, "entering"); - b2f0_result = -ENODEV; - spin_lock_irqsave (&iucv_lock, flags); - if (iucv_cpuid == -1) { - /* Reserve any cpu for use by iucv. */ - iucv_cpuid = smp_get_cpu(CPU_MASK_ALL); - spin_unlock_irqrestore (&iucv_lock, flags); - smp_call_function_on(iucv_declare_buffer_cpuid, - &b2f0_result, 0, 1, iucv_cpuid); - if (b2f0_result) { - smp_put_cpu(iucv_cpuid); - iucv_cpuid = -1; - } - iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer); - } else { - spin_unlock_irqrestore (&iucv_lock, flags); - b2f0_result = 0; - } - iucv_debug(1, "exiting"); - return b2f0_result; -} - -/** - * iucv_retrieve_buffer: - * - * Terminates all use of IUCV. - * Returns: return code from CP - */ -static int -iucv_retrieve_buffer (void) -{ - iucv_debug(1, "entering"); - if (iucv_cpuid != -1) { - smp_call_function_on(iucv_retrieve_buffer_cpuid, - NULL, 0, 1, iucv_cpuid); - /* Release the cpu reserved by iucv_declare_buffer. */ - smp_put_cpu(iucv_cpuid); - iucv_cpuid = -1; - } - iucv_debug(1, "exiting"); - return 0; -} - -/** - * iucv_remove_handler: - * @users_handler: handler to be removed - * - * Remove handler when application unregisters. - */ -static void -iucv_remove_handler(handler *handler) -{ - unsigned long flags; - - if ((!iucv_pathid_table) || (!handler)) - return; - - iucv_debug(1, "entering"); - - spin_lock_irqsave (&iucv_lock, flags); - list_del(&handler->list); - if (list_empty(&iucv_handler_table)) { - if (register_flag) { - unregister_external_interrupt(0x4000, iucv_irq_handler); - register_flag = 0; - } - } - spin_unlock_irqrestore (&iucv_lock, flags); - - iucv_debug(1, "exiting"); - return; -} - -/** - * iucv_register_program: - * @pgmname: user identification - * @userid: machine identification - * @pgmmask: Indicates which bits in the pgmname and userid combined will be - * used to determine who is given control. - * @ops: Address of interrupt handler table. - * @pgm_data: Application data to be passed to interrupt handlers. - * - * Registers an application with IUCV. - * Returns: - * The address of handler, or NULL on failure. - * NOTE on pgmmask: - * If pgmname, userid and pgmmask are provided, pgmmask is entered into the - * handler as is. - * If pgmmask is NULL, the internal mask is set to all 0xff's - * When userid is NULL, the first 8 bytes of the internal mask are forced - * to 0x00. - * If pgmmask and userid are NULL, the first 8 bytes of the internal mask - * are forced to 0x00 and the last 16 bytes to 0xff. 
- */ - -iucv_handle_t -iucv_register_program (__u8 pgmname[16], - __u8 userid[8], - __u8 pgmmask[24], - iucv_interrupt_ops_t * ops, void *pgm_data) -{ - ulong rc = 0; /* return code from function calls */ - handler *new_handler; - - iucv_debug(1, "entering"); - - if (ops == NULL) { - /* interrupt table is not defined */ - printk(KERN_WARNING "%s: Interrupt table is not defined, " - "exiting\n", __FUNCTION__); - return NULL; - } - if (!pgmname) { - printk(KERN_WARNING "%s: pgmname not provided\n", __FUNCTION__); - return NULL; - } - - /* Allocate handler entry */ - new_handler = (handler *)kmalloc(sizeof(handler), GFP_ATOMIC); - if (new_handler == NULL) { - printk(KERN_WARNING "%s: storage allocation for new handler " - "failed.\n", __FUNCTION__); - return NULL; - } - - if (!iucv_pathid_table) { - if (iucv_init()) { - kfree(new_handler); - return NULL; - } - - max_connections = iucv_query_maxconn(); - iucv_pathid_table = kcalloc(max_connections, sizeof(handler *), - GFP_ATOMIC); - if (iucv_pathid_table == NULL) { - printk(KERN_WARNING "%s: iucv_pathid_table storage " - "allocation failed\n", __FUNCTION__); - kfree(new_handler); - return NULL; - } - } - memset(new_handler, 0, sizeof (handler)); - memcpy(new_handler->id.user_data, pgmname, - sizeof (new_handler->id.user_data)); - if (userid) { - memcpy (new_handler->id.userid, userid, - sizeof (new_handler->id.userid)); - ASCEBC (new_handler->id.userid, - sizeof (new_handler->id.userid)); - EBC_TOUPPER (new_handler->id.userid, - sizeof (new_handler->id.userid)); - - if (pgmmask) { - memcpy (new_handler->id.mask, pgmmask, - sizeof (new_handler->id.mask)); - } else { - memset (new_handler->id.mask, 0xFF, - sizeof (new_handler->id.mask)); - } - } else { - if (pgmmask) { - memcpy (new_handler->id.mask, pgmmask, - sizeof (new_handler->id.mask)); - } else { - memset (new_handler->id.mask, 0xFF, - sizeof (new_handler->id.mask)); - } - memset (new_handler->id.userid, 0x00, - sizeof (new_handler->id.userid)); - } - /* fill in the rest of handler */ - new_handler->pgm_data = pgm_data; - new_handler->interrupt_table = ops; - - /* - * Check if someone else is registered with same pgmname, userid - * and mask. If someone is already registered with same pgmname, - * userid and mask, registration will fail and NULL will be returned - * to the application. - * If identical handler not found, then handler is added to list. 
- */ - rc = iucv_add_handler(new_handler); - if (rc) { - printk(KERN_WARNING "%s: Someone already registered with same " - "pgmname, userid, pgmmask\n", __FUNCTION__); - kfree (new_handler); - return NULL; - } - - rc = iucv_declare_buffer(); - if (rc) { - char *err = "Unknown"; - iucv_remove_handler(new_handler); - kfree(new_handler); - switch(rc) { - case 0x03: - err = "Directory error"; - break; - case 0x0a: - err = "Invalid length"; - break; - case 0x13: - err = "Buffer already exists"; - break; - case 0x3e: - err = "Buffer overlap"; - break; - case 0x5c: - err = "Paging or storage error"; - break; - } - printk(KERN_WARNING "%s: iucv_declare_buffer " - "returned error 0x%02lx (%s)\n", __FUNCTION__, rc, err); - return NULL; - } - if (!register_flag) { - /* request the 0x4000 external interrupt */ - rc = register_external_interrupt (0x4000, iucv_irq_handler); - if (rc) { - iucv_remove_handler(new_handler); - kfree (new_handler); - printk(KERN_WARNING "%s: " - "register_external_interrupt returned %ld\n", - __FUNCTION__, rc); - return NULL; - - } - register_flag = 1; - } - iucv_debug(1, "exiting"); - return new_handler; -} /* end of register function */ - -/** - * iucv_unregister_program: - * @handle: address of handler - * - * Unregister application with IUCV. - * Returns: - * 0 on success, -EINVAL, if specified handle is invalid. - */ - -int -iucv_unregister_program (iucv_handle_t handle) -{ - handler *h = NULL; - struct list_head *lh; - int i; - ulong flags; - - iucv_debug(1, "entering"); - iucv_debug(1, "address of handler is %p", h); - - /* Checking if handle is valid */ - spin_lock_irqsave (&iucv_lock, flags); - list_for_each(lh, &iucv_handler_table) { - if ((handler *)handle == list_entry(lh, handler, list)) { - h = (handler *)handle; - break; - } - } - if (!h) { - spin_unlock_irqrestore (&iucv_lock, flags); - if (handle) - printk(KERN_WARNING - "%s: Handler not found in iucv_handler_table.\n", - __FUNCTION__); - else - printk(KERN_WARNING - "%s: NULL handle passed by application.\n", - __FUNCTION__); - return -EINVAL; - } - - /** - * First, walk thru iucv_pathid_table and sever any pathid which is - * still pointing to the handler to be removed. - */ - for (i = 0; i < max_connections; i++) - if (iucv_pathid_table[i] == h) { - spin_unlock_irqrestore (&iucv_lock, flags); - iucv_sever(i, h->id.user_data); - spin_lock_irqsave(&iucv_lock, flags); - } - spin_unlock_irqrestore (&iucv_lock, flags); - - iucv_remove_handler(h); - kfree(h); - - iucv_debug(1, "exiting"); - return 0; -} - -/** - * iucv_accept: - * @pathid: Path identification number - * @msglim_reqstd: The number of outstanding messages requested. - * @user_data: Data specified by the iucv_connect function. - * @flags1: Contains options for this path. - * - IPPRTY (0x20) Specifies if you want to send priority message. - * - IPRMDATA (0x80) Specifies whether your program can handle a message - * in the parameter list. - * - IPQUSCE (0x40) Specifies whether you want to quiesce the path being - * established. - * @handle: Address of handler. - * @pgm_data: Application data passed to interrupt handlers. - * @flags1_out: Pointer to an int. If not NULL, on return the options for - * the path are stored at the given location: - * - IPPRTY (0x20) Indicates you may send a priority message. - * @msglim: Pointer to an __u16. If not NULL, on return the maximum - * number of outstanding messages is stored at the given - * location. 
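/*
 * An illustrative sketch of a hypothetical client of this (old) API,
 * not part of the driver: fill an iucv_interrupt_ops_t table and
 * register once, e.g. at module init.  Every "example_" name is made
 * up; the handlers themselves are sketched further below.
 */
static void example_connection_pending(iucv_ConnectionPending *, void *);
static void example_message_pending(iucv_MessagePending *, void *);

static iucv_interrupt_ops_t example_ops = {
	.ConnectionPending = example_connection_pending,
	.MessagePending    = example_message_pending,
};

static iucv_handle_t example_handle;

static int example_open(void)
{
	__u8 pgmname[16] = "EXAMPLESRV      ";

	/* NULL userid/pgmmask: with the default mask, connections are
	 * matched on pgmname only, regardless of the peer's VM user id. */
	example_handle = iucv_register_program(pgmname, NULL, NULL,
					       &example_ops, NULL);
	return example_handle ? 0 : -EIO;
}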
- * - * This function is issued after the user receives a Connection Pending external - * interrupt and now wishes to complete the IUCV communication path. - * Returns: - * return code from CP - */ -int -iucv_accept(__u16 pathid, __u16 msglim_reqstd, - __u8 user_data[16], int flags1, - iucv_handle_t handle, void *pgm_data, - int *flags1_out, __u16 * msglim) -{ - ulong b2f0_result = 0; - ulong flags; - struct list_head *lh; - handler *h = NULL; - iparml_control *parm; - - iucv_debug(1, "entering"); - iucv_debug(1, "pathid = %d", pathid); - - /* Checking if handle is valid */ - spin_lock_irqsave (&iucv_lock, flags); - list_for_each(lh, &iucv_handler_table) { - if ((handler *)handle == list_entry(lh, handler, list)) { - h = (handler *)handle; - break; - } - } - spin_unlock_irqrestore (&iucv_lock, flags); - - if (!h) { - if (handle) - printk(KERN_WARNING - "%s: Handler not found in iucv_handler_table.\n", - __FUNCTION__); - else - printk(KERN_WARNING - "%s: NULL handle passed by application.\n", - __FUNCTION__); - return -EINVAL; - } - - parm = (iparml_control *)grab_param(); - - parm->ippathid = pathid; - parm->ipmsglim = msglim_reqstd; - if (user_data) - memcpy(parm->ipuser, user_data, sizeof(parm->ipuser)); - - parm->ipflags1 = (__u8)flags1; - b2f0_result = b2f0(ACCEPT, parm); - - if (!b2f0_result) { - if (msglim) - *msglim = parm->ipmsglim; - if (pgm_data) - h->pgm_data = pgm_data; - if (flags1_out) - *flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0; - } - release_param(parm); - - iucv_debug(1, "exiting"); - return b2f0_result; -} - -/** - * iucv_connect: - * @pathid: Path identification number - * @msglim_reqstd: Number of outstanding messages requested - * @user_data: 16-byte user data - * @userid: 8-byte of user identification - * @system_name: 8-byte identifying the system name - * @flags1: Specifies options for this path: - * - IPPRTY (0x20) Specifies if you want to send priority message. - * - IPRMDATA (0x80) Specifies whether your program can handle a message - * in the parameter list. - * - IPQUSCE (0x40) Specifies whether you want to quiesce the path being - * established. - * - IPLOCAL (0x01) Allows an application to force the partner to be on the - * local system. If local is specified then target class - * cannot be specified. - * @flags1_out: Pointer to an int. If not NULL, on return the options for - * the path are stored at the given location: - * - IPPRTY (0x20) Indicates you may send a priority message. - * @msglim: Pointer to an __u16. If not NULL, on return the maximum - * number of outstanding messages is stored at the given - * location. - * @handle: Address of handler. - * @pgm_data: Application data to be passed to interrupt handlers. - * - * This function establishes an IUCV path. Although the connect may complete - * successfully, you are not able to use the path until you receive an IUCV - * Connection Complete external interrupt. 
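/*
 * An illustrative sketch of a hypothetical ConnectionPending handler
 * (the passive side): the path is completed with iucv_accept() or
 * declined with iucv_sever().  example_handle comes from the
 * registration sketch above.
 */
static void
example_connection_pending(iucv_ConnectionPending *eib, void *pgm_data)
{
	__u16 msglim = 0;

	if (iucv_accept(eib->ippathid, eib->ipmsglim, eib->ipuser, 0,
			example_handle, NULL, NULL, &msglim))
		iucv_sever(eib->ippathid, eib->ipuser);
}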
- * Returns: return code from CP, or one of the following - * - ENOMEM - * - return code from iucv_declare_buffer - * - EINVAL - invalid handle passed by application - * - EINVAL - pathid address is NULL - * - ENOMEM - pathid table storage allocation failed - * - return code from internal function add_pathid - */ -int -iucv_connect (__u16 *pathid, __u16 msglim_reqstd, - __u8 user_data[16], __u8 userid[8], - __u8 system_name[8], int flags1, - int *flags1_out, __u16 * msglim, - iucv_handle_t handle, void *pgm_data) -{ - iparml_control *parm; - iparml_control local_parm; - struct list_head *lh; - ulong b2f0_result = 0; - ulong flags; - int add_pathid_result = 0; - handler *h = NULL; - __u8 no_memory[16] = "NO MEMORY"; - - iucv_debug(1, "entering"); - - /* Checking if handle is valid */ - spin_lock_irqsave (&iucv_lock, flags); - list_for_each(lh, &iucv_handler_table) { - if ((handler *)handle == list_entry(lh, handler, list)) { - h = (handler *)handle; - break; - } - } - spin_unlock_irqrestore (&iucv_lock, flags); - - if (!h) { - if (handle) - printk(KERN_WARNING - "%s: Handler not found in iucv_handler_table.\n", - __FUNCTION__); - else - printk(KERN_WARNING - "%s: NULL handle passed by application.\n", - __FUNCTION__); - return -EINVAL; - } - - if (pathid == NULL) { - printk(KERN_WARNING "%s: NULL pathid pointer\n", - __FUNCTION__); - return -EINVAL; - } - - parm = (iparml_control *)grab_param(); - - parm->ipmsglim = msglim_reqstd; - - if (user_data) - memcpy(parm->ipuser, user_data, sizeof(parm->ipuser)); - - if (userid) { - memcpy(parm->ipvmid, userid, sizeof(parm->ipvmid)); - ASCEBC(parm->ipvmid, sizeof(parm->ipvmid)); - EBC_TOUPPER(parm->ipvmid, sizeof(parm->ipvmid)); - } - - if (system_name) { - memcpy(parm->iptarget, system_name, sizeof(parm->iptarget)); - ASCEBC(parm->iptarget, sizeof(parm->iptarget)); - EBC_TOUPPER(parm->iptarget, sizeof(parm->iptarget)); - } - - /* In order to establish an IUCV connection, the procedure is: - * - * b2f0(CONNECT) - * take the ippathid from the b2f0 call - * register the handler to the ippathid - * - * Unfortunately, the ConnectionEstablished message gets sent after the - * b2f0(CONNECT) call but before the register is handled. - * - * In order for this race condition to be eliminated, the IUCV Control - * Interrupts must be disabled for the above procedure. - * - * David Kennedy <dkennedy@linuxcare.com> - */ - - /* Enable everything but IUCV Control messages */ - iucv_setmask(~(AllInterrupts)); - messagesDisabled = 1; - - spin_lock_irqsave (&iucv_lock, flags); - parm->ipflags1 = (__u8)flags1; - b2f0_result = b2f0(CONNECT, parm); - memcpy(&local_parm, parm, sizeof(local_parm)); - release_param(parm); - parm = &local_parm; - if (!b2f0_result) - add_pathid_result = __iucv_add_pathid(parm->ippathid, h); - spin_unlock_irqrestore (&iucv_lock, flags); - - if (b2f0_result) { - iucv_setmask(~0); - messagesDisabled = 0; - return b2f0_result; - } - - *pathid = parm->ippathid; - - /* Enable everything again */ - iucv_setmask(IUCVControlInterruptsFlag); - - if (msglim) - *msglim = parm->ipmsglim; - if (flags1_out) - *flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0; - - if (add_pathid_result) { - iucv_sever(*pathid, no_memory); - printk(KERN_WARNING "%s: add_pathid failed with rc =" - " %d\n", __FUNCTION__, add_pathid_result); - return(add_pathid_result); - } - - iucv_debug(1, "exiting"); - return b2f0_result; -} - -/** - * iucv_purge: - * @pathid: Path identification number - * @msgid: Message ID of message to purge. 
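/*
 * An illustrative sketch of the active side, again with made-up names:
 * iucv_connect() returns a path id, but the path may only be used once
 * the ConnectionComplete interrupt has arrived.  With the default mask
 * (see the registration sketch), user_data is what the listener matches
 * against its registered pgmname; "PEERVM  " is a hypothetical guest.
 */
static int
example_connect_to_peer(__u16 *pathid)
{
	__u8 user_data[16] = "EXAMPLESRV      ";
	__u8 peer[8]       = "PEERVM  ";

	/* request up to 10 outstanding messages, no special path flags */
	return iucv_connect(pathid, 10, user_data, peer, NULL, 0,
			    NULL, NULL, example_handle, NULL);
}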
- * @srccls: Message class of the message to purge. - * @audit: Pointer to an __u32. If not NULL, on return, information about - * asynchronous errors that may have affected the normal completion - * of this message ist stored at the given location. - * - * Cancels a message you have sent. - * Returns: return code from CP - */ -int -iucv_purge (__u16 pathid, __u32 msgid, __u32 srccls, __u32 *audit) -{ - iparml_purge *parm; - ulong b2f0_result = 0; - - iucv_debug(1, "entering"); - iucv_debug(1, "pathid = %d", pathid); - - parm = (iparml_purge *)grab_param(); - - parm->ipmsgid = msgid; - parm->ippathid = pathid; - parm->ipsrccls = srccls; - parm->ipflags1 |= (IPSRCCLS | IPFGMID | IPFGPID); - b2f0_result = b2f0(PURGE, parm); - - if (!b2f0_result && audit) { - memcpy(audit, parm->ipaudit, sizeof(parm->ipaudit)); - /* parm->ipaudit has only 3 bytes */ - *audit >>= 8; - } - - release_param(parm); - - iucv_debug(1, "b2f0_result = %ld", b2f0_result); - iucv_debug(1, "exiting"); - return b2f0_result; -} - -/** - * iucv_query_generic: - * @want_maxconn: Flag, describing which value is to be returned. - * - * Helper function for iucv_query_maxconn() and iucv_query_bufsize(). - * - * Returns: The buffersize, if want_maxconn is 0; the maximum number of - * connections, if want_maxconn is 1 or an error-code < 0 on failure. - */ -static int -iucv_query_generic(int want_maxconn) -{ - register unsigned long reg0 asm ("0"); - register unsigned long reg1 asm ("1"); - iparml_purge *parm = (iparml_purge *)grab_param(); - int bufsize, maxconn; - int ccode; - - /** - * Call b2f0 and store R0 (max buffer size), - * R1 (max connections) and CC. - */ - reg0 = QUERY; - reg1 = virt_to_phys(parm); - asm volatile( - " .long 0xb2f01000\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc"); - bufsize = reg0; - maxconn = reg1; - release_param(parm); - - if (ccode) - return -EPERM; - if (want_maxconn) - return maxconn; - return bufsize; -} - -/** - * iucv_query_maxconn: - * - * Determines the maximum number of connections thay may be established. - * - * Returns: Maximum number of connections that can be. - */ -ulong -iucv_query_maxconn(void) -{ - return iucv_query_generic(1); -} - -/** - * iucv_query_bufsize: - * - * Determines the size of the external interrupt buffer. - * - * Returns: Size of external interrupt buffer. - */ -ulong -iucv_query_bufsize (void) -{ - return iucv_query_generic(0); -} - -/** - * iucv_quiesce: - * @pathid: Path identification number - * @user_data: 16-byte user data - * - * Temporarily suspends incoming messages on an IUCV path. - * You can later reactivate the path by invoking the iucv_resume function. - * Returns: return code from CP - */ -int -iucv_quiesce (__u16 pathid, __u8 user_data[16]) -{ - iparml_control *parm; - ulong b2f0_result = 0; - - iucv_debug(1, "entering"); - iucv_debug(1, "pathid = %d", pathid); - - parm = (iparml_control *)grab_param(); - - memcpy(parm->ipuser, user_data, sizeof(parm->ipuser)); - parm->ippathid = pathid; - - b2f0_result = b2f0(QUIESCE, parm); - release_param(parm); - - iucv_debug(1, "b2f0_result = %ld", b2f0_result); - iucv_debug(1, "exiting"); - - return b2f0_result; -} - -/** - * iucv_receive: - * @pathid: Path identification number. - * @buffer: Address of buffer to receive. Must be below 2G. - * @buflen: Length of buffer to receive. - * @msgid: Specifies the message ID. - * @trgcls: Specifies target class. - * @flags1_out: Receives options for path on return. 
- * - IPNORPY (0x10) Specifies whether a reply is required - * - IPPRTY (0x20) Specifies if you want to send priority message - * - IPRMDATA (0x80) Specifies the data is contained in the parameter list - * @residual_buffer: Receives the address of buffer updated by the number - * of bytes you have received on return. - * @residual_length: On return, receives one of the following values: - * - 0 If the receive buffer is the same length as - * the message. - * - Remaining bytes in buffer If the receive buffer is longer than the - * message. - * - Remaining bytes in message If the receive buffer is shorter than the - * message. - * - * This function receives messages that are being sent to you over established - * paths. - * Returns: return code from CP IUCV call; If the receive buffer is shorter - * than the message, always 5 - * -EINVAL - buffer address is pointing to NULL - */ -int -iucv_receive (__u16 pathid, __u32 msgid, __u32 trgcls, - void *buffer, ulong buflen, - int *flags1_out, ulong * residual_buffer, ulong * residual_length) -{ - iparml_db *parm; - ulong b2f0_result; - int moved = 0; /* number of bytes moved from parmlist to buffer */ - - iucv_debug(2, "entering"); - - if (!buffer) - return -EINVAL; - - parm = (iparml_db *)grab_param(); - - parm->ipbfadr1 = (__u32) (addr_t) buffer; - parm->ipbfln1f = (__u32) ((ulong) buflen); - parm->ipmsgid = msgid; - parm->ippathid = pathid; - parm->iptrgcls = trgcls; - parm->ipflags1 = (IPFGPID | IPFGMID | IPFGMCL); - - b2f0_result = b2f0(RECEIVE, parm); - - if (!b2f0_result || b2f0_result == 5) { - if (flags1_out) { - iucv_debug(2, "*flags1_out = %d", *flags1_out); - *flags1_out = (parm->ipflags1 & (~0x07)); - iucv_debug(2, "*flags1_out = %d", *flags1_out); - } - - if (!(parm->ipflags1 & IPRMDATA)) { /*msg not in parmlist */ - if (residual_length) - *residual_length = parm->ipbfln1f; - - if (residual_buffer) - *residual_buffer = parm->ipbfadr1; - } else { - moved = min_t (unsigned long, buflen, 8); - - memcpy ((char *) buffer, - (char *) &parm->ipbfadr1, moved); - - if (buflen < 8) - b2f0_result = 5; - - if (residual_length) - *residual_length = abs (buflen - 8); - - if (residual_buffer) - *residual_buffer = (ulong) (buffer + moved); - } - } - release_param(parm); - - iucv_debug(2, "exiting"); - return b2f0_result; -} - -/* - * Name: iucv_receive_array - * Purpose: This function receives messages that are being sent to you - * over established paths. - * Input: pathid - path identification number - * buffer - address of array of buffers - * buflen - total length of buffers - * msgid - specifies the message ID. - * trgcls - specifies target class - * Output: - * flags1_out: Options for path. - * IPNORPY - 0x10 specifies whether a reply is required - * IPPRTY - 0x20 specifies if you want to send priority message - * IPRMDATA - 0x80 specifies the data is contained in the parameter list - * residual_buffer - address points to the current list entry IUCV - * is working on. - * residual_length - - * Contains one of the following values, if the receive buffer is: - * The same length as the message, this field is zero. - * Longer than the message, this field contains the number of - * bytes remaining in the buffer. - * Shorter than the message, this field contains the residual - * count (that is, the number of bytes remaining in the - * message that does not fit into the buffer. In this case - * b2f0_result = 5. 
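/*
 * An illustrative sketch of a hypothetical MessagePending handler using
 * iucv_receive().  A return value of 5 means the buffer was shorter
 * than the message and residual holds the bytes still pending; messages
 * sent with the *_prmmsg variants (IPRMDATA) are copied out of the
 * parameter list by iucv_receive() itself.  Buffer size and names are
 * made up; the documented requirement is that the buffer lies below 2G.
 */
static void
example_message_pending(iucv_MessagePending *eib, void *pgm_data)
{
	static __u8 buf[256];
	ulong residual = 0;
	int rc;

	rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls,
			  buf, sizeof(buf), NULL, NULL, &residual);
	if (rc == 5)
		printk(KERN_DEBUG "example: message truncated, %lu bytes "
		       "left\n", residual);
	else if (rc)
		printk(KERN_DEBUG "example: receive failed, rc=%d\n", rc);
}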
- * Return: b2f0_result - return code from CP - * (-EINVAL) - buffer address is NULL - */ -int -iucv_receive_array (__u16 pathid, - __u32 msgid, __u32 trgcls, - iucv_array_t * buffer, ulong buflen, - int *flags1_out, - ulong * residual_buffer, ulong * residual_length) -{ - iparml_db *parm; - ulong b2f0_result; - int i = 0, moved = 0, need_to_move = 8, dyn_len; - - iucv_debug(2, "entering"); - - if (!buffer) - return -EINVAL; - - parm = (iparml_db *)grab_param(); - - parm->ipbfadr1 = (__u32) ((ulong) buffer); - parm->ipbfln1f = (__u32) buflen; - parm->ipmsgid = msgid; - parm->ippathid = pathid; - parm->iptrgcls = trgcls; - parm->ipflags1 = (IPBUFLST | IPFGPID | IPFGMID | IPFGMCL); - - b2f0_result = b2f0(RECEIVE, parm); - - if (!b2f0_result || b2f0_result == 5) { - - if (flags1_out) { - iucv_debug(2, "*flags1_out = %d", *flags1_out); - *flags1_out = (parm->ipflags1 & (~0x07)); - iucv_debug(2, "*flags1_out = %d", *flags1_out); - } - - if (!(parm->ipflags1 & IPRMDATA)) { /*msg not in parmlist */ - - if (residual_length) - *residual_length = parm->ipbfln1f; - - if (residual_buffer) - *residual_buffer = parm->ipbfadr1; - - } else { - /* copy msg from parmlist to users array. */ - - while ((moved < 8) && (moved < buflen)) { - dyn_len = - min_t (unsigned int, - (buffer + i)->length, need_to_move); - - memcpy ((char *)((ulong)((buffer + i)->address)), - ((char *) &parm->ipbfadr1) + moved, - dyn_len); - - moved += dyn_len; - need_to_move -= dyn_len; - - (buffer + i)->address = - (__u32) - ((ulong)(__u8 *) ((ulong)(buffer + i)->address) - + dyn_len); - - (buffer + i)->length -= dyn_len; - i++; - } - - if (need_to_move) /* buflen < 8 bytes */ - b2f0_result = 5; - - if (residual_length) - *residual_length = abs (buflen - 8); - - if (residual_buffer) { - if (!moved) - *residual_buffer = (ulong) buffer; - else - *residual_buffer = - (ulong) (buffer + (i - 1)); - } - - } - } - release_param(parm); - - iucv_debug(2, "exiting"); - return b2f0_result; -} - -/** - * iucv_reject: - * @pathid: Path identification number. - * @msgid: Message ID of the message to reject. - * @trgcls: Target class of the message to reject. - * Returns: return code from CP - * - * Refuses a specified message. Between the time you are notified of a - * message and the time that you complete the message, the message may - * be rejected. - */ -int -iucv_reject (__u16 pathid, __u32 msgid, __u32 trgcls) -{ - iparml_db *parm; - ulong b2f0_result = 0; - - iucv_debug(1, "entering"); - iucv_debug(1, "pathid = %d", pathid); - - parm = (iparml_db *)grab_param(); - - parm->ippathid = pathid; - parm->ipmsgid = msgid; - parm->iptrgcls = trgcls; - parm->ipflags1 = (IPFGMCL | IPFGMID | IPFGPID); - - b2f0_result = b2f0(REJECT, parm); - release_param(parm); - - iucv_debug(1, "b2f0_result = %ld", b2f0_result); - iucv_debug(1, "exiting"); - - return b2f0_result; -} - -/* - * Name: iucv_reply - * Purpose: This function responds to the two-way messages that you - * receive. You must identify completely the message to - * which you wish to reply. ie, pathid, msgid, and trgcls. - * Input: pathid - path identification number - * msgid - specifies the message ID. - * trgcls - specifies target class - * flags1 - option for path - * IPPRTY- 0x20 - specifies if you want to send priority message - * buffer - address of reply buffer - * buflen - length of reply buffer - * Output: ipbfadr2 - Address of buffer updated by the number - * of bytes you have moved. 
- * ipbfln2f - Contains one of the following values: - * If the answer buffer is the same length as the reply, this field - * contains zero. - * If the answer buffer is longer than the reply, this field contains - * the number of bytes remaining in the buffer. - * If the answer buffer is shorter than the reply, this field contains - * a residual count (that is, the number of bytes remianing in the - * reply that does not fit into the buffer. In this - * case b2f0_result = 5. - * Return: b2f0_result - return code from CP - * (-EINVAL) - buffer address is NULL - */ -int -iucv_reply (__u16 pathid, - __u32 msgid, __u32 trgcls, - int flags1, - void *buffer, ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f) -{ - iparml_db *parm; - ulong b2f0_result; - - iucv_debug(2, "entering"); - - if (!buffer) - return -EINVAL; - - parm = (iparml_db *)grab_param(); - - parm->ipbfadr2 = (__u32) ((ulong) buffer); - parm->ipbfln2f = (__u32) buflen; /* length of message */ - parm->ippathid = pathid; - parm->ipmsgid = msgid; - parm->iptrgcls = trgcls; - parm->ipflags1 = (__u8) flags1; /* priority message */ - - b2f0_result = b2f0(REPLY, parm); - - if ((!b2f0_result) || (b2f0_result == 5)) { - if (ipbfadr2) - *ipbfadr2 = parm->ipbfadr2; - if (ipbfln2f) - *ipbfln2f = parm->ipbfln2f; - } - release_param(parm); - - iucv_debug(2, "exiting"); - - return b2f0_result; -} - -/* - * Name: iucv_reply_array - * Purpose: This function responds to the two-way messages that you - * receive. You must identify completely the message to - * which you wish to reply. ie, pathid, msgid, and trgcls. - * The array identifies a list of addresses and lengths of - * discontiguous buffers that contains the reply data. - * Input: pathid - path identification number - * msgid - specifies the message ID. - * trgcls - specifies target class - * flags1 - option for path - * IPPRTY- specifies if you want to send priority message - * buffer - address of array of reply buffers - * buflen - total length of reply buffers - * Output: ipbfadr2 - Address of buffer which IUCV is currently working on. - * ipbfln2f - Contains one of the following values: - * If the answer buffer is the same length as the reply, this field - * contains zero. - * If the answer buffer is longer than the reply, this field contains - * the number of bytes remaining in the buffer. - * If the answer buffer is shorter than the reply, this field contains - * a residual count (that is, the number of bytes remianing in the - * reply that does not fit into the buffer. In this - * case b2f0_result = 5. - * Return: b2f0_result - return code from CP - * (-EINVAL) - buffer address is NULL -*/ -int -iucv_reply_array (__u16 pathid, - __u32 msgid, __u32 trgcls, - int flags1, - iucv_array_t * buffer, - ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f) -{ - iparml_db *parm; - ulong b2f0_result; - - iucv_debug(2, "entering"); - - if (!buffer) - return -EINVAL; - - parm = (iparml_db *)grab_param(); - - parm->ipbfadr2 = (__u32) ((ulong) buffer); - parm->ipbfln2f = buflen; /* length of message */ - parm->ippathid = pathid; - parm->ipmsgid = msgid; - parm->iptrgcls = trgcls; - parm->ipflags1 = (IPANSLST | flags1); - - b2f0_result = b2f0(REPLY, parm); - - if ((!b2f0_result) || (b2f0_result == 5)) { - - if (ipbfadr2) - *ipbfadr2 = parm->ipbfadr2; - if (ipbfln2f) - *ipbfln2f = parm->ipbfln2f; - } - release_param(parm); - - iucv_debug(2, "exiting"); - - return b2f0_result; -} - -/* - * Name: iucv_reply_prmmsg - * Purpose: This function responds to the two-way messages that you - * receive. 
You must identify completely the message to - * which you wish to reply. ie, pathid, msgid, and trgcls. - * Prmmsg signifies the data is moved into the - * parameter list. - * Input: pathid - path identification number - * msgid - specifies the message ID. - * trgcls - specifies target class - * flags1 - option for path - * IPPRTY- specifies if you want to send priority message - * prmmsg - 8-bytes of data to be placed into the parameter - * list. - * Output: NA - * Return: b2f0_result - return code from CP -*/ -int -iucv_reply_prmmsg (__u16 pathid, - __u32 msgid, __u32 trgcls, int flags1, __u8 prmmsg[8]) -{ - iparml_dpl *parm; - ulong b2f0_result; - - iucv_debug(2, "entering"); - - parm = (iparml_dpl *)grab_param(); - - parm->ippathid = pathid; - parm->ipmsgid = msgid; - parm->iptrgcls = trgcls; - memcpy(parm->iprmmsg, prmmsg, sizeof (parm->iprmmsg)); - parm->ipflags1 = (IPRMDATA | flags1); - - b2f0_result = b2f0(REPLY, parm); - release_param(parm); - - iucv_debug(2, "exiting"); - - return b2f0_result; -} - -/** - * iucv_resume: - * @pathid: Path identification number - * @user_data: 16-byte of user data - * - * This function restores communication over a quiesced path. - * Returns: return code from CP - */ -int -iucv_resume (__u16 pathid, __u8 user_data[16]) -{ - iparml_control *parm; - ulong b2f0_result = 0; - - iucv_debug(1, "entering"); - iucv_debug(1, "pathid = %d", pathid); - - parm = (iparml_control *)grab_param(); - - memcpy (parm->ipuser, user_data, sizeof (*user_data)); - parm->ippathid = pathid; - - b2f0_result = b2f0(RESUME, parm); - release_param(parm); - - iucv_debug(1, "exiting"); - - return b2f0_result; -} - -/* - * Name: iucv_send - * Purpose: sends messages - * Input: pathid - ushort, pathid - * msgid - ulong *, id of message returned to caller - * trgcls - ulong, target message class - * srccls - ulong, source message class - * msgtag - ulong, message tag - * flags1 - Contains options for this path. - * IPPRTY - Ox20 - specifies if you want to send a priority message. - * buffer - pointer to buffer - * buflen - ulong, length of buffer - * Output: b2f0_result - return code from b2f0 call - * msgid - returns message id - */ -int -iucv_send (__u16 pathid, __u32 * msgid, - __u32 trgcls, __u32 srccls, - __u32 msgtag, int flags1, void *buffer, ulong buflen) -{ - iparml_db *parm; - ulong b2f0_result; - - iucv_debug(2, "entering"); - - if (!buffer) - return -EINVAL; - - parm = (iparml_db *)grab_param(); - - parm->ipbfadr1 = (__u32) ((ulong) buffer); - parm->ippathid = pathid; - parm->iptrgcls = trgcls; - parm->ipbfln1f = (__u32) buflen; /* length of message */ - parm->ipsrccls = srccls; - parm->ipmsgtag = msgtag; - parm->ipflags1 = (IPNORPY | flags1); /* one way priority message */ - - b2f0_result = b2f0(SEND, parm); - - if ((!b2f0_result) && (msgid)) - *msgid = parm->ipmsgid; - release_param(parm); - - iucv_debug(2, "exiting"); - - return b2f0_result; -} - -/* - * Name: iucv_send_array - * Purpose: This function transmits data to another application. - * The contents of buffer is the address of the array of - * addresses and lengths of discontiguous buffers that hold - * the message text. This is a one-way message and the - * receiver will not reply to the message. 
- * Input: pathid - path identification number - * trgcls - specifies target class - * srccls - specifies the source message class - * msgtag - specifies a tag to be associated witht the message - * flags1 - option for path - * IPPRTY- specifies if you want to send priority message - * buffer - address of array of send buffers - * buflen - total length of send buffers - * Output: msgid - specifies the message ID. - * Return: b2f0_result - return code from CP - * (-EINVAL) - buffer address is NULL - */ -int -iucv_send_array (__u16 pathid, - __u32 * msgid, - __u32 trgcls, - __u32 srccls, - __u32 msgtag, int flags1, iucv_array_t * buffer, ulong buflen) -{ - iparml_db *parm; - ulong b2f0_result; - - iucv_debug(2, "entering"); - - if (!buffer) - return -EINVAL; - - parm = (iparml_db *)grab_param(); - - parm->ippathid = pathid; - parm->iptrgcls = trgcls; - parm->ipbfadr1 = (__u32) ((ulong) buffer); - parm->ipbfln1f = (__u32) buflen; /* length of message */ - parm->ipsrccls = srccls; - parm->ipmsgtag = msgtag; - parm->ipflags1 = (IPNORPY | IPBUFLST | flags1); - b2f0_result = b2f0(SEND, parm); - - if ((!b2f0_result) && (msgid)) - *msgid = parm->ipmsgid; - release_param(parm); - - iucv_debug(2, "exiting"); - return b2f0_result; -} - -/* - * Name: iucv_send_prmmsg - * Purpose: This function transmits data to another application. - * Prmmsg specifies that the 8-bytes of data are to be moved - * into the parameter list. This is a one-way message and the - * receiver will not reply to the message. - * Input: pathid - path identification number - * trgcls - specifies target class - * srccls - specifies the source message class - * msgtag - specifies a tag to be associated with the message - * flags1 - option for path - * IPPRTY- specifies if you want to send priority message - * prmmsg - 8-bytes of data to be placed into parameter list - * Output: msgid - specifies the message ID. - * Return: b2f0_result - return code from CP -*/ -int -iucv_send_prmmsg (__u16 pathid, - __u32 * msgid, - __u32 trgcls, - __u32 srccls, __u32 msgtag, int flags1, __u8 prmmsg[8]) -{ - iparml_dpl *parm; - ulong b2f0_result; - - iucv_debug(2, "entering"); - - parm = (iparml_dpl *)grab_param(); - - parm->ippathid = pathid; - parm->iptrgcls = trgcls; - parm->ipsrccls = srccls; - parm->ipmsgtag = msgtag; - parm->ipflags1 = (IPRMDATA | IPNORPY | flags1); - memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg)); - - b2f0_result = b2f0(SEND, parm); - - if ((!b2f0_result) && (msgid)) - *msgid = parm->ipmsgid; - release_param(parm); - - iucv_debug(2, "exiting"); - - return b2f0_result; -} - -/* - * Name: iucv_send2way - * Purpose: This function transmits data to another application. - * Data to be transmitted is in a buffer. The receiver - * of the send is expected to reply to the message and - * a buffer is provided into which IUCV moves the reply - * to this message. - * Input: pathid - path identification number - * trgcls - specifies target class - * srccls - specifies the source message class - * msgtag - specifies a tag associated with the message - * flags1 - option for path - * IPPRTY- specifies if you want to send priority message - * buffer - address of send buffer - * buflen - length of send buffer - * ansbuf - address of buffer to reply with - * anslen - length of buffer to reply with - * Output: msgid - specifies the message ID. 
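/*
 * An illustrative sketch of the sending side, with made-up buffers and
 * tag: iucv_send() is fire-and-forget, while iucv_send2way() also names
 * an answer buffer and later raises a MessageComplete interrupt once
 * the partner has replied.
 */
static int
example_send(__u16 pathid, void *data, ulong len, void *answer, ulong anslen)
{
	__u32 msgid;
	int rc;

	/* one-way message: the partner is not expected to reply */
	rc = iucv_send(pathid, &msgid, 0, 0, 0x42, 0, data, len);
	if (rc)
		return rc;

	/* two-way message: the reply will be placed into answer/anslen */
	return iucv_send2way(pathid, &msgid, 0, 0, 0x42, 0,
			     data, len, answer, anslen);
}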
- * Return: b2f0_result - return code from CP - * (-EINVAL) - buffer or ansbuf address is NULL - */ -int -iucv_send2way (__u16 pathid, - __u32 * msgid, - __u32 trgcls, - __u32 srccls, - __u32 msgtag, - int flags1, - void *buffer, ulong buflen, void *ansbuf, ulong anslen) -{ - iparml_db *parm; - ulong b2f0_result; - - iucv_debug(2, "entering"); - - if (!buffer || !ansbuf) - return -EINVAL; - - parm = (iparml_db *)grab_param(); - - parm->ippathid = pathid; - parm->iptrgcls = trgcls; - parm->ipbfadr1 = (__u32) ((ulong) buffer); - parm->ipbfln1f = (__u32) buflen; /* length of message */ - parm->ipbfadr2 = (__u32) ((ulong) ansbuf); - parm->ipbfln2f = (__u32) anslen; - parm->ipsrccls = srccls; - parm->ipmsgtag = msgtag; - parm->ipflags1 = flags1; /* priority message */ - - b2f0_result = b2f0(SEND, parm); - - if ((!b2f0_result) && (msgid)) - *msgid = parm->ipmsgid; - release_param(parm); - - iucv_debug(2, "exiting"); - - return b2f0_result; -} - -/* - * Name: iucv_send2way_array - * Purpose: This function transmits data to another application. - * The contents of buffer is the address of the array of - * addresses and lengths of discontiguous buffers that hold - * the message text. The receiver of the send is expected to - * reply to the message and a buffer is provided into which - * IUCV moves the reply to this message. - * Input: pathid - path identification number - * trgcls - specifies target class - * srccls - specifies the source message class - * msgtag - spcifies a tag to be associated with the message - * flags1 - option for path - * IPPRTY- specifies if you want to send priority message - * buffer - address of array of send buffers - * buflen - total length of send buffers - * ansbuf - address of buffer to reply with - * anslen - length of buffer to reply with - * Output: msgid - specifies the message ID. - * Return: b2f0_result - return code from CP - * (-EINVAL) - buffer address is NULL - */ -int -iucv_send2way_array (__u16 pathid, - __u32 * msgid, - __u32 trgcls, - __u32 srccls, - __u32 msgtag, - int flags1, - iucv_array_t * buffer, - ulong buflen, iucv_array_t * ansbuf, ulong anslen) -{ - iparml_db *parm; - ulong b2f0_result; - - iucv_debug(2, "entering"); - - if (!buffer || !ansbuf) - return -EINVAL; - - parm = (iparml_db *)grab_param(); - - parm->ippathid = pathid; - parm->iptrgcls = trgcls; - parm->ipbfadr1 = (__u32) ((ulong) buffer); - parm->ipbfln1f = (__u32) buflen; /* length of message */ - parm->ipbfadr2 = (__u32) ((ulong) ansbuf); - parm->ipbfln2f = (__u32) anslen; - parm->ipsrccls = srccls; - parm->ipmsgtag = msgtag; - parm->ipflags1 = (IPBUFLST | IPANSLST | flags1); - b2f0_result = b2f0(SEND, parm); - if ((!b2f0_result) && (msgid)) - *msgid = parm->ipmsgid; - release_param(parm); - - iucv_debug(2, "exiting"); - return b2f0_result; -} - -/* - * Name: iucv_send2way_prmmsg - * Purpose: This function transmits data to another application. - * Prmmsg specifies that the 8-bytes of data are to be moved - * into the parameter list. This is a two-way message and the - * receiver of the message is expected to reply. A buffer - * is provided into which IUCV moves the reply to this - * message. 
- * Input: pathid - path identification number - * trgcls - specifies target class - * srccls - specifies the source message class - * msgtag - specifies a tag to be associated with the message - * flags1 - option for path - * IPPRTY- specifies if you want to send priority message - * prmmsg - 8-bytes of data to be placed in parameter list - * ansbuf - address of buffer to reply with - * anslen - length of buffer to reply with - * Output: msgid - specifies the message ID. - * Return: b2f0_result - return code from CP - * (-EINVAL) - buffer address is NULL -*/ -int -iucv_send2way_prmmsg (__u16 pathid, - __u32 * msgid, - __u32 trgcls, - __u32 srccls, - __u32 msgtag, - ulong flags1, __u8 prmmsg[8], void *ansbuf, ulong anslen) -{ - iparml_dpl *parm; - ulong b2f0_result; - - iucv_debug(2, "entering"); - - if (!ansbuf) - return -EINVAL; - - parm = (iparml_dpl *)grab_param(); - - parm->ippathid = pathid; - parm->iptrgcls = trgcls; - parm->ipsrccls = srccls; - parm->ipmsgtag = msgtag; - parm->ipbfadr2 = (__u32) ((ulong) ansbuf); - parm->ipbfln2f = (__u32) anslen; - parm->ipflags1 = (IPRMDATA | flags1); /* message in prmlist */ - memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg)); - - b2f0_result = b2f0(SEND, parm); - - if ((!b2f0_result) && (msgid)) - *msgid = parm->ipmsgid; - release_param(parm); - - iucv_debug(2, "exiting"); - - return b2f0_result; -} - -/* - * Name: iucv_send2way_prmmsg_array - * Purpose: This function transmits data to another application. - * Prmmsg specifies that the 8-bytes of data are to be moved - * into the parameter list. This is a two-way message and the - * receiver of the message is expected to reply. A buffer - * is provided into which IUCV moves the reply to this - * message. The contents of ansbuf is the address of the - * array of addresses and lengths of discontiguous buffers - * that contain the reply. - * Input: pathid - path identification number - * trgcls - specifies target class - * srccls - specifies the source message class - * msgtag - specifies a tag to be associated with the message - * flags1 - option for path - * IPPRTY- specifies if you want to send priority message - * prmmsg - 8-bytes of data to be placed into the parameter list - * ansbuf - address of buffer to reply with - * anslen - length of buffer to reply with - * Output: msgid - specifies the message ID. 
- * Return: b2f0_result - return code from CP - * (-EINVAL) - ansbuf address is NULL - */ -int -iucv_send2way_prmmsg_array (__u16 pathid, - __u32 * msgid, - __u32 trgcls, - __u32 srccls, - __u32 msgtag, - int flags1, - __u8 prmmsg[8], - iucv_array_t * ansbuf, ulong anslen) -{ - iparml_dpl *parm; - ulong b2f0_result; - - iucv_debug(2, "entering"); - - if (!ansbuf) - return -EINVAL; - - parm = (iparml_dpl *)grab_param(); - - parm->ippathid = pathid; - parm->iptrgcls = trgcls; - parm->ipsrccls = srccls; - parm->ipmsgtag = msgtag; - parm->ipbfadr2 = (__u32) ((ulong) ansbuf); - parm->ipbfln2f = (__u32) anslen; - parm->ipflags1 = (IPRMDATA | IPANSLST | flags1); - memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg)); - b2f0_result = b2f0(SEND, parm); - if ((!b2f0_result) && (msgid)) - *msgid = parm->ipmsgid; - release_param(parm); - - iucv_debug(2, "exiting"); - return b2f0_result; -} - -void -iucv_setmask_cpuid (void *result) -{ - iparml_set_mask *parm; - - iucv_debug(1, "entering"); - parm = (iparml_set_mask *)grab_param(); - parm->ipmask = *((__u8*)result); - *((ulong *)result) = b2f0(SETMASK, parm); - release_param(parm); - - iucv_debug(1, "b2f0_result = %ld", *((ulong *)result)); - iucv_debug(1, "exiting"); -} - -/* - * Name: iucv_setmask - * Purpose: This function enables or disables the following IUCV - * external interruptions: Nonpriority and priority message - * interrupts, nonpriority and priority reply interrupts. - * Input: SetMaskFlag - options for interrupts - * 0x80 - Nonpriority_MessagePendingInterruptsFlag - * 0x40 - Priority_MessagePendingInterruptsFlag - * 0x20 - Nonpriority_MessageCompletionInterruptsFlag - * 0x10 - Priority_MessageCompletionInterruptsFlag - * 0x08 - IUCVControlInterruptsFlag - * Output: NA - * Return: b2f0_result - return code from CP -*/ -int -iucv_setmask (int SetMaskFlag) -{ - union { - ulong result; - __u8 param; - } u; - int cpu; - - u.param = SetMaskFlag; - cpu = get_cpu(); - smp_call_function_on(iucv_setmask_cpuid, &u, 0, 1, iucv_cpuid); - put_cpu(); - - return u.result; -} - -/** - * iucv_sever: - * @pathid: Path identification number - * @user_data: 16-byte of user data - * - * This function terminates an iucv path. - * Returns: return code from CP - */ -int -iucv_sever(__u16 pathid, __u8 user_data[16]) -{ - iparml_control *parm; - ulong b2f0_result = 0; - - iucv_debug(1, "entering"); - parm = (iparml_control *)grab_param(); - - memcpy(parm->ipuser, user_data, sizeof(parm->ipuser)); - parm->ippathid = pathid; - - b2f0_result = b2f0(SEVER, parm); - - if (!b2f0_result) - iucv_remove_pathid(pathid); - release_param(parm); - - iucv_debug(1, "exiting"); - return b2f0_result; -} - -/* - * Interrupt Handlers - *******************************************************************************/ - -/** - * iucv_irq_handler: - * @regs: Current registers - * @code: irq code - * - * Handles external interrupts coming in from CP. - * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler(). 
- */ -static void -iucv_irq_handler(__u16 code) -{ - iucv_irqdata *irqdata; - - irqdata = kmalloc(sizeof(iucv_irqdata), GFP_ATOMIC); - if (!irqdata) { - printk(KERN_WARNING "%s: out of memory\n", __FUNCTION__); - return; - } - - memcpy(&irqdata->data, iucv_external_int_buffer, - sizeof(iucv_GeneralInterrupt)); - - spin_lock(&iucv_irq_queue_lock); - list_add_tail(&irqdata->queue, &iucv_irq_queue); - spin_unlock(&iucv_irq_queue_lock); - - tasklet_schedule(&iucv_tasklet); -} - -/** - * iucv_do_int: - * @int_buf: Pointer to copy of external interrupt buffer - * - * The workhorse for handling interrupts queued by iucv_irq_handler(). - * This function is called from the bottom half iucv_tasklet_handler(). - */ -static void -iucv_do_int(iucv_GeneralInterrupt * int_buf) -{ - handler *h = NULL; - struct list_head *lh; - ulong flags; - iucv_interrupt_ops_t *interrupt = NULL; /* interrupt addresses */ - __u8 temp_buff1[24], temp_buff2[24]; /* masked handler id. */ - int rc = 0, j = 0; - __u8 no_listener[16] = "NO LISTENER"; - - iucv_debug(2, "entering, pathid %d, type %02X", - int_buf->ippathid, int_buf->iptype); - iucv_dumpit("External Interrupt Buffer:", - int_buf, sizeof(iucv_GeneralInterrupt)); - - ASCEBC (no_listener, 16); - - if (int_buf->iptype != 01) { - if ((int_buf->ippathid) > (max_connections - 1)) { - printk(KERN_WARNING "%s: Got interrupt with pathid %d" - " > max_connections (%ld)\n", __FUNCTION__, - int_buf->ippathid, max_connections - 1); - } else { - h = iucv_pathid_table[int_buf->ippathid]; - interrupt = h->interrupt_table; - iucv_dumpit("Handler:", h, sizeof(handler)); - } - } - - /* end of if statement */ - switch (int_buf->iptype) { - case 0x01: /* connection pending */ - if (messagesDisabled) { - iucv_setmask(~0); - messagesDisabled = 0; - } - spin_lock_irqsave(&iucv_lock, flags); - list_for_each(lh, &iucv_handler_table) { - h = list_entry(lh, handler, list); - memcpy(temp_buff1, &(int_buf->ipvmid), 24); - memcpy(temp_buff2, &(h->id.userid), 24); - for (j = 0; j < 24; j++) { - temp_buff1[j] &= (h->id.mask)[j]; - temp_buff2[j] &= (h->id.mask)[j]; - } - - iucv_dumpit("temp_buff1:", - temp_buff1, sizeof(temp_buff1)); - iucv_dumpit("temp_buff2", - temp_buff2, sizeof(temp_buff2)); - - if (!memcmp (temp_buff1, temp_buff2, 24)) { - - iucv_debug(2, - "found a matching handler"); - break; - } else - h = NULL; - } - spin_unlock_irqrestore (&iucv_lock, flags); - if (h) { - /* ADD PATH TO PATHID TABLE */ - rc = iucv_add_pathid(int_buf->ippathid, h); - if (rc) { - iucv_sever (int_buf->ippathid, - no_listener); - iucv_debug(1, - "add_pathid failed, rc = %d", - rc); - } else { - interrupt = h->interrupt_table; - if (interrupt->ConnectionPending) { - EBCASC (int_buf->ipvmid, 8); - interrupt->ConnectionPending( - (iucv_ConnectionPending *)int_buf, - h->pgm_data); - } else - iucv_sever(int_buf->ippathid, - no_listener); - } - } else - iucv_sever(int_buf->ippathid, no_listener); - break; - - case 0x02: /*connection complete */ - if (messagesDisabled) { - iucv_setmask(~0); - messagesDisabled = 0; - } - if (h) { - if (interrupt->ConnectionComplete) - { - interrupt->ConnectionComplete( - (iucv_ConnectionComplete *)int_buf, - h->pgm_data); - } - else - iucv_debug(1, - "ConnectionComplete not called"); - } else - iucv_sever(int_buf->ippathid, no_listener); - break; - - case 0x03: /* connection severed */ - if (messagesDisabled) { - iucv_setmask(~0); - messagesDisabled = 0; - } - if (h) { - if (interrupt->ConnectionSevered) - interrupt->ConnectionSevered( - (iucv_ConnectionSevered *)int_buf, - 
h->pgm_data); - - else - iucv_sever (int_buf->ippathid, no_listener); - } else - iucv_sever(int_buf->ippathid, no_listener); - break; - - case 0x04: /* connection quiesced */ - if (messagesDisabled) { - iucv_setmask(~0); - messagesDisabled = 0; - } - if (h) { - if (interrupt->ConnectionQuiesced) - interrupt->ConnectionQuiesced( - (iucv_ConnectionQuiesced *)int_buf, - h->pgm_data); - else - iucv_debug(1, - "ConnectionQuiesced not called"); - } - break; - - case 0x05: /* connection resumed */ - if (messagesDisabled) { - iucv_setmask(~0); - messagesDisabled = 0; - } - if (h) { - if (interrupt->ConnectionResumed) - interrupt->ConnectionResumed( - (iucv_ConnectionResumed *)int_buf, - h->pgm_data); - else - iucv_debug(1, - "ConnectionResumed not called"); - } - break; - - case 0x06: /* priority message complete */ - case 0x07: /* nonpriority message complete */ - if (h) { - if (interrupt->MessageComplete) - interrupt->MessageComplete( - (iucv_MessageComplete *)int_buf, - h->pgm_data); - else - iucv_debug(2, - "MessageComplete not called"); - } - break; - - case 0x08: /* priority message pending */ - case 0x09: /* nonpriority message pending */ - if (h) { - if (interrupt->MessagePending) - interrupt->MessagePending( - (iucv_MessagePending *) int_buf, - h->pgm_data); - else - iucv_debug(2, - "MessagePending not called"); - } - break; - default: /* unknown iucv type */ - printk(KERN_WARNING "%s: unknown iucv interrupt\n", - __FUNCTION__); - break; - } /* end switch */ - - iucv_debug(2, "exiting pathid %d, type %02X", - int_buf->ippathid, int_buf->iptype); - - return; -} - -/** - * iucv_tasklet_handler: - * - * This function loops over the queue of irq buffers and runs iucv_do_int() - * on every queue element. - */ -static void -iucv_tasklet_handler(unsigned long ignored) -{ - struct list_head head; - struct list_head *next; - ulong flags; - - spin_lock_irqsave(&iucv_irq_queue_lock, flags); - list_add(&head, &iucv_irq_queue); - list_del_init(&iucv_irq_queue); - spin_unlock_irqrestore (&iucv_irq_queue_lock, flags); - - next = head.next; - while (next != &head) { - iucv_irqdata *p = list_entry(next, iucv_irqdata, queue); - - next = next->next; - iucv_do_int(&p->data); - kfree(p); - } - - return; -} - -subsys_initcall(iucv_init); -module_exit(iucv_exit); - -/** - * Export all public stuff - */ -EXPORT_SYMBOL (iucv_bus); -EXPORT_SYMBOL (iucv_root); -EXPORT_SYMBOL (iucv_accept); -EXPORT_SYMBOL (iucv_connect); -#if 0 -EXPORT_SYMBOL (iucv_purge); -EXPORT_SYMBOL (iucv_query_maxconn); -EXPORT_SYMBOL (iucv_query_bufsize); -EXPORT_SYMBOL (iucv_quiesce); -#endif -EXPORT_SYMBOL (iucv_receive); -#if 0 -EXPORT_SYMBOL (iucv_receive_array); -#endif -EXPORT_SYMBOL (iucv_reject); -#if 0 -EXPORT_SYMBOL (iucv_reply); -EXPORT_SYMBOL (iucv_reply_array); -EXPORT_SYMBOL (iucv_resume); -#endif -EXPORT_SYMBOL (iucv_reply_prmmsg); -EXPORT_SYMBOL (iucv_send); -EXPORT_SYMBOL (iucv_send2way); -EXPORT_SYMBOL (iucv_send2way_array); -EXPORT_SYMBOL (iucv_send2way_prmmsg); -EXPORT_SYMBOL (iucv_send2way_prmmsg_array); -#if 0 -EXPORT_SYMBOL (iucv_send_array); -EXPORT_SYMBOL (iucv_send_prmmsg); -EXPORT_SYMBOL (iucv_setmask); -#endif -EXPORT_SYMBOL (iucv_sever); -EXPORT_SYMBOL (iucv_register_program); -EXPORT_SYMBOL (iucv_unregister_program); diff --git a/drivers/s390/net/iucv.h b/drivers/s390/net/iucv.h deleted file mode 100644 index 5b6b1b7241c..00000000000 --- a/drivers/s390/net/iucv.h +++ /dev/null @@ -1,849 +0,0 @@ -/* - * drivers/s390/net/iucv.h - * IUCV base support. 
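/*
 * An illustrative sketch (hypothetical, continuing the examples above):
 * every iucv_interrupt_ops_t callback is invoked from
 * iucv_tasklet_handler() via iucv_do_int(), i.e. in softirq context,
 * so it must not sleep; blocking work is usually deferred, e.g. to a
 * work queue initialized elsewhere with INIT_WORK().
 */
struct example_conn {
	iucv_MessagePending pending;
	struct work_struct rx_work;
};

static void
example_message_pending_deferred(iucv_MessagePending *eib, void *pgm_data)
{
	struct example_conn *conn = pgm_data;	/* token from registration */

	conn->pending = *eib;			/* tasklet context: no sleeping */
	schedule_work(&conn->rx_work);		/* run iucv_receive() later */
}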
- * - * S390 version - * Copyright (C) 2000 IBM Corporation - * Author(s):Alan Altmark (Alan_Altmark@us.ibm.com) - * Xenia Tkatschow (xenia@us.ibm.com) - * - * - * Functionality: - * To explore any of the IUCV functions, one must first register - * their program using iucv_register_program(). Once your program has - * successfully completed a register, it can exploit the other functions. - * For furthur reference on all IUCV functionality, refer to the - * CP Programming Services book, also available on the web - * thru www.ibm.com/s390/vm/pubs, manual # SC24-5760 - * - * Definition of Return Codes - * -All positive return codes including zero are reflected back - * from CP except for iucv_register_program. The definition of each - * return code can be found in CP Programming Services book. - * Also available on the web thru www.ibm.com/s390/vm/pubs, manual # SC24-5760 - * - Return Code of: - * (-EINVAL) Invalid value - * (-ENOMEM) storage allocation failed - * pgmask defined in iucv_register_program will be set depending on input - * paramters. - * - */ - -#include <linux/types.h> -#include <asm/debug.h> - -/** - * Debug Facility stuff - */ -#define IUCV_DBF_SETUP_NAME "iucv_setup" -#define IUCV_DBF_SETUP_LEN 32 -#define IUCV_DBF_SETUP_PAGES 2 -#define IUCV_DBF_SETUP_NR_AREAS 1 -#define IUCV_DBF_SETUP_LEVEL 3 - -#define IUCV_DBF_DATA_NAME "iucv_data" -#define IUCV_DBF_DATA_LEN 128 -#define IUCV_DBF_DATA_PAGES 2 -#define IUCV_DBF_DATA_NR_AREAS 1 -#define IUCV_DBF_DATA_LEVEL 2 - -#define IUCV_DBF_TRACE_NAME "iucv_trace" -#define IUCV_DBF_TRACE_LEN 16 -#define IUCV_DBF_TRACE_PAGES 4 -#define IUCV_DBF_TRACE_NR_AREAS 1 -#define IUCV_DBF_TRACE_LEVEL 3 - -#define IUCV_DBF_TEXT(name,level,text) \ - do { \ - debug_text_event(iucv_dbf_##name,level,text); \ - } while (0) - -#define IUCV_DBF_HEX(name,level,addr,len) \ - do { \ - debug_event(iucv_dbf_##name,level,(void*)(addr),len); \ - } while (0) - -DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf); - -#define IUCV_DBF_TEXT_(name,level,text...) \ - do { \ - char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \ - sprintf(iucv_dbf_txt_buf, text); \ - debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \ - put_cpu_var(iucv_dbf_txt_buf); \ - } while (0) - -#define IUCV_DBF_SPRINTF(name,level,text...) 
\ - do { \ - debug_sprintf_event(iucv_dbf_trace, level, ##text ); \ - debug_sprintf_event(iucv_dbf_trace, level, text ); \ - } while (0) - -/** - * some more debug stuff - */ -#define IUCV_HEXDUMP16(importance,header,ptr) \ -PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ - "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ - *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \ - *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \ - *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \ - *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \ - *(((char*)ptr)+12),*(((char*)ptr)+13), \ - *(((char*)ptr)+14),*(((char*)ptr)+15)); \ -PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ - "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ - *(((char*)ptr)+16),*(((char*)ptr)+17), \ - *(((char*)ptr)+18),*(((char*)ptr)+19), \ - *(((char*)ptr)+20),*(((char*)ptr)+21), \ - *(((char*)ptr)+22),*(((char*)ptr)+23), \ - *(((char*)ptr)+24),*(((char*)ptr)+25), \ - *(((char*)ptr)+26),*(((char*)ptr)+27), \ - *(((char*)ptr)+28),*(((char*)ptr)+29), \ - *(((char*)ptr)+30),*(((char*)ptr)+31)); - -static inline void -iucv_hex_dump(unsigned char *buf, size_t len) -{ - size_t i; - - for (i = 0; i < len; i++) { - if (i && !(i % 16)) - printk("\n"); - printk("%02x ", *(buf + i)); - } - printk("\n"); -} -/** - * end of debug stuff - */ - -#define uchar unsigned char -#define ushort unsigned short -#define ulong unsigned long -#define iucv_handle_t void * - -/* flags1: - * All flags are defined in the field IPFLAGS1 of each function - * and can be found in CP Programming Services. - * IPLOCAL - Indicates the connect can only be satisfied on the - * local system - * IPPRTY - Indicates a priority message - * IPQUSCE - Indicates you do not want to receive messages on a - * path until an iucv_resume is issued - * IPRMDATA - Indicates that the message is in the parameter list - */ -#define IPLOCAL 0x01 -#define IPPRTY 0x20 -#define IPQUSCE 0x40 -#define IPRMDATA 0x80 - -/* flags1_out: - * All flags are defined in the output field of IPFLAGS1 for each function - * and can be found in CP Programming Services. - * IPNORPY - Specifies this is a one-way message and no reply is expected. - * IPPRTY - Indicates a priority message is permitted. Defined in flags1. - */ -#define IPNORPY 0x10 - -#define Nonpriority_MessagePendingInterruptsFlag 0x80 -#define Priority_MessagePendingInterruptsFlag 0x40 -#define Nonpriority_MessageCompletionInterruptsFlag 0x20 -#define Priority_MessageCompletionInterruptsFlag 0x10 -#define IUCVControlInterruptsFlag 0x08 -#define AllInterrupts 0xf8 -/* - * Mapping of external interrupt buffers should be used with the corresponding - * interrupt types. 
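/*
 * Illustrative sketch only: typical use of the debug-facility macros
 * defined above, mirroring calls made elsewhere in this patch.  The
 * rc value and the buffer are hypothetical.
 */
static void sample_dbf_usage(void *buf, int rc)
{
	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);		    /* fixed string */
	IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_connect\n", rc);
	IUCV_DBF_HEX(data, 2, buf, 16);			    /* raw hex dump */
}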
- * Names: iucv_ConnectionPending -> connection pending - * iucv_ConnectionComplete -> connection complete - * iucv_ConnectionSevered -> connection severed - * iucv_ConnectionQuiesced -> connection quiesced - * iucv_ConnectionResumed -> connection resumed - * iucv_MessagePending -> message pending - * iucv_MessageComplete -> message complete - */ -typedef struct { - u16 ippathid; - uchar ipflags1; - uchar iptype; - u16 ipmsglim; - u16 res1; - uchar ipvmid[8]; - uchar ipuser[16]; - u32 res3; - uchar ippollfg; - uchar res4[3]; -} iucv_ConnectionPending; - -typedef struct { - u16 ippathid; - uchar ipflags1; - uchar iptype; - u16 ipmsglim; - u16 res1; - uchar res2[8]; - uchar ipuser[16]; - u32 res3; - uchar ippollfg; - uchar res4[3]; -} iucv_ConnectionComplete; - -typedef struct { - u16 ippathid; - uchar res1; - uchar iptype; - u32 res2; - uchar res3[8]; - uchar ipuser[16]; - u32 res4; - uchar ippollfg; - uchar res5[3]; -} iucv_ConnectionSevered; - -typedef struct { - u16 ippathid; - uchar res1; - uchar iptype; - u32 res2; - uchar res3[8]; - uchar ipuser[16]; - u32 res4; - uchar ippollfg; - uchar res5[3]; -} iucv_ConnectionQuiesced; - -typedef struct { - u16 ippathid; - uchar res1; - uchar iptype; - u32 res2; - uchar res3[8]; - uchar ipuser[16]; - u32 res4; - uchar ippollfg; - uchar res5[3]; -} iucv_ConnectionResumed; - -typedef struct { - u16 ippathid; - uchar ipflags1; - uchar iptype; - u32 ipmsgid; - u32 iptrgcls; - union u2 { - u32 iprmmsg1_u32; - uchar iprmmsg1[4]; - } ln1msg1; - union u1 { - u32 ipbfln1f; - uchar iprmmsg2[4]; - } ln1msg2; - u32 res1[3]; - u32 ipbfln2f; - uchar ippollfg; - uchar res2[3]; -} iucv_MessagePending; - -typedef struct { - u16 ippathid; - uchar ipflags1; - uchar iptype; - u32 ipmsgid; - u32 ipaudit; - uchar iprmmsg[8]; - u32 ipsrccls; - u32 ipmsgtag; - u32 res; - u32 ipbfln2f; - uchar ippollfg; - uchar res2[3]; -} iucv_MessageComplete; - -/* - * iucv_interrupt_ops_t: Is a vector of functions that handle - * IUCV interrupts. - * Parameter list: - * eib - is a pointer to a 40-byte area described - * with one of the structures above. - * pgm_data - this data is strictly for the - * interrupt handler that is passed by - * the application. This may be an address - * or token. -*/ -typedef struct { - void (*ConnectionPending) (iucv_ConnectionPending * eib, - void *pgm_data); - void (*ConnectionComplete) (iucv_ConnectionComplete * eib, - void *pgm_data); - void (*ConnectionSevered) (iucv_ConnectionSevered * eib, - void *pgm_data); - void (*ConnectionQuiesced) (iucv_ConnectionQuiesced * eib, - void *pgm_data); - void (*ConnectionResumed) (iucv_ConnectionResumed * eib, - void *pgm_data); - void (*MessagePending) (iucv_MessagePending * eib, void *pgm_data); - void (*MessageComplete) (iucv_MessageComplete * eib, void *pgm_data); -} iucv_interrupt_ops_t; - -/* - *iucv_array_t : Defines buffer array. - * Inside the array may be 31- bit addresses and 31-bit lengths. 
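/*
 * Illustrative sketch only: a handler vector of the kind described
 * above.  Callbacks left NULL are simply skipped by the (removed)
 * interrupt dispatcher.  All sample_* names are hypothetical; the
 * declarations come from this iucv.h.
 */
static void sample_conn_pending(iucv_ConnectionPending *eib, void *pgm_data)
{
	/* typically answered with iucv_accept() or iucv_sever() */
}

static void sample_msg_pending(iucv_MessagePending *eib, void *pgm_data)
{
	/* typically answered with iucv_receive() or iucv_reject() */
}

static iucv_interrupt_ops_t sample_ops = {
	.ConnectionPending = sample_conn_pending,
	.MessagePending    = sample_msg_pending,
};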
-*/ -typedef struct { - u32 address; - u32 length; -} iucv_array_t __attribute__ ((aligned (8))); - -extern struct bus_type iucv_bus; -extern struct device *iucv_root; - -/* -prototypes- */ -/* - * Name: iucv_register_program - * Purpose: Registers an application with IUCV - * Input: prmname - user identification - * userid - machine identification - * pgmmask - indicates which bits in the prmname and userid combined will be - * used to determine who is given control - * ops - address of vector of interrupt handlers - * pgm_data- application data passed to interrupt handlers - * Output: NA - * Return: address of handler - * (0) - Error occurred, registration not completed. - * NOTE: Exact cause of failure will be recorded in syslog. -*/ -iucv_handle_t iucv_register_program (uchar pgmname[16], - uchar userid[8], - uchar pgmmask[24], - iucv_interrupt_ops_t * ops, - void *pgm_data); - -/* - * Name: iucv_unregister_program - * Purpose: Unregister application with IUCV - * Input: address of handler - * Output: NA - * Return: (0) - Normal return - * (-EINVAL) - Internal error, wild pointer -*/ -int iucv_unregister_program (iucv_handle_t handle); - -/* - * Name: iucv_accept - * Purpose: This function is issued after the user receives a Connection Pending external - * interrupt and now wishes to complete the IUCV communication path. - * Input: pathid - u16 , Path identification number - * msglim_reqstd - u16, The number of outstanding messages requested. - * user_data - uchar[16], Data specified by the iucv_connect function. - * flags1 - int, Contains options for this path. - * -IPPRTY - 0x20- Specifies if you want to send priority message. - * -IPRMDATA - 0x80, Specifies whether your program can handle a message - * in the parameter list. - * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being - * established. - * handle - iucv_handle_t, Address of handler. - * pgm_data - void *, Application data passed to interrupt handlers. - * flags1_out - int * Contains information about the path - * - IPPRTY - 0x20, Indicates you may send priority messages. - * msglim - *u16, Number of outstanding messages. - * Output: return code from CP IUCV call. -*/ - -int iucv_accept (u16 pathid, - u16 msglim_reqstd, - uchar user_data[16], - int flags1, - iucv_handle_t handle, - void *pgm_data, int *flags1_out, u16 * msglim); - -/* - * Name: iucv_connect - * Purpose: This function establishes an IUCV path. Although the connect may complete - * successfully, you are not able to use the path until you receive an IUCV - * Connection Complete external interrupt. - * Input: pathid - u16 *, Path identification number - * msglim_reqstd - u16, Number of outstanding messages requested - * user_data - uchar[16], 16-byte user data - * userid - uchar[8], User identification - * system_name - uchar[8], 8-byte identifying the system name - * flags1 - int, Contains options for this path. - * -IPPRTY - 0x20, Specifies if you want to send priority message. - * -IPRMDATA - 0x80, Specifies whether your program can handle a message - * in the parameter list. - * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being - * established. - * -IPLOCAL - 0X01, Allows an application to force the partner to be on - * the local system. If local is specified then target class cannot be - * specified. - * flags1_out - int * Contains information about the path - * - IPPRTY - 0x20, Indicates you may send priority messages. 
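/*
 * Illustrative sketch only, based on the calling conventions documented
 * above: register with the IUCV base layer, then complete an incoming
 * path by fleshing out the ConnectionPending stub from the previous
 * sketch.  Program name, partner userid, mask and message limit are
 * all hypothetical.
 */
static iucv_handle_t sample_handle;

static int sample_register(void)
{
	static uchar pgmname[16] = "SAMPLEPGM";	/* blank-padded in practice */
	static uchar userid[8]	 = "PEERUSER";	/* hypothetical partner     */
	static uchar pgmmask[24];

	memset(pgmmask, 0xff, sizeof(pgmmask));	/* pgmname/userid must match */
	sample_handle = iucv_register_program(pgmname, userid, pgmmask,
					      &sample_ops, NULL);
	return sample_handle ? 0 : -EIO;	/* NULL: registration failed */
}

static void sample_conn_pending(iucv_ConnectionPending *eib, void *pgm_data)
{
	uchar udata[16] = { 0 };
	int flags1_out;
	u16 msglim;

	if (iucv_accept(eib->ippathid, 10, udata, 0, sample_handle,
			NULL, &flags1_out, &msglim))
		iucv_sever(eib->ippathid, udata);
}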
- * msglim - * u16, Number of outstanding messages - * handle - iucv_handle_t, Address of handler - * pgm_data - void *, Application data passed to interrupt handlers - * Output: return code from CP IUCV call - * rc - return code from iucv_declare_buffer - * -EINVAL - Invalid handle passed by application - * -EINVAL - Pathid address is NULL - * add_pathid_result - Return code from internal function add_pathid -*/ -int - iucv_connect (u16 * pathid, - u16 msglim_reqstd, - uchar user_data[16], - uchar userid[8], - uchar system_name[8], - int flags1, - int *flags1_out, - u16 * msglim, iucv_handle_t handle, void *pgm_data); - -/* - * Name: iucv_purge - * Purpose: This function cancels a message that you have sent. - * Input: pathid - Path identification number. - * msgid - Specifies the message ID of the message to be purged. - * srccls - Specifies the source message class. - * Output: audit - Contains information about asynchronous error - * that may have affected the normal completion - * of this message. - * Return: Return code from CP IUCV call. -*/ -int iucv_purge (u16 pathid, u32 msgid, u32 srccls, __u32 *audit); -/* - * Name: iucv_query_maxconn - * Purpose: This function determines the maximum number of communication paths you - * may establish. - * Return: maxconn - ulong, Maximum number of connection the virtual machine may - * establish. -*/ -ulong iucv_query_maxconn (void); - -/* - * Name: iucv_query_bufsize - * Purpose: This function determines how large an external interrupt - * buffer IUCV requires to store information. - * Return: bufsize - ulong, Size of external interrupt buffer. - */ -ulong iucv_query_bufsize (void); - -/* - * Name: iucv_quiesce - * Purpose: This function temporarily suspends incoming messages on an - * IUCV path. You can later reactivate the path by invoking - * the iucv_resume function. - * Input: pathid - Path identification number - * user_data - 16-bytes of user data - * Output: NA - * Return: Return code from CP IUCV call. -*/ -int iucv_quiesce (u16 pathid, uchar user_data[16]); - -/* - * Name: iucv_receive - * Purpose: This function receives messages that are being sent to you - * over established paths. Data will be returned in buffer for length of - * buflen. - * Input: - * pathid - Path identification number. - * buffer - Address of buffer to receive. - * buflen - Length of buffer to receive. - * msgid - Specifies the message ID. - * trgcls - Specifies target class. - * Output: - * flags1_out: int *, Contains information about this path. - * IPNORPY - 0x10 Specifies this is a one-way message and no reply is - * expected. - * IPPRTY - 0x20 Specifies if you want to send priority message. - * IPRMDATA - 0x80 specifies the data is contained in the parameter list - * residual_buffer - address of buffer updated by the number - * of bytes you have received. - * residual_length - - * Contains one of the following values, if the receive buffer is: - * The same length as the message, this field is zero. - * Longer than the message, this field contains the number of - * bytes remaining in the buffer. - * Shorter than the message, this field contains the residual - * count (that is, the number of bytes remaining in the - * message that does not fit into the buffer. In this - * case b2f0_result = 5. - * Return: Return code from CP IUCV call. 
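/*
 * Illustrative sketch only: establishing an outbound path with
 * iucv_connect() as documented above.  The path must not be used until
 * the ConnectionComplete interrupt arrives.  Partner userid, system
 * name and message limit are hypothetical.
 */
static int sample_connect(iucv_handle_t handle, u16 *pathid)
{
	uchar user_data[16]  = { 0 };
	uchar userid[8]	     = "PEERUSER";	/* hypothetical partner  */
	uchar system_name[8] = "        ";	/* hypothetical system   */
	int flags1_out;
	u16 msglim;

	return iucv_connect(pathid, 10, user_data, userid, system_name,
			    0, &flags1_out, &msglim, handle, NULL);
}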
- * (-EINVAL) - buffer address is pointing to NULL -*/ -int iucv_receive (u16 pathid, - u32 msgid, - u32 trgcls, - void *buffer, - ulong buflen, - int *flags1_out, - ulong * residual_buffer, ulong * residual_length); - - /* - * Name: iucv_receive_array - * Purpose: This function receives messages that are being sent to you - * over established paths. Data will be returned in first buffer for - * length of first buffer. - * Input: pathid - Path identification number. - * msgid - specifies the message ID. - * trgcls - Specifies target class. - * buffer - Address of array of buffers. - * buflen - Total length of buffers. - * Output: - * flags1_out: int *, Contains information about this path. - * IPNORPY - 0x10 Specifies this is a one-way message and no reply is - * expected. - * IPPRTY - 0x20 Specifies if you want to send priority message. - * IPRMDATA - 0x80 specifies the data is contained in the parameter list - * residual_buffer - address points to the current list entry IUCV - * is working on. - * residual_length - - * Contains one of the following values, if the receive buffer is: - * The same length as the message, this field is zero. - * Longer than the message, this field contains the number of - * bytes remaining in the buffer. - * Shorter than the message, this field contains the residual - * count (that is, the number of bytes remaining in the - * message that does not fit into the buffer. In this - * case b2f0_result = 5. - * Return: Return code from CP IUCV call. - * (-EINVAL) - Buffer address is NULL. - */ -int iucv_receive_array (u16 pathid, - u32 msgid, - u32 trgcls, - iucv_array_t * buffer, - ulong buflen, - int *flags1_out, - ulong * residual_buffer, ulong * residual_length); - -/* - * Name: iucv_reject - * Purpose: The reject function refuses a specified message. Between the - * time you are notified of a message and the time that you - * complete the message, the message may be rejected. - * Input: pathid - Path identification number. - * msgid - Specifies the message ID. - * trgcls - Specifies target class. - * Output: NA - * Return: Return code from CP IUCV call. -*/ -int iucv_reject (u16 pathid, u32 msgid, u32 trgcls); - -/* - * Name: iucv_reply - * Purpose: This function responds to the two-way messages that you - * receive. You must identify completely the message to - * which you wish to reply. ie, pathid, msgid, and trgcls. - * Input: pathid - Path identification number. - * msgid - Specifies the message ID. - * trgcls - Specifies target class. - * flags1 - Option for path. - * IPPRTY- 0x20, Specifies if you want to send priority message. - * buffer - Address of reply buffer. - * buflen - Length of reply buffer. - * Output: residual_buffer - Address of buffer updated by the number - * of bytes you have moved. - * residual_length - Contains one of the following values: - * If the answer buffer is the same length as the reply, this field - * contains zero. - * If the answer buffer is longer than the reply, this field contains - * the number of bytes remaining in the buffer. - * If the answer buffer is shorter than the reply, this field contains - * a residual count (that is, the number of bytes remianing in the - * reply that does not fit into the buffer. In this - * case b2f0_result = 5. - * Return: Return code from CP IUCV call. - * (-EINVAL) - Buffer address is NULL. 
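/*
 * Illustrative sketch only, fleshing out the MessagePending stub from
 * the earlier sketch: pull the pending data into a flat buffer with
 * iucv_receive(), or refuse it with iucv_reject() if it does not fit.
 * The buffer size is hypothetical.
 */
static void sample_msg_pending(iucv_MessagePending *eib, void *pgm_data)
{
	static char buf[2048];
	u32 len = eib->ln1msg2.ipbfln1f;	/* length of pending message */

	if (len > sizeof(buf)) {
		iucv_reject(eib->ippathid, eib->ipmsgid, eib->iptrgcls);
		return;
	}
	if (iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls,
			 buf, len, NULL, NULL, NULL))
		return;				/* non-zero: CP return code */
	/* hand buf[0..len) to the upper layer here */
}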
-*/ -int iucv_reply (u16 pathid, - u32 msgid, - u32 trgcls, - int flags1, - void *buffer, ulong buflen, ulong * residual_buffer, - ulong * residual_length); - -/* - * Name: iucv_reply_array - * Purpose: This function responds to the two-way messages that you - * receive. You must identify completely the message to - * which you wish to reply. ie, pathid, msgid, and trgcls. - * The array identifies a list of addresses and lengths of - * discontiguous buffers that contains the reply data. - * Input: pathid - Path identification number - * msgid - Specifies the message ID. - * trgcls - Specifies target class. - * flags1 - Option for path. - * IPPRTY- 0x20, Specifies if you want to send priority message. - * buffer - Address of array of reply buffers. - * buflen - Total length of reply buffers. - * Output: residual_buffer - Address of buffer which IUCV is currently working on. - * residual_length - Contains one of the following values: - * If the answer buffer is the same length as the reply, this field - * contains zero. - * If the answer buffer is longer than the reply, this field contains - * the number of bytes remaining in the buffer. - * If the answer buffer is shorter than the reply, this field contains - * a residual count (that is, the number of bytes remianing in the - * reply that does not fit into the buffer. In this - * case b2f0_result = 5. - * Return: Return code from CP IUCV call. - * (-EINVAL) - Buffer address is NULL. -*/ -int iucv_reply_array (u16 pathid, - u32 msgid, - u32 trgcls, - int flags1, - iucv_array_t * buffer, - ulong buflen, ulong * residual_address, - ulong * residual_length); - -/* - * Name: iucv_reply_prmmsg - * Purpose: This function responds to the two-way messages that you - * receive. You must identify completely the message to - * which you wish to reply. ie, pathid, msgid, and trgcls. - * Prmmsg signifies the data is moved into the - * parameter list. - * Input: pathid - Path identification number. - * msgid - Specifies the message ID. - * trgcls - Specifies target class. - * flags1 - Option for path. - * IPPRTY- 0x20 Specifies if you want to send priority message. - * prmmsg - 8-bytes of data to be placed into the parameter. - * list. - * Output: NA - * Return: Return code from CP IUCV call. -*/ -int iucv_reply_prmmsg (u16 pathid, - u32 msgid, u32 trgcls, int flags1, uchar prmmsg[8]); - -/* - * Name: iucv_resume - * Purpose: This function restores communications over a quiesced path - * Input: pathid - Path identification number. - * user_data - 16-bytes of user data. - * Output: NA - * Return: Return code from CP IUCV call. -*/ -int iucv_resume (u16 pathid, uchar user_data[16]); - -/* - * Name: iucv_send - * Purpose: This function transmits data to another application. - * Data to be transmitted is in a buffer and this is a - * one-way message and the receiver will not reply to the - * message. - * Input: pathid - Path identification number. - * trgcls - Specifies target class. - * srccls - Specifies the source message class. - * msgtag - Specifies a tag to be associated with the message. - * flags1 - Option for path. - * IPPRTY- 0x20 Specifies if you want to send priority message. - * buffer - Address of send buffer. - * buflen - Length of send buffer. - * Output: msgid - Specifies the message ID. - * Return: Return code from CP IUCV call. - * (-EINVAL) - Buffer address is NULL. 
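/*
 * Illustrative sketch only: answering a two-way message with
 * iucv_reply().  The message is identified by pathid, msgid and trgcls
 * as described above; the reply payload is hypothetical.
 */
static int sample_reply(u16 pathid, u32 msgid, u32 trgcls)
{
	static char answer[] = "OK";
	ulong residual_buffer, residual_length;

	return iucv_reply(pathid, msgid, trgcls, 0 /* no IPPRTY */,
			  answer, sizeof(answer),
			  &residual_buffer, &residual_length);
}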
-*/ -int iucv_send (u16 pathid, - u32 * msgid, - u32 trgcls, - u32 srccls, u32 msgtag, int flags1, void *buffer, ulong buflen); - -/* - * Name: iucv_send_array - * Purpose: This function transmits data to another application. - * The contents of buffer is the address of the array of - * addresses and lengths of discontiguous buffers that hold - * the message text. This is a one-way message and the - * receiver will not reply to the message. - * Input: pathid - Path identification number. - * trgcls - Specifies target class. - * srccls - Specifies the source message class. - * msgtag - Specifies a tag to be associated witht the message. - * flags1 - Option for path. - * IPPRTY- specifies if you want to send priority message. - * buffer - Address of array of send buffers. - * buflen - Total length of send buffers. - * Output: msgid - Specifies the message ID. - * Return: Return code from CP IUCV call. - * (-EINVAL) - Buffer address is NULL. -*/ -int iucv_send_array (u16 pathid, - u32 * msgid, - u32 trgcls, - u32 srccls, - u32 msgtag, - int flags1, iucv_array_t * buffer, ulong buflen); - -/* - * Name: iucv_send_prmmsg - * Purpose: This function transmits data to another application. - * Prmmsg specifies that the 8-bytes of data are to be moved - * into the parameter list. This is a one-way message and the - * receiver will not reply to the message. - * Input: pathid - Path identification number. - * trgcls - Specifies target class. - * srccls - Specifies the source message class. - * msgtag - Specifies a tag to be associated with the message. - * flags1 - Option for path. - * IPPRTY- 0x20 specifies if you want to send priority message. - * prmmsg - 8-bytes of data to be placed into parameter list. - * Output: msgid - Specifies the message ID. - * Return: Return code from CP IUCV call. -*/ -int iucv_send_prmmsg (u16 pathid, - u32 * msgid, - u32 trgcls, - u32 srccls, u32 msgtag, int flags1, uchar prmmsg[8]); - -/* - * Name: iucv_send2way - * Purpose: This function transmits data to another application. - * Data to be transmitted is in a buffer. The receiver - * of the send is expected to reply to the message and - * a buffer is provided into which IUCV moves the reply - * to this message. - * Input: pathid - Path identification number. - * trgcls - Specifies target class. - * srccls - Specifies the source message class. - * msgtag - Specifies a tag associated with the message. - * flags1 - Option for path. - * IPPRTY- 0x20 Specifies if you want to send priority message. - * buffer - Address of send buffer. - * buflen - Length of send buffer. - * ansbuf - Address of buffer into which IUCV moves the reply of - * this message. - * anslen - Address of length of buffer. - * Output: msgid - Specifies the message ID. - * Return: Return code from CP IUCV call. - * (-EINVAL) - Buffer or ansbuf address is NULL. -*/ -int iucv_send2way (u16 pathid, - u32 * msgid, - u32 trgcls, - u32 srccls, - u32 msgtag, - int flags1, - void *buffer, ulong buflen, void *ansbuf, ulong anslen); - -/* - * Name: iucv_send2way_array - * Purpose: This function transmits data to another application. - * The contents of buffer is the address of the array of - * addresses and lengths of discontiguous buffers that hold - * the message text. The receiver of the send is expected to - * reply to the message and a buffer is provided into which - * IUCV moves the reply to this message. - * Input: pathid - Path identification number. - * trgcls - Specifies target class. - * srccls - Specifies the source message class. 
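/*
 * Illustrative sketch only: a two-way send with iucv_send2way() as
 * documented above.  IUCV moves the partner's reply into ansbuf and
 * signals completion through the MessageComplete interrupt carrying
 * the returned msgid.  Buffers, classes and the tag are hypothetical.
 */
static int sample_send2way(u16 pathid)
{
	static char request[] = "PING";
	static char answer[256];
	u32 msgid;

	return iucv_send2way(pathid, &msgid, 0 /* trgcls */, 0 /* srccls */,
			     0x1234 /* msgtag */, 0 /* flags1 */,
			     request, sizeof(request),
			     answer, sizeof(answer));
}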
- * msgtag - Specifies a tag to be associated with the message. - * flags1 - Option for path. - * IPPRTY- 0x20 Specifies if you want to send priority message. - * buffer - Sddress of array of send buffers. - * buflen - Total length of send buffers. - * ansbuf - Address of array of buffer into which IUCV moves the reply - * of this message. - * anslen - Address of length reply buffers. - * Output: msgid - Specifies the message ID. - * Return: Return code from CP IUCV call. - * (-EINVAL) - Buffer address is NULL. -*/ -int iucv_send2way_array (u16 pathid, - u32 * msgid, - u32 trgcls, - u32 srccls, - u32 msgtag, - int flags1, - iucv_array_t * buffer, - ulong buflen, iucv_array_t * ansbuf, ulong anslen); - -/* - * Name: iucv_send2way_prmmsg - * Purpose: This function transmits data to another application. - * Prmmsg specifies that the 8-bytes of data are to be moved - * into the parameter list. This is a two-way message and the - * receiver of the message is expected to reply. A buffer - * is provided into which IUCV moves the reply to this - * message. - * Input: pathid - Rath identification number. - * trgcls - Specifies target class. - * srccls - Specifies the source message class. - * msgtag - Specifies a tag to be associated with the message. - * flags1 - Option for path. - * IPPRTY- 0x20 Specifies if you want to send priority message. - * prmmsg - 8-bytes of data to be placed in parameter list. - * ansbuf - Address of buffer into which IUCV moves the reply of - * this message. - * anslen - Address of length of buffer. - * Output: msgid - Specifies the message ID. - * Return: Return code from CP IUCV call. - * (-EINVAL) - Buffer address is NULL. -*/ -int iucv_send2way_prmmsg (u16 pathid, - u32 * msgid, - u32 trgcls, - u32 srccls, - u32 msgtag, - ulong flags1, - uchar prmmsg[8], void *ansbuf, ulong anslen); - -/* - * Name: iucv_send2way_prmmsg_array - * Purpose: This function transmits data to another application. - * Prmmsg specifies that the 8-bytes of data are to be moved - * into the parameter list. This is a two-way message and the - * receiver of the message is expected to reply. A buffer - * is provided into which IUCV moves the reply to this - * message. The contents of ansbuf is the address of the - * array of addresses and lengths of discontiguous buffers - * that contain the reply. - * Input: pathid - Path identification number. - * trgcls - Specifies target class. - * srccls - Specifies the source message class. - * msgtag - Specifies a tag to be associated with the message. - * flags1 - Option for path. - * IPPRTY- 0x20 specifies if you want to send priority message. - * prmmsg - 8-bytes of data to be placed into the parameter list. - * ansbuf - Address of array of buffer into which IUCV moves the reply - * of this message. - * anslen - Address of length of reply buffers. - * Output: msgid - Specifies the message ID. - * Return: Return code from CP IUCV call. - * (-EINVAL) - Ansbuf address is NULL. -*/ -int iucv_send2way_prmmsg_array (u16 pathid, - u32 * msgid, - u32 trgcls, - u32 srccls, - u32 msgtag, - int flags1, - uchar prmmsg[8], - iucv_array_t * ansbuf, ulong anslen); - -/* - * Name: iucv_setmask - * Purpose: This function enables or disables the following IUCV - * external interruptions: Nonpriority and priority message - * interrupts, nonpriority and priority reply interrupts. 
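/*
 * Illustrative sketch only: the parameter-list variant documented
 * above.  The eight payload bytes travel in the parameter list itself,
 * so only the reply buffer needs to be provided.  The EBCDIC "HELLO"
 * payload and the buffer size are hypothetical.
 */
static int sample_send2way_prmmsg(u16 pathid)
{
	uchar prmmsg[8] = { 0xc8, 0xc5, 0xd3, 0xd3, 0xd6, 0x40, 0x40, 0x40 };
	static char answer[256];
	u32 msgid;

	return iucv_send2way_prmmsg(pathid, &msgid, 0 /* trgcls */,
				    0 /* srccls */, 0 /* msgtag */,
				    0 /* flags1 */, prmmsg,
				    answer, sizeof(answer));
}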
- * Input: SetMaskFlag - options for interrupts - * 0x80 - Nonpriority_MessagePendingInterruptsFlag - * 0x40 - Priority_MessagePendingInterruptsFlag - * 0x20 - Nonpriority_MessageCompletionInterruptsFlag - * 0x10 - Priority_MessageCompletionInterruptsFlag - * 0x08 - IUCVControlInterruptsFlag - * Output: NA - * Return: Return code from CP IUCV call. -*/ -int iucv_setmask (int SetMaskFlag); - -/* - * Name: iucv_sever - * Purpose: This function terminates an IUCV path. - * Input: pathid - Path identification number. - * user_data - 16-bytes of user data. - * Output: NA - * Return: Return code from CP IUCV call. - * (-EINVAL) - Interal error, wild pointer. -*/ -int iucv_sever (u16 pathid, uchar user_data[16]); diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index e5665b6743a..b97dd15bdb9 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -828,7 +828,7 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd) /** * Emit buffer of a lan comand. */ -void +static void lcs_lancmd_timeout(unsigned long data) { struct lcs_reply *reply, *list_reply, *r; @@ -1360,7 +1360,7 @@ lcs_get_problem(struct ccw_device *cdev, struct irb *irb) return 0; } -void +static void lcs_schedule_recovery(struct lcs_card *card) { LCS_DBF_TEXT(2, trace, "startrec"); @@ -1990,7 +1990,7 @@ lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char } -DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); +static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); static ssize_t lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index d7d1cc0a5c8..6387b483f2b 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -1,7 +1,7 @@ /* * IUCV network driver * - * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) * * Sysfs integration and all bugs therein by Cornelia Huck @@ -58,13 +58,94 @@ #include <asm/io.h> #include <asm/uaccess.h> -#include "iucv.h" +#include <net/iucv/iucv.h> #include "fsm.h" MODULE_AUTHOR ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)"); MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); +/** + * Debug Facility stuff + */ +#define IUCV_DBF_SETUP_NAME "iucv_setup" +#define IUCV_DBF_SETUP_LEN 32 +#define IUCV_DBF_SETUP_PAGES 2 +#define IUCV_DBF_SETUP_NR_AREAS 1 +#define IUCV_DBF_SETUP_LEVEL 3 + +#define IUCV_DBF_DATA_NAME "iucv_data" +#define IUCV_DBF_DATA_LEN 128 +#define IUCV_DBF_DATA_PAGES 2 +#define IUCV_DBF_DATA_NR_AREAS 1 +#define IUCV_DBF_DATA_LEVEL 2 + +#define IUCV_DBF_TRACE_NAME "iucv_trace" +#define IUCV_DBF_TRACE_LEN 16 +#define IUCV_DBF_TRACE_PAGES 4 +#define IUCV_DBF_TRACE_NR_AREAS 1 +#define IUCV_DBF_TRACE_LEVEL 3 + +#define IUCV_DBF_TEXT(name,level,text) \ + do { \ + debug_text_event(iucv_dbf_##name,level,text); \ + } while (0) + +#define IUCV_DBF_HEX(name,level,addr,len) \ + do { \ + debug_event(iucv_dbf_##name,level,(void*)(addr),len); \ + } while (0) + +DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf); + +#define IUCV_DBF_TEXT_(name,level,text...) 
\ + do { \ + char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \ + sprintf(iucv_dbf_txt_buf, text); \ + debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \ + put_cpu_var(iucv_dbf_txt_buf); \ + } while (0) + +#define IUCV_DBF_SPRINTF(name,level,text...) \ + do { \ + debug_sprintf_event(iucv_dbf_trace, level, ##text ); \ + debug_sprintf_event(iucv_dbf_trace, level, text ); \ + } while (0) + +/** + * some more debug stuff + */ +#define IUCV_HEXDUMP16(importance,header,ptr) \ +PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ + "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ + *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \ + *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \ + *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \ + *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \ + *(((char*)ptr)+12),*(((char*)ptr)+13), \ + *(((char*)ptr)+14),*(((char*)ptr)+15)); \ +PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ + "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ + *(((char*)ptr)+16),*(((char*)ptr)+17), \ + *(((char*)ptr)+18),*(((char*)ptr)+19), \ + *(((char*)ptr)+20),*(((char*)ptr)+21), \ + *(((char*)ptr)+22),*(((char*)ptr)+23), \ + *(((char*)ptr)+24),*(((char*)ptr)+25), \ + *(((char*)ptr)+26),*(((char*)ptr)+27), \ + *(((char*)ptr)+28),*(((char*)ptr)+29), \ + *(((char*)ptr)+30),*(((char*)ptr)+31)); + +static inline void iucv_hex_dump(unsigned char *buf, size_t len) +{ + size_t i; + + for (i = 0; i < len; i++) { + if (i && !(i % 16)) + printk("\n"); + printk("%02x ", *(buf + i)); + } + printk("\n"); +} #define PRINTK_HEADER " iucv: " /* for debugging */ @@ -73,6 +154,25 @@ static struct device_driver netiucv_driver = { .bus = &iucv_bus, }; +static int netiucv_callback_connreq(struct iucv_path *, + u8 ipvmid[8], u8 ipuser[16]); +static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]); +static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]); +static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]); +static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]); +static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *); +static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *); + +static struct iucv_handler netiucv_handler = { + .path_pending = netiucv_callback_connreq, + .path_complete = netiucv_callback_connack, + .path_severed = netiucv_callback_connrej, + .path_quiesced = netiucv_callback_connsusp, + .path_resumed = netiucv_callback_connres, + .message_pending = netiucv_callback_rx, + .message_complete = netiucv_callback_txdone +}; + /** * Per connection profiling data */ @@ -92,9 +192,8 @@ struct connection_profile { * Representation of one iucv connection */ struct iucv_connection { - struct iucv_connection *next; - iucv_handle_t handle; - __u16 pathid; + struct list_head list; + struct iucv_path *path; struct sk_buff *rx_buff; struct sk_buff *tx_buff; struct sk_buff_head collect_queue; @@ -112,12 +211,9 @@ struct iucv_connection { /** * Linked list of all connection structs. */ -struct iucv_connection_struct { - struct iucv_connection *iucv_connections; - rwlock_t iucv_rwlock; -}; - -static struct iucv_connection_struct iucv_conns; +static struct list_head iucv_connection_list = + LIST_HEAD_INIT(iucv_connection_list); +static rwlock_t iucv_connection_rwlock = RW_LOCK_UNLOCKED; /** * Representation of event-data for the @@ -142,11 +238,11 @@ struct netiucv_priv { /** * Link level header for a packet. 
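/*
 * Illustrative sketch only: the shape of a handler for the new
 * <net/iucv/iucv.h> interface that this patch converts netiucv to.
 * Only two of the seven callbacks are shown; all sample_* names are
 * hypothetical.
 */
static int sample_path_pending(struct iucv_path *path,
			       u8 ipvmid[8], u8 ipuser[16])
{
	/*
	 * Returning 0 claims the offered path (normally after calling
	 * iucv_path_accept()); an error return declines it, as netiucv
	 * does below for peers it does not know.
	 */
	return -EINVAL;
}

static void sample_message_pending(struct iucv_path *path,
				   struct iucv_message *msg)
{
	/* msg->length tells how much data iucv_message_receive() returns */
}

static struct iucv_handler sample_handler = {
	.path_pending	 = sample_path_pending,
	.message_pending = sample_message_pending,
};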
*/ -typedef struct ll_header_t { - __u16 next; -} ll_header; +struct ll_header { + u16 next; +}; -#define NETIUCV_HDRLEN (sizeof(ll_header)) +#define NETIUCV_HDRLEN (sizeof(struct ll_header)) #define NETIUCV_BUFSIZE_MAX 32768 #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) @@ -158,36 +254,26 @@ typedef struct ll_header_t { * Compatibility macros for busy handling * of network devices. */ -static __inline__ void netiucv_clear_busy(struct net_device *dev) +static inline void netiucv_clear_busy(struct net_device *dev) { - clear_bit(0, &(((struct netiucv_priv *)dev->priv)->tbusy)); + struct netiucv_priv *priv = netdev_priv(dev); + clear_bit(0, &priv->tbusy); netif_wake_queue(dev); } -static __inline__ int netiucv_test_and_set_busy(struct net_device *dev) +static inline int netiucv_test_and_set_busy(struct net_device *dev) { + struct netiucv_priv *priv = netdev_priv(dev); netif_stop_queue(dev); - return test_and_set_bit(0, &((struct netiucv_priv *)dev->priv)->tbusy); + return test_and_set_bit(0, &priv->tbusy); } -static __u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; -static __u8 iucvMagic[16] = { +static u8 iucvMagic[16] = { 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 }; /** - * This mask means the 16-byte IUCV "magic" and the origin userid must - * match exactly as specified in order to give connection_pending() - * control. - */ -static __u8 netiucv_mask[] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -}; - -/** * Convert an iucv userId to its printable * form (strip whitespace at end). * @@ -195,8 +281,7 @@ static __u8 netiucv_mask[] = { * * @returns The printable string (static data!!) */ -static __inline__ char * -netiucv_printname(char *name) +static inline char *netiucv_printname(char *name) { static char tmp[9]; char *p = tmp; @@ -379,8 +464,7 @@ static debug_info_t *iucv_dbf_trace = NULL; DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf); -static void -iucv_unregister_dbf_views(void) +static void iucv_unregister_dbf_views(void) { if (iucv_dbf_setup) debug_unregister(iucv_dbf_setup); @@ -389,8 +473,7 @@ iucv_unregister_dbf_views(void) if (iucv_dbf_trace) debug_unregister(iucv_dbf_trace); } -static int -iucv_register_dbf_views(void) +static int iucv_register_dbf_views(void) { iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME, IUCV_DBF_SETUP_PAGES, @@ -422,125 +505,111 @@ iucv_register_dbf_views(void) return 0; } -/** +/* * Callback-wrappers, called from lowlevel iucv layer. 
- *****************************************************************************/ + */ -static void -netiucv_callback_rx(iucv_MessagePending *eib, void *pgm_data) +static void netiucv_callback_rx(struct iucv_path *path, + struct iucv_message *msg) { - struct iucv_connection *conn = (struct iucv_connection *)pgm_data; + struct iucv_connection *conn = path->private; struct iucv_event ev; ev.conn = conn; - ev.data = (void *)eib; - + ev.data = msg; fsm_event(conn->fsm, CONN_EVENT_RX, &ev); } -static void -netiucv_callback_txdone(iucv_MessageComplete *eib, void *pgm_data) +static void netiucv_callback_txdone(struct iucv_path *path, + struct iucv_message *msg) { - struct iucv_connection *conn = (struct iucv_connection *)pgm_data; + struct iucv_connection *conn = path->private; struct iucv_event ev; ev.conn = conn; - ev.data = (void *)eib; + ev.data = msg; fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev); } -static void -netiucv_callback_connack(iucv_ConnectionComplete *eib, void *pgm_data) +static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16]) { - struct iucv_connection *conn = (struct iucv_connection *)pgm_data; - struct iucv_event ev; + struct iucv_connection *conn = path->private; - ev.conn = conn; - ev.data = (void *)eib; - fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, &ev); + fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn); } -static void -netiucv_callback_connreq(iucv_ConnectionPending *eib, void *pgm_data) +static int netiucv_callback_connreq(struct iucv_path *path, + u8 ipvmid[8], u8 ipuser[16]) { - struct iucv_connection *conn = (struct iucv_connection *)pgm_data; + struct iucv_connection *conn = path->private; struct iucv_event ev; + int rc; - ev.conn = conn; - ev.data = (void *)eib; - fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev); + if (memcmp(iucvMagic, ipuser, sizeof(ipuser))) + /* ipuser must match iucvMagic. */ + return -EINVAL; + rc = -EINVAL; + read_lock_bh(&iucv_connection_rwlock); + list_for_each_entry(conn, &iucv_connection_list, list) { + if (strncmp(ipvmid, conn->userid, 8)) + continue; + /* Found a matching connection for this path. 
*/ + conn->path = path; + ev.conn = conn; + ev.data = path; + fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev); + rc = 0; + } + read_unlock_bh(&iucv_connection_rwlock); + return rc; } -static void -netiucv_callback_connrej(iucv_ConnectionSevered *eib, void *pgm_data) +static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16]) { - struct iucv_connection *conn = (struct iucv_connection *)pgm_data; - struct iucv_event ev; + struct iucv_connection *conn = path->private; - ev.conn = conn; - ev.data = (void *)eib; - fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, &ev); + fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn); } -static void -netiucv_callback_connsusp(iucv_ConnectionQuiesced *eib, void *pgm_data) +static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16]) { - struct iucv_connection *conn = (struct iucv_connection *)pgm_data; - struct iucv_event ev; + struct iucv_connection *conn = path->private; - ev.conn = conn; - ev.data = (void *)eib; - fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, &ev); + fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn); } -static void -netiucv_callback_connres(iucv_ConnectionResumed *eib, void *pgm_data) +static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16]) { - struct iucv_connection *conn = (struct iucv_connection *)pgm_data; - struct iucv_event ev; + struct iucv_connection *conn = path->private; - ev.conn = conn; - ev.data = (void *)eib; - fsm_event(conn->fsm, CONN_EVENT_CONN_RES, &ev); -} - -static iucv_interrupt_ops_t netiucv_ops = { - .ConnectionPending = netiucv_callback_connreq, - .ConnectionComplete = netiucv_callback_connack, - .ConnectionSevered = netiucv_callback_connrej, - .ConnectionQuiesced = netiucv_callback_connsusp, - .ConnectionResumed = netiucv_callback_connres, - .MessagePending = netiucv_callback_rx, - .MessageComplete = netiucv_callback_txdone -}; + fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn); +} /** * Dummy NOP action for all statemachines */ -static void -fsm_action_nop(fsm_instance *fi, int event, void *arg) +static void fsm_action_nop(fsm_instance *fi, int event, void *arg) { } -/** +/* * Actions of the connection statemachine - *****************************************************************************/ + */ /** - * Helper function for conn_action_rx() - * Unpack a just received skb and hand it over to - * upper layers. + * netiucv_unpack_skb + * @conn: The connection where this skb has been received. + * @pskb: The received skb. * - * @param conn The connection where this skb has been received. - * @param pskb The received skb. + * Unpack a just received skb and hand it over to upper layers. + * Helper function for conn_action_rx. 
*/ -//static __inline__ void -static void -netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb) +static void netiucv_unpack_skb(struct iucv_connection *conn, + struct sk_buff *pskb) { struct net_device *dev = conn->netdev; - struct netiucv_priv *privptr = dev->priv; - __u16 offset = 0; + struct netiucv_priv *privptr = netdev_priv(dev); + u16 offset = 0; skb_put(pskb, NETIUCV_HDRLEN); pskb->dev = dev; @@ -549,7 +618,7 @@ netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb) while (1) { struct sk_buff *skb; - ll_header *header = (ll_header *)pskb->data; + struct ll_header *header = (struct ll_header *) pskb->data; if (!header->next) break; @@ -595,40 +664,37 @@ netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb) } } -static void -conn_action_rx(fsm_instance *fi, int event, void *arg) +static void conn_action_rx(fsm_instance *fi, int event, void *arg) { - struct iucv_event *ev = (struct iucv_event *)arg; + struct iucv_event *ev = arg; struct iucv_connection *conn = ev->conn; - iucv_MessagePending *eib = (iucv_MessagePending *)ev->data; - struct netiucv_priv *privptr =(struct netiucv_priv *)conn->netdev->priv; - - __u32 msglen = eib->ln1msg2.ipbfln1f; + struct iucv_message *msg = ev->data; + struct netiucv_priv *privptr = netdev_priv(conn->netdev); int rc; IUCV_DBF_TEXT(trace, 4, __FUNCTION__); if (!conn->netdev) { - /* FRITZ: How to tell iucv LL to drop the msg? */ + iucv_message_reject(conn->path, msg); PRINT_WARN("Received data for unlinked connection\n"); IUCV_DBF_TEXT(data, 2, - "Received data for unlinked connection\n"); + "Received data for unlinked connection\n"); return; } - if (msglen > conn->max_buffsize) { - /* FRITZ: How to tell iucv LL to drop the msg? */ + if (msg->length > conn->max_buffsize) { + iucv_message_reject(conn->path, msg); privptr->stats.rx_dropped++; PRINT_WARN("msglen %d > max_buffsize %d\n", - msglen, conn->max_buffsize); + msg->length, conn->max_buffsize); IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", - msglen, conn->max_buffsize); + msg->length, conn->max_buffsize); return; } conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head; conn->rx_buff->len = 0; - rc = iucv_receive(conn->pathid, eib->ipmsgid, eib->iptrgcls, - conn->rx_buff->data, msglen, NULL, NULL, NULL); - if (rc || msglen < 5) { + rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data, + msg->length, NULL); + if (rc || msg->length < 5) { privptr->stats.rx_errors++; PRINT_WARN("iucv_receive returned %08x\n", rc); IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); @@ -637,26 +703,26 @@ conn_action_rx(fsm_instance *fi, int event, void *arg) netiucv_unpack_skb(conn, conn->rx_buff); } -static void -conn_action_txdone(fsm_instance *fi, int event, void *arg) +static void conn_action_txdone(fsm_instance *fi, int event, void *arg) { - struct iucv_event *ev = (struct iucv_event *)arg; + struct iucv_event *ev = arg; struct iucv_connection *conn = ev->conn; - iucv_MessageComplete *eib = (iucv_MessageComplete *)ev->data; + struct iucv_message *msg = ev->data; + struct iucv_message txmsg; struct netiucv_priv *privptr = NULL; - /* Shut up, gcc! skb is always below 2G. 
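/*
 * Illustrative sketch only, fleshing out the message_pending stub from
 * the earlier sketch with the calls used by conn_action_rx() above:
 * receive into a flat buffer or reject the message outright.  The
 * buffer size is hypothetical.
 */
static void sample_message_pending(struct iucv_path *path,
				   struct iucv_message *msg)
{
	static char buf[2048];

	if (msg->length > sizeof(buf)) {
		iucv_message_reject(path, msg);
		return;
	}
	if (iucv_message_receive(path, msg, 0, buf, msg->length, NULL))
		return;
	/* hand buf[0..msg->length) to the upper layer here */
}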
*/ - __u32 single_flag = eib->ipmsgtag; - __u32 txbytes = 0; - __u32 txpackets = 0; - __u32 stat_maxcq = 0; + u32 single_flag = msg->tag; + u32 txbytes = 0; + u32 txpackets = 0; + u32 stat_maxcq = 0; struct sk_buff *skb; unsigned long saveflags; - ll_header header; + struct ll_header header; + int rc; IUCV_DBF_TEXT(trace, 4, __FUNCTION__); - if (conn && conn->netdev && conn->netdev->priv) - privptr = (struct netiucv_priv *)conn->netdev->priv; + if (conn && conn->netdev) + privptr = netdev_priv(conn->netdev); conn->prof.tx_pending--; if (single_flag) { if ((skb = skb_dequeue(&conn->commit_queue))) { @@ -688,56 +754,55 @@ conn_action_txdone(fsm_instance *fi, int event, void *arg) conn->prof.maxmulti = conn->collect_len; conn->collect_len = 0; spin_unlock_irqrestore(&conn->collect_lock, saveflags); - if (conn->tx_buff->len) { - int rc; - - header.next = 0; - memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, - NETIUCV_HDRLEN); + if (conn->tx_buff->len == 0) { + fsm_newstate(fi, CONN_STATE_IDLE); + return; + } - conn->prof.send_stamp = xtime; - rc = iucv_send(conn->pathid, NULL, 0, 0, 0, 0, + header.next = 0; + memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN); + conn->prof.send_stamp = xtime; + txmsg.class = 0; + txmsg.tag = 0; + rc = iucv_message_send(conn->path, &txmsg, 0, 0, conn->tx_buff->data, conn->tx_buff->len); - conn->prof.doios_multi++; - conn->prof.txlen += conn->tx_buff->len; - conn->prof.tx_pending++; - if (conn->prof.tx_pending > conn->prof.tx_max_pending) - conn->prof.tx_max_pending = conn->prof.tx_pending; - if (rc) { - conn->prof.tx_pending--; - fsm_newstate(fi, CONN_STATE_IDLE); - if (privptr) - privptr->stats.tx_errors += txpackets; - PRINT_WARN("iucv_send returned %08x\n", rc); - IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); - } else { - if (privptr) { - privptr->stats.tx_packets += txpackets; - privptr->stats.tx_bytes += txbytes; - } - if (stat_maxcq > conn->prof.maxcqueue) - conn->prof.maxcqueue = stat_maxcq; - } - } else + conn->prof.doios_multi++; + conn->prof.txlen += conn->tx_buff->len; + conn->prof.tx_pending++; + if (conn->prof.tx_pending > conn->prof.tx_max_pending) + conn->prof.tx_max_pending = conn->prof.tx_pending; + if (rc) { + conn->prof.tx_pending--; fsm_newstate(fi, CONN_STATE_IDLE); + if (privptr) + privptr->stats.tx_errors += txpackets; + PRINT_WARN("iucv_send returned %08x\n", rc); + IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); + } else { + if (privptr) { + privptr->stats.tx_packets += txpackets; + privptr->stats.tx_bytes += txbytes; + } + if (stat_maxcq > conn->prof.maxcqueue) + conn->prof.maxcqueue = stat_maxcq; + } } -static void -conn_action_connaccept(fsm_instance *fi, int event, void *arg) +static void conn_action_connaccept(fsm_instance *fi, int event, void *arg) { - struct iucv_event *ev = (struct iucv_event *)arg; + struct iucv_event *ev = arg; struct iucv_connection *conn = ev->conn; - iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data; + struct iucv_path *path = ev->data; struct net_device *netdev = conn->netdev; - struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; + struct netiucv_priv *privptr = netdev_priv(netdev); int rc; - __u16 msglimit; - __u8 udata[16]; IUCV_DBF_TEXT(trace, 3, __FUNCTION__); - rc = iucv_accept(eib->ippathid, NETIUCV_QUEUELEN_DEFAULT, udata, 0, - conn->handle, conn, NULL, &msglimit); + conn->path = path; + path->msglim = NETIUCV_QUEUELEN_DEFAULT; + path->flags = 0; + rc = iucv_path_accept(path, &netiucv_handler, NULL, conn); if (rc) { 
PRINT_WARN("%s: IUCV accept failed with error %d\n", netdev->name, rc); @@ -745,183 +810,126 @@ conn_action_connaccept(fsm_instance *fi, int event, void *arg) return; } fsm_newstate(fi, CONN_STATE_IDLE); - conn->pathid = eib->ippathid; - netdev->tx_queue_len = msglimit; + netdev->tx_queue_len = conn->path->msglim; fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); } -static void -conn_action_connreject(fsm_instance *fi, int event, void *arg) +static void conn_action_connreject(fsm_instance *fi, int event, void *arg) { - struct iucv_event *ev = (struct iucv_event *)arg; - struct iucv_connection *conn = ev->conn; - struct net_device *netdev = conn->netdev; - iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data; - __u8 udata[16]; + struct iucv_event *ev = arg; + struct iucv_path *path = ev->data; IUCV_DBF_TEXT(trace, 3, __FUNCTION__); - - iucv_sever(eib->ippathid, udata); - if (eib->ippathid != conn->pathid) { - PRINT_INFO("%s: IR Connection Pending; " - "pathid %d does not match original pathid %d\n", - netdev->name, eib->ippathid, conn->pathid); - IUCV_DBF_TEXT_(data, 2, - "connreject: IR pathid %d, conn. pathid %d\n", - eib->ippathid, conn->pathid); - iucv_sever(conn->pathid, udata); - } + iucv_path_sever(path, NULL); } -static void -conn_action_connack(fsm_instance *fi, int event, void *arg) +static void conn_action_connack(fsm_instance *fi, int event, void *arg) { - struct iucv_event *ev = (struct iucv_event *)arg; - struct iucv_connection *conn = ev->conn; - iucv_ConnectionComplete *eib = (iucv_ConnectionComplete *)ev->data; + struct iucv_connection *conn = arg; struct net_device *netdev = conn->netdev; - struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; + struct netiucv_priv *privptr = netdev_priv(netdev); IUCV_DBF_TEXT(trace, 3, __FUNCTION__); - fsm_deltimer(&conn->timer); fsm_newstate(fi, CONN_STATE_IDLE); - if (eib->ippathid != conn->pathid) { - PRINT_INFO("%s: IR Connection Complete; " - "pathid %d does not match original pathid %d\n", - netdev->name, eib->ippathid, conn->pathid); - IUCV_DBF_TEXT_(data, 2, - "connack: IR pathid %d, conn. 
pathid %d\n", - eib->ippathid, conn->pathid); - conn->pathid = eib->ippathid; - } - netdev->tx_queue_len = eib->ipmsglim; + netdev->tx_queue_len = conn->path->msglim; fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); } -static void -conn_action_conntimsev(fsm_instance *fi, int event, void *arg) +static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg) { - struct iucv_connection *conn = (struct iucv_connection *)arg; - __u8 udata[16]; + struct iucv_connection *conn = arg; IUCV_DBF_TEXT(trace, 3, __FUNCTION__); - fsm_deltimer(&conn->timer); - iucv_sever(conn->pathid, udata); + iucv_path_sever(conn->path, NULL); fsm_newstate(fi, CONN_STATE_STARTWAIT); } -static void -conn_action_connsever(fsm_instance *fi, int event, void *arg) +static void conn_action_connsever(fsm_instance *fi, int event, void *arg) { - struct iucv_event *ev = (struct iucv_event *)arg; - struct iucv_connection *conn = ev->conn; + struct iucv_connection *conn = arg; struct net_device *netdev = conn->netdev; - struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; - __u8 udata[16]; + struct netiucv_priv *privptr = netdev_priv(netdev); IUCV_DBF_TEXT(trace, 3, __FUNCTION__); fsm_deltimer(&conn->timer); - iucv_sever(conn->pathid, udata); + iucv_path_sever(conn->path, NULL); PRINT_INFO("%s: Remote dropped connection\n", netdev->name); IUCV_DBF_TEXT(data, 2, - "conn_action_connsever: Remote dropped connection\n"); + "conn_action_connsever: Remote dropped connection\n"); fsm_newstate(fi, CONN_STATE_STARTWAIT); fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); } -static void -conn_action_start(fsm_instance *fi, int event, void *arg) +static void conn_action_start(fsm_instance *fi, int event, void *arg) { - struct iucv_event *ev = (struct iucv_event *)arg; - struct iucv_connection *conn = ev->conn; - __u16 msglimit; + struct iucv_connection *conn = arg; int rc; IUCV_DBF_TEXT(trace, 3, __FUNCTION__); - if (!conn->handle) { - IUCV_DBF_TEXT(trace, 5, "calling iucv_register_program\n"); - conn->handle = - iucv_register_program(iucvMagic, conn->userid, - netiucv_mask, - &netiucv_ops, conn); - fsm_newstate(fi, CONN_STATE_STARTWAIT); - if (!conn->handle) { - fsm_newstate(fi, CONN_STATE_REGERR); - conn->handle = NULL; - IUCV_DBF_TEXT(setup, 2, - "NULL from iucv_register_program\n"); - return; - } - - PRINT_DEBUG("%s('%s'): registered successfully\n", - conn->netdev->name, conn->userid); - } - + fsm_newstate(fi, CONN_STATE_STARTWAIT); PRINT_DEBUG("%s('%s'): connecting ...\n", - conn->netdev->name, conn->userid); + conn->netdev->name, conn->userid); - /* We must set the state before calling iucv_connect because the callback - * handler could be called at any point after the connection request is - * sent */ + /* + * We must set the state before calling iucv_connect because the + * callback handler could be called at any point after the connection + * request is sent + */ fsm_newstate(fi, CONN_STATE_SETUPWAIT); - rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic, - conn->userid, iucv_host, 0, NULL, &msglimit, - conn->handle, conn); + conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL); + rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid, + NULL, iucvMagic, conn); switch (rc) { - case 0: - conn->netdev->tx_queue_len = msglimit; - fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, - CONN_EVENT_TIMER, conn); - return; - case 11: - PRINT_INFO("%s: User %s is currently not available.\n", - conn->netdev->name, - netiucv_printname(conn->userid)); - fsm_newstate(fi, 
CONN_STATE_STARTWAIT); - return; - case 12: - PRINT_INFO("%s: User %s is currently not ready.\n", - conn->netdev->name, - netiucv_printname(conn->userid)); - fsm_newstate(fi, CONN_STATE_STARTWAIT); - return; - case 13: - PRINT_WARN("%s: Too many IUCV connections.\n", - conn->netdev->name); - fsm_newstate(fi, CONN_STATE_CONNERR); - break; - case 14: - PRINT_WARN( - "%s: User %s has too many IUCV connections.\n", - conn->netdev->name, - netiucv_printname(conn->userid)); - fsm_newstate(fi, CONN_STATE_CONNERR); - break; - case 15: - PRINT_WARN( - "%s: No IUCV authorization in CP directory.\n", - conn->netdev->name); - fsm_newstate(fi, CONN_STATE_CONNERR); - break; - default: - PRINT_WARN("%s: iucv_connect returned error %d\n", - conn->netdev->name, rc); - fsm_newstate(fi, CONN_STATE_CONNERR); - break; + case 0: + conn->netdev->tx_queue_len = conn->path->msglim; + fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, + CONN_EVENT_TIMER, conn); + return; + case 11: + PRINT_INFO("%s: User %s is currently not available.\n", + conn->netdev->name, + netiucv_printname(conn->userid)); + fsm_newstate(fi, CONN_STATE_STARTWAIT); + break; + case 12: + PRINT_INFO("%s: User %s is currently not ready.\n", + conn->netdev->name, + netiucv_printname(conn->userid)); + fsm_newstate(fi, CONN_STATE_STARTWAIT); + break; + case 13: + PRINT_WARN("%s: Too many IUCV connections.\n", + conn->netdev->name); + fsm_newstate(fi, CONN_STATE_CONNERR); + break; + case 14: + PRINT_WARN("%s: User %s has too many IUCV connections.\n", + conn->netdev->name, + netiucv_printname(conn->userid)); + fsm_newstate(fi, CONN_STATE_CONNERR); + break; + case 15: + PRINT_WARN("%s: No IUCV authorization in CP directory.\n", + conn->netdev->name); + fsm_newstate(fi, CONN_STATE_CONNERR); + break; + default: + PRINT_WARN("%s: iucv_connect returned error %d\n", + conn->netdev->name, rc); + fsm_newstate(fi, CONN_STATE_CONNERR); + break; } IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc); - IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n"); - iucv_unregister_program(conn->handle); - conn->handle = NULL; + kfree(conn->path); + conn->path = NULL; } -static void -netiucv_purge_skb_queue(struct sk_buff_head *q) +static void netiucv_purge_skb_queue(struct sk_buff_head *q) { struct sk_buff *skb; @@ -931,36 +939,34 @@ netiucv_purge_skb_queue(struct sk_buff_head *q) } } -static void -conn_action_stop(fsm_instance *fi, int event, void *arg) +static void conn_action_stop(fsm_instance *fi, int event, void *arg) { - struct iucv_event *ev = (struct iucv_event *)arg; + struct iucv_event *ev = arg; struct iucv_connection *conn = ev->conn; struct net_device *netdev = conn->netdev; - struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; + struct netiucv_priv *privptr = netdev_priv(netdev); IUCV_DBF_TEXT(trace, 3, __FUNCTION__); fsm_deltimer(&conn->timer); fsm_newstate(fi, CONN_STATE_STOPPED); netiucv_purge_skb_queue(&conn->collect_queue); - if (conn->handle) - IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n"); - iucv_unregister_program(conn->handle); - conn->handle = NULL; + if (conn->path) { + IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n"); + iucv_path_sever(conn->path, iucvMagic); + kfree(conn->path); + conn->path = NULL; + } netiucv_purge_skb_queue(&conn->commit_queue); fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); } -static void -conn_action_inval(fsm_instance *fi, int event, void *arg) +static void conn_action_inval(fsm_instance *fi, int event, void *arg) { - struct iucv_event *ev = (struct iucv_event *)arg; - 
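/*
 * Illustrative sketch only: opening a path with the new interface,
 * following conn_action_start() above.  The message limit, the peer
 * identifiers and sample_handler (from the earlier handler sketch) are
 * hypothetical, and iucv_path_alloc() is assumed to return NULL on
 * allocation failure; <linux/slab.h> provides kfree().
 */
static struct iucv_path *sample_open_path(u8 peer_userid[8],
					  u8 peer_user_data[16],
					  void *private)
{
	struct iucv_path *path;
	int rc;

	path = iucv_path_alloc(16 /* msglim */, 0 /* flags */, GFP_KERNEL);
	if (!path)
		return NULL;
	rc = iucv_path_connect(path, &sample_handler, peer_userid,
			       NULL, peer_user_data, private);
	if (rc) {
		/* CP return codes 11..15 correspond to the messages
		 * printed by conn_action_start() above */
		kfree(path);
		return NULL;
	}
	/* the path_complete() callback fires once the partner accepts */
	return path;
}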
struct iucv_connection *conn = ev->conn; + struct iucv_connection *conn = arg; struct net_device *netdev = conn->netdev; - PRINT_WARN("%s: Cannot connect without username\n", - netdev->name); + PRINT_WARN("%s: Cannot connect without username\n", netdev->name); IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n"); } @@ -999,29 +1005,27 @@ static const fsm_node conn_fsm[] = { static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node); -/** +/* * Actions for interface - statemachine. - *****************************************************************************/ + */ /** - * Startup connection by sending CONN_EVENT_START to it. + * dev_action_start + * @fi: An instance of an interface statemachine. + * @event: The event, just happened. + * @arg: Generic pointer, casted from struct net_device * upon call. * - * @param fi An instance of an interface statemachine. - * @param event The event, just happened. - * @param arg Generic pointer, casted from struct net_device * upon call. + * Startup connection by sending CONN_EVENT_START to it. */ -static void -dev_action_start(fsm_instance *fi, int event, void *arg) +static void dev_action_start(fsm_instance *fi, int event, void *arg) { - struct net_device *dev = (struct net_device *)arg; - struct netiucv_priv *privptr = dev->priv; - struct iucv_event ev; + struct net_device *dev = arg; + struct netiucv_priv *privptr = netdev_priv(dev); IUCV_DBF_TEXT(trace, 3, __FUNCTION__); - ev.conn = privptr->conn; fsm_newstate(fi, DEV_STATE_STARTWAIT); - fsm_event(privptr->conn->fsm, CONN_EVENT_START, &ev); + fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn); } /** @@ -1034,8 +1038,8 @@ dev_action_start(fsm_instance *fi, int event, void *arg) static void dev_action_stop(fsm_instance *fi, int event, void *arg) { - struct net_device *dev = (struct net_device *)arg; - struct netiucv_priv *privptr = dev->priv; + struct net_device *dev = arg; + struct netiucv_priv *privptr = netdev_priv(dev); struct iucv_event ev; IUCV_DBF_TEXT(trace, 3, __FUNCTION__); @@ -1057,8 +1061,8 @@ dev_action_stop(fsm_instance *fi, int event, void *arg) static void dev_action_connup(fsm_instance *fi, int event, void *arg) { - struct net_device *dev = (struct net_device *)arg; - struct netiucv_priv *privptr = dev->priv; + struct net_device *dev = arg; + struct netiucv_priv *privptr = netdev_priv(dev); IUCV_DBF_TEXT(trace, 3, __FUNCTION__); @@ -1131,11 +1135,13 @@ static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node); * * @return 0 on success, -ERRNO on failure. (Never fails.) 
*/ -static int -netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { +static int netiucv_transmit_skb(struct iucv_connection *conn, + struct sk_buff *skb) +{ + struct iucv_message msg; unsigned long saveflags; - ll_header header; - int rc = 0; + struct ll_header header; + int rc; if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) { int l = skb->len + NETIUCV_HDRLEN; @@ -1145,11 +1151,12 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { (conn->max_buffsize - NETIUCV_HDRLEN)) { rc = -EBUSY; IUCV_DBF_TEXT(data, 2, - "EBUSY from netiucv_transmit_skb\n"); + "EBUSY from netiucv_transmit_skb\n"); } else { atomic_inc(&skb->users); skb_queue_tail(&conn->collect_queue, skb); conn->collect_len += l; + rc = 0; } spin_unlock_irqrestore(&conn->collect_lock, saveflags); } else { @@ -1188,9 +1195,10 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { fsm_newstate(conn->fsm, CONN_STATE_TX); conn->prof.send_stamp = xtime; - rc = iucv_send(conn->pathid, NULL, 0, 0, 1 /* single_flag */, - 0, nskb->data, nskb->len); - /* Shut up, gcc! nskb is always below 2G. */ + msg.tag = 1; + msg.class = 0; + rc = iucv_message_send(conn->path, &msg, 0, 0, + nskb->data, nskb->len); conn->prof.doios_single++; conn->prof.txlen += skb->len; conn->prof.tx_pending++; @@ -1200,7 +1208,7 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { struct netiucv_priv *privptr; fsm_newstate(conn->fsm, CONN_STATE_IDLE); conn->prof.tx_pending--; - privptr = (struct netiucv_priv *)conn->netdev->priv; + privptr = netdev_priv(conn->netdev); if (privptr) privptr->stats.tx_errors++; if (copied) @@ -1226,9 +1234,9 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { return rc; } -/** +/* * Interface API for upper network layers - *****************************************************************************/ + */ /** * Open an interface. @@ -1238,9 +1246,11 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { * * @return 0 on success, -ERRNO on failure. (Never fails.) */ -static int -netiucv_open(struct net_device *dev) { - fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START,dev); +static int netiucv_open(struct net_device *dev) +{ + struct netiucv_priv *priv = netdev_priv(dev); + + fsm_event(priv->fsm, DEV_EVENT_START, dev); return 0; } @@ -1252,9 +1262,11 @@ netiucv_open(struct net_device *dev) { * * @return 0 on success, -ERRNO on failure. (Never fails.) */ -static int -netiucv_close(struct net_device *dev) { - fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_STOP, dev); +static int netiucv_close(struct net_device *dev) +{ + struct netiucv_priv *priv = netdev_priv(dev); + + fsm_event(priv->fsm, DEV_EVENT_STOP, dev); return 0; } @@ -1271,8 +1283,8 @@ netiucv_close(struct net_device *dev) { */ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) { - int rc = 0; - struct netiucv_priv *privptr = dev->priv; + struct netiucv_priv *privptr = netdev_priv(dev); + int rc; IUCV_DBF_TEXT(trace, 4, __FUNCTION__); /** @@ -1312,40 +1324,41 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) return -EBUSY; } dev->trans_start = jiffies; - if (netiucv_transmit_skb(privptr->conn, skb)) - rc = 1; + rc = netiucv_transmit_skb(privptr->conn, skb) != 0; netiucv_clear_busy(dev); return rc; } /** - * Returns interface statistics of a device. + * netiucv_stats + * @dev: Pointer to interface struct. * - * @param dev Pointer to interface struct. 
+ * Returns interface statistics of a device. * - * @return Pointer to stats struct of this interface. + * Returns pointer to stats struct of this interface. */ -static struct net_device_stats * -netiucv_stats (struct net_device * dev) +static struct net_device_stats *netiucv_stats (struct net_device * dev) { + struct netiucv_priv *priv = netdev_priv(dev); + IUCV_DBF_TEXT(trace, 5, __FUNCTION__); - return &((struct netiucv_priv *)dev->priv)->stats; + return &priv->stats; } /** - * Sets MTU of an interface. + * netiucv_change_mtu + * @dev: Pointer to interface struct. + * @new_mtu: The new MTU to use for this interface. * - * @param dev Pointer to interface struct. - * @param new_mtu The new MTU to use for this interface. + * Sets MTU of an interface. * - * @return 0 on success, -EINVAL if MTU is out of valid range. + * Returns 0 on success, -EINVAL if MTU is out of valid range. * (valid range is 576 .. NETIUCV_MTU_MAX). */ -static int -netiucv_change_mtu (struct net_device * dev, int new_mtu) +static int netiucv_change_mtu(struct net_device * dev, int new_mtu) { IUCV_DBF_TEXT(trace, 3, __FUNCTION__); - if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX)) { + if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) { IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n"); return -EINVAL; } @@ -1353,12 +1366,12 @@ netiucv_change_mtu (struct net_device * dev, int new_mtu) return 0; } -/** +/* * attributes in sysfs - *****************************************************************************/ + */ -static ssize_t -user_show (struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t user_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct netiucv_priv *priv = dev->driver_data; @@ -1366,8 +1379,8 @@ user_show (struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid)); } -static ssize_t -user_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +static ssize_t user_write(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct netiucv_priv *priv = dev->driver_data; struct net_device *ndev = priv->conn->netdev; @@ -1375,80 +1388,70 @@ user_write (struct device *dev, struct device_attribute *attr, const char *buf, char *tmp; char username[9]; int i; - struct iucv_connection **clist = &iucv_conns.iucv_connections; - unsigned long flags; + struct iucv_connection *cp; IUCV_DBF_TEXT(trace, 3, __FUNCTION__); - if (count>9) { - PRINT_WARN("netiucv: username too long (%d)!\n", (int)count); + if (count > 9) { + PRINT_WARN("netiucv: username too long (%d)!\n", (int) count); IUCV_DBF_TEXT_(setup, 2, - "%d is length of username\n", (int)count); + "%d is length of username\n", (int) count); return -EINVAL; } tmp = strsep((char **) &buf, "\n"); - for (i=0, p=tmp; i<8 && *p; i++, p++) { - if (isalnum(*p) || (*p == '$')) + for (i = 0, p = tmp; i < 8 && *p; i++, p++) { + if (isalnum(*p) || (*p == '$')) { username[i]= toupper(*p); - else if (*p == '\n') { + continue; + } + if (*p == '\n') { /* trailing lf, grr */ break; - } else { - PRINT_WARN("netiucv: Invalid char %c in username!\n", - *p); - IUCV_DBF_TEXT_(setup, 2, - "username: invalid character %c\n", - *p); - return -EINVAL; } + PRINT_WARN("netiucv: Invalid char %c in username!\n", *p); + IUCV_DBF_TEXT_(setup, 2, + "username: invalid character %c\n", *p); + return -EINVAL; } - while (i<8) + while (i < 8) username[i++] = ' '; username[8] = '\0'; - if (memcmp(username, 
priv->conn->userid, 9)) { - /* username changed */ - if (ndev->flags & (IFF_UP | IFF_RUNNING)) { - PRINT_WARN( - "netiucv: device %s active, connected to %s\n", - dev->bus_id, priv->conn->userid); - PRINT_WARN("netiucv: user cannot be updated\n"); - IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); - return -EBUSY; + if (memcmp(username, priv->conn->userid, 9) && + (ndev->flags & (IFF_UP | IFF_RUNNING))) { + /* username changed while the interface is active. */ + PRINT_WARN("netiucv: device %s active, connected to %s\n", + dev->bus_id, priv->conn->userid); + PRINT_WARN("netiucv: user cannot be updated\n"); + IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); + return -EBUSY; + } + read_lock_bh(&iucv_connection_rwlock); + list_for_each_entry(cp, &iucv_connection_list, list) { + if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) { + read_unlock_bh(&iucv_connection_rwlock); + PRINT_WARN("netiucv: Connection to %s already " + "exists\n", username); + return -EEXIST; } } - read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); - while (*clist) { - if (!strncmp(username, (*clist)->userid, 9) || - ((*clist)->netdev != ndev)) - break; - clist = &((*clist)->next); - } - read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); - if (*clist) { - PRINT_WARN("netiucv: Connection to %s already exists\n", - username); - return -EEXIST; - } + read_unlock_bh(&iucv_connection_rwlock); memcpy(priv->conn->userid, username, 9); - return count; - } static DEVICE_ATTR(user, 0644, user_show, user_write); -static ssize_t -buffer_show (struct device *dev, struct device_attribute *attr, char *buf) -{ - struct netiucv_priv *priv = dev->driver_data; +static ssize_t buffer_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ struct netiucv_priv *priv = dev->driver_data; IUCV_DBF_TEXT(trace, 5, __FUNCTION__); return sprintf(buf, "%d\n", priv->conn->max_buffsize); } -static ssize_t -buffer_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct netiucv_priv *priv = dev->driver_data; struct net_device *ndev = priv->conn->netdev; @@ -1502,8 +1505,8 @@ buffer_write (struct device *dev, struct device_attribute *attr, const char *buf static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write); -static ssize_t -dev_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr, + char *buf) { struct netiucv_priv *priv = dev->driver_data; @@ -1513,8 +1516,8 @@ dev_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL); -static ssize_t -conn_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t conn_fsm_show (struct device *dev, + struct device_attribute *attr, char *buf) { struct netiucv_priv *priv = dev->driver_data; @@ -1524,8 +1527,8 @@ conn_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL); -static ssize_t -maxmulti_show (struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t maxmulti_show (struct device *dev, + struct device_attribute *attr, char *buf) { struct netiucv_priv *priv = dev->driver_data; @@ -1533,8 +1536,9 @@ maxmulti_show (struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%ld\n", 
priv->conn->prof.maxmulti); } -static ssize_t -maxmulti_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +static ssize_t maxmulti_write (struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct netiucv_priv *priv = dev->driver_data; @@ -1545,8 +1549,8 @@ maxmulti_write (struct device *dev, struct device_attribute *attr, const char *b static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write); -static ssize_t -maxcq_show (struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr, + char *buf) { struct netiucv_priv *priv = dev->driver_data; @@ -1554,8 +1558,8 @@ maxcq_show (struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue); } -static ssize_t -maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct netiucv_priv *priv = dev->driver_data; @@ -1566,8 +1570,8 @@ maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf, static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write); -static ssize_t -sdoio_show (struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr, + char *buf) { struct netiucv_priv *priv = dev->driver_data; @@ -1575,8 +1579,8 @@ sdoio_show (struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%ld\n", priv->conn->prof.doios_single); } -static ssize_t -sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct netiucv_priv *priv = dev->driver_data; @@ -1587,8 +1591,8 @@ sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write); -static ssize_t -mdoio_show (struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr, + char *buf) { struct netiucv_priv *priv = dev->driver_data; @@ -1596,8 +1600,8 @@ mdoio_show (struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi); } -static ssize_t -mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct netiucv_priv *priv = dev->driver_data; @@ -1608,8 +1612,8 @@ mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write); -static ssize_t -txlen_show (struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t txlen_show (struct device *dev, struct device_attribute *attr, + char *buf) { struct netiucv_priv *priv = dev->driver_data; @@ -1617,8 +1621,8 @@ txlen_show (struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%ld\n", priv->conn->prof.txlen); } -static ssize_t -txlen_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +static ssize_t txlen_write (struct device *dev, struct 
device_attribute *attr, + const char *buf, size_t count) { struct netiucv_priv *priv = dev->driver_data; @@ -1629,8 +1633,8 @@ txlen_write (struct device *dev, struct device_attribute *attr, const char *buf, static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write); -static ssize_t -txtime_show (struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t txtime_show (struct device *dev, struct device_attribute *attr, + char *buf) { struct netiucv_priv *priv = dev->driver_data; @@ -1638,8 +1642,8 @@ txtime_show (struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%ld\n", priv->conn->prof.tx_time); } -static ssize_t -txtime_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +static ssize_t txtime_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct netiucv_priv *priv = dev->driver_data; @@ -1650,8 +1654,8 @@ txtime_write (struct device *dev, struct device_attribute *attr, const char *buf static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write); -static ssize_t -txpend_show (struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t txpend_show (struct device *dev, struct device_attribute *attr, + char *buf) { struct netiucv_priv *priv = dev->driver_data; @@ -1659,8 +1663,8 @@ txpend_show (struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending); } -static ssize_t -txpend_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +static ssize_t txpend_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct netiucv_priv *priv = dev->driver_data; @@ -1671,8 +1675,8 @@ txpend_write (struct device *dev, struct device_attribute *attr, const char *buf static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write); -static ssize_t -txmpnd_show (struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr, + char *buf) { struct netiucv_priv *priv = dev->driver_data; @@ -1680,8 +1684,8 @@ txmpnd_show (struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending); } -static ssize_t -txmpnd_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct netiucv_priv *priv = dev->driver_data; @@ -1721,8 +1725,7 @@ static struct attribute_group netiucv_stat_attr_group = { .attrs = netiucv_stat_attrs, }; -static inline int -netiucv_add_files(struct device *dev) +static inline int netiucv_add_files(struct device *dev) { int ret; @@ -1736,18 +1739,16 @@ netiucv_add_files(struct device *dev) return ret; } -static inline void -netiucv_remove_files(struct device *dev) +static inline void netiucv_remove_files(struct device *dev) { IUCV_DBF_TEXT(trace, 3, __FUNCTION__); sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group); sysfs_remove_group(&dev->kobj, &netiucv_attr_group); } -static int -netiucv_register_device(struct net_device *ndev) +static int netiucv_register_device(struct net_device *ndev) { - struct netiucv_priv *priv = ndev->priv; + struct netiucv_priv *priv = netdev_priv(ndev); struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); int ret; @@ -1786,8 +1787,7 @@ out_unreg: return ret; } -static void 
-netiucv_unregister_device(struct device *dev) +static void netiucv_unregister_device(struct device *dev) { IUCV_DBF_TEXT(trace, 3, __FUNCTION__); netiucv_remove_files(dev); @@ -1798,107 +1798,89 @@ netiucv_unregister_device(struct device *dev) * Allocate and initialize a new connection structure. * Add it to the list of netiucv connections; */ -static struct iucv_connection * -netiucv_new_connection(struct net_device *dev, char *username) -{ - unsigned long flags; - struct iucv_connection **clist = &iucv_conns.iucv_connections; - struct iucv_connection *conn = - kzalloc(sizeof(struct iucv_connection), GFP_KERNEL); - - if (conn) { - skb_queue_head_init(&conn->collect_queue); - skb_queue_head_init(&conn->commit_queue); - spin_lock_init(&conn->collect_lock); - conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; - conn->netdev = dev; - - conn->rx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT, - GFP_KERNEL | GFP_DMA); - if (!conn->rx_buff) { - kfree(conn); - return NULL; - } - conn->tx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT, - GFP_KERNEL | GFP_DMA); - if (!conn->tx_buff) { - kfree_skb(conn->rx_buff); - kfree(conn); - return NULL; - } - conn->fsm = init_fsm("netiucvconn", conn_state_names, - conn_event_names, NR_CONN_STATES, - NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN, - GFP_KERNEL); - if (!conn->fsm) { - kfree_skb(conn->tx_buff); - kfree_skb(conn->rx_buff); - kfree(conn); - return NULL; - } - fsm_settimer(conn->fsm, &conn->timer); - fsm_newstate(conn->fsm, CONN_STATE_INVALID); - - if (username) { - memcpy(conn->userid, username, 9); - fsm_newstate(conn->fsm, CONN_STATE_STOPPED); - } +static struct iucv_connection *netiucv_new_connection(struct net_device *dev, + char *username) +{ + struct iucv_connection *conn; - write_lock_irqsave(&iucv_conns.iucv_rwlock, flags); - conn->next = *clist; - *clist = conn; - write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); + conn = kzalloc(sizeof(*conn), GFP_KERNEL); + if (!conn) + goto out; + skb_queue_head_init(&conn->collect_queue); + skb_queue_head_init(&conn->commit_queue); + spin_lock_init(&conn->collect_lock); + conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; + conn->netdev = dev; + + conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA); + if (!conn->rx_buff) + goto out_conn; + conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA); + if (!conn->tx_buff) + goto out_rx; + conn->fsm = init_fsm("netiucvconn", conn_state_names, + conn_event_names, NR_CONN_STATES, + NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN, + GFP_KERNEL); + if (!conn->fsm) + goto out_tx; + + fsm_settimer(conn->fsm, &conn->timer); + fsm_newstate(conn->fsm, CONN_STATE_INVALID); + + if (username) { + memcpy(conn->userid, username, 9); + fsm_newstate(conn->fsm, CONN_STATE_STOPPED); } + + write_lock_bh(&iucv_connection_rwlock); + list_add_tail(&conn->list, &iucv_connection_list); + write_unlock_bh(&iucv_connection_rwlock); return conn; + +out_tx: + kfree_skb(conn->tx_buff); +out_rx: + kfree_skb(conn->rx_buff); +out_conn: + kfree(conn); +out: + return NULL; } /** * Release a connection structure and remove it from the * list of netiucv connections. 
*/ -static void -netiucv_remove_connection(struct iucv_connection *conn) +static void netiucv_remove_connection(struct iucv_connection *conn) { - struct iucv_connection **clist = &iucv_conns.iucv_connections; - unsigned long flags; - IUCV_DBF_TEXT(trace, 3, __FUNCTION__); - if (conn == NULL) - return; - write_lock_irqsave(&iucv_conns.iucv_rwlock, flags); - while (*clist) { - if (*clist == conn) { - *clist = conn->next; - write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); - if (conn->handle) { - iucv_unregister_program(conn->handle); - conn->handle = NULL; - } - fsm_deltimer(&conn->timer); - kfree_fsm(conn->fsm); - kfree_skb(conn->rx_buff); - kfree_skb(conn->tx_buff); - return; - } - clist = &((*clist)->next); + write_lock_bh(&iucv_connection_rwlock); + list_del_init(&conn->list); + write_unlock_bh(&iucv_connection_rwlock); + if (conn->path) { + iucv_path_sever(conn->path, iucvMagic); + kfree(conn->path); + conn->path = NULL; } - write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); + fsm_deltimer(&conn->timer); + kfree_fsm(conn->fsm); + kfree_skb(conn->rx_buff); + kfree_skb(conn->tx_buff); } /** * Release everything of a net device. */ -static void -netiucv_free_netdevice(struct net_device *dev) +static void netiucv_free_netdevice(struct net_device *dev) { - struct netiucv_priv *privptr; + struct netiucv_priv *privptr = netdev_priv(dev); IUCV_DBF_TEXT(trace, 3, __FUNCTION__); if (!dev) return; - privptr = (struct netiucv_priv *)dev->priv; if (privptr) { if (privptr->conn) netiucv_remove_connection(privptr->conn); @@ -1913,11 +1895,8 @@ netiucv_free_netdevice(struct net_device *dev) /** * Initialize a net device. (Called from kernel in alloc_netdev()) */ -static void -netiucv_setup_netdevice(struct net_device *dev) +static void netiucv_setup_netdevice(struct net_device *dev) { - memset(dev->priv, 0, sizeof(struct netiucv_priv)); - dev->mtu = NETIUCV_MTU_DEFAULT; dev->hard_start_xmit = netiucv_tx; dev->open = netiucv_open; @@ -1936,8 +1915,7 @@ netiucv_setup_netdevice(struct net_device *dev) /** * Allocate and initialize everything of a net device. 
*/ -static struct net_device * -netiucv_init_netdevice(char *username) +static struct net_device *netiucv_init_netdevice(char *username) { struct netiucv_priv *privptr; struct net_device *dev; @@ -1946,40 +1924,40 @@ netiucv_init_netdevice(char *username) netiucv_setup_netdevice); if (!dev) return NULL; - if (dev_alloc_name(dev, dev->name) < 0) { - free_netdev(dev); - return NULL; - } + if (dev_alloc_name(dev, dev->name) < 0) + goto out_netdev; - privptr = (struct netiucv_priv *)dev->priv; + privptr = netdev_priv(dev); privptr->fsm = init_fsm("netiucvdev", dev_state_names, dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS, dev_fsm, DEV_FSM_LEN, GFP_KERNEL); - if (!privptr->fsm) { - free_netdev(dev); - return NULL; - } + if (!privptr->fsm) + goto out_netdev; + privptr->conn = netiucv_new_connection(dev, username); if (!privptr->conn) { - kfree_fsm(privptr->fsm); - free_netdev(dev); IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n"); - return NULL; + goto out_fsm; } fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); - return dev; + +out_fsm: + kfree_fsm(privptr->fsm); +out_netdev: + free_netdev(dev); + return NULL; } -static ssize_t -conn_write(struct device_driver *drv, const char *buf, size_t count) +static ssize_t conn_write(struct device_driver *drv, + const char *buf, size_t count) { - char *p; + const char *p; char username[9]; - int i, ret; + int i, rc; struct net_device *dev; - struct iucv_connection **clist = &iucv_conns.iucv_connections; - unsigned long flags; + struct netiucv_priv *priv; + struct iucv_connection *cp; IUCV_DBF_TEXT(trace, 3, __FUNCTION__); if (count>9) { @@ -1988,83 +1966,82 @@ conn_write(struct device_driver *drv, const char *buf, size_t count) return -EINVAL; } - for (i=0, p=(char *)buf; i<8 && *p; i++, p++) { - if (isalnum(*p) || (*p == '$')) - username[i]= toupper(*p); - else if (*p == '\n') { + for (i = 0, p = buf; i < 8 && *p; i++, p++) { + if (isalnum(*p) || *p == '$') { + username[i] = toupper(*p); + continue; + } + if (*p == '\n') /* trailing lf, grr */ break; - } else { - PRINT_WARN("netiucv: Invalid character in username!\n"); - IUCV_DBF_TEXT_(setup, 2, - "conn_write: invalid character %c\n", *p); - return -EINVAL; - } + PRINT_WARN("netiucv: Invalid character in username!\n"); + IUCV_DBF_TEXT_(setup, 2, + "conn_write: invalid character %c\n", *p); + return -EINVAL; } - while (i<8) + while (i < 8) username[i++] = ' '; username[8] = '\0'; - read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); - while (*clist) { - if (!strncmp(username, (*clist)->userid, 9)) - break; - clist = &((*clist)->next); - } - read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); - if (*clist) { - PRINT_WARN("netiucv: Connection to %s already exists\n", - username); - return -EEXIST; + read_lock_bh(&iucv_connection_rwlock); + list_for_each_entry(cp, &iucv_connection_list, list) { + if (!strncmp(username, cp->userid, 9)) { + read_unlock_bh(&iucv_connection_rwlock); + PRINT_WARN("netiucv: Connection to %s already " + "exists\n", username); + return -EEXIST; + } } + read_unlock_bh(&iucv_connection_rwlock); + dev = netiucv_init_netdevice(username); if (!dev) { - PRINT_WARN( - "netiucv: Could not allocate network device structure " - "for user '%s'\n", netiucv_printname(username)); + PRINT_WARN("netiucv: Could not allocate network device " + "structure for user '%s'\n", + netiucv_printname(username)); IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); return -ENODEV; } - if ((ret = netiucv_register_device(dev))) { + rc = netiucv_register_device(dev); + if (rc) { 
IUCV_DBF_TEXT_(setup, 2, - "ret %d from netiucv_register_device\n", ret); + "ret %d from netiucv_register_device\n", rc); goto out_free_ndev; } /* sysfs magic */ - SET_NETDEV_DEV(dev, - (struct device*)((struct netiucv_priv*)dev->priv)->dev); + priv = netdev_priv(dev); + SET_NETDEV_DEV(dev, priv->dev); - if ((ret = register_netdev(dev))) { - netiucv_unregister_device((struct device*) - ((struct netiucv_priv*)dev->priv)->dev); - goto out_free_ndev; - } + rc = register_netdev(dev); + if (rc) + goto out_unreg; PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username)); return count; +out_unreg: + netiucv_unregister_device(priv->dev); out_free_ndev: PRINT_WARN("netiucv: Could not register '%s'\n", dev->name); IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n"); netiucv_free_netdevice(dev); - return ret; + return rc; } -DRIVER_ATTR(connection, 0200, NULL, conn_write); +static DRIVER_ATTR(connection, 0200, NULL, conn_write); -static ssize_t -remove_write (struct device_driver *drv, const char *buf, size_t count) +static ssize_t remove_write (struct device_driver *drv, + const char *buf, size_t count) { - struct iucv_connection **clist = &iucv_conns.iucv_connections; - unsigned long flags; + struct iucv_connection *cp; struct net_device *ndev; struct netiucv_priv *priv; struct device *dev; char name[IFNAMSIZ]; - char *p; + const char *p; int i; IUCV_DBF_TEXT(trace, 3, __FUNCTION__); @@ -2072,33 +2049,27 @@ remove_write (struct device_driver *drv, const char *buf, size_t count) if (count >= IFNAMSIZ) count = IFNAMSIZ - 1;; - for (i=0, p=(char *)buf; i<count && *p; i++, p++) { - if ((*p == '\n') || (*p == ' ')) { + for (i = 0, p = buf; i < count && *p; i++, p++) { + if (*p == '\n' || *p == ' ') /* trailing lf, grr */ break; - } else { - name[i]=*p; - } + name[i] = *p; } name[i] = '\0'; - read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); - while (*clist) { - ndev = (*clist)->netdev; - priv = (struct netiucv_priv*)ndev->priv; + read_lock_bh(&iucv_connection_rwlock); + list_for_each_entry(cp, &iucv_connection_list, list) { + ndev = cp->netdev; + priv = netdev_priv(ndev); dev = priv->dev; - - if (strncmp(name, ndev->name, count)) { - clist = &((*clist)->next); - continue; - } - read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); + if (strncmp(name, ndev->name, count)) + continue; + read_unlock_bh(&iucv_connection_rwlock); if (ndev->flags & (IFF_UP | IFF_RUNNING)) { - PRINT_WARN( - "netiucv: net device %s active with peer %s\n", - ndev->name, priv->conn->userid); + PRINT_WARN("netiucv: net device %s active with peer " + "%s\n", ndev->name, priv->conn->userid); PRINT_WARN("netiucv: %s cannot be removed\n", - ndev->name); + ndev->name); IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); return -EBUSY; } @@ -2106,75 +2077,94 @@ remove_write (struct device_driver *drv, const char *buf, size_t count) netiucv_unregister_device(dev); return count; } - read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); + read_unlock_bh(&iucv_connection_rwlock); PRINT_WARN("netiucv: net device %s unknown\n", name); IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); return -EINVAL; } -DRIVER_ATTR(remove, 0200, NULL, remove_write); +static DRIVER_ATTR(remove, 0200, NULL, remove_write); -static void -netiucv_banner(void) +static struct attribute * netiucv_drv_attrs[] = { + &driver_attr_connection.attr, + &driver_attr_remove.attr, + NULL, +}; + +static struct attribute_group netiucv_drv_attr_group = { + .attrs = netiucv_drv_attrs, +}; + +static void netiucv_banner(void) { PRINT_INFO("NETIUCV 
driver initialized\n"); } -static void __exit -netiucv_exit(void) +static void __exit netiucv_exit(void) { + struct iucv_connection *cp; + struct net_device *ndev; + struct netiucv_priv *priv; + struct device *dev; + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); - while (iucv_conns.iucv_connections) { - struct net_device *ndev = iucv_conns.iucv_connections->netdev; - struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv; - struct device *dev = priv->dev; + while (!list_empty(&iucv_connection_list)) { + cp = list_entry(iucv_connection_list.next, + struct iucv_connection, list); + list_del(&cp->list); + ndev = cp->netdev; + priv = netdev_priv(ndev); + dev = priv->dev; unregister_netdev(ndev); netiucv_unregister_device(dev); } - driver_remove_file(&netiucv_driver, &driver_attr_connection); - driver_remove_file(&netiucv_driver, &driver_attr_remove); + sysfs_remove_group(&netiucv_driver.kobj, &netiucv_drv_attr_group); driver_unregister(&netiucv_driver); + iucv_unregister(&netiucv_handler, 1); iucv_unregister_dbf_views(); PRINT_INFO("NETIUCV driver unloaded\n"); return; } -static int __init -netiucv_init(void) +static int __init netiucv_init(void) { - int ret; + int rc; - ret = iucv_register_dbf_views(); - if (ret) { - PRINT_WARN("netiucv_init failed, " - "iucv_register_dbf_views rc = %d\n", ret); - return ret; - } + rc = iucv_register_dbf_views(); + if (rc) + goto out; + rc = iucv_register(&netiucv_handler, 1); + if (rc) + goto out_dbf; IUCV_DBF_TEXT(trace, 3, __FUNCTION__); - ret = driver_register(&netiucv_driver); - if (ret) { + rc = driver_register(&netiucv_driver); + if (rc) { PRINT_ERR("NETIUCV: failed to register driver.\n"); - IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", ret); - iucv_unregister_dbf_views(); - return ret; + IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); + goto out_iucv; } - /* Add entry for specifying connections. 
*/ - ret = driver_create_file(&netiucv_driver, &driver_attr_connection); - if (!ret) { - ret = driver_create_file(&netiucv_driver, &driver_attr_remove); - netiucv_banner(); - rwlock_init(&iucv_conns.iucv_rwlock); - } else { - PRINT_ERR("NETIUCV: failed to add driver attribute.\n"); - IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret); - driver_unregister(&netiucv_driver); - iucv_unregister_dbf_views(); + rc = sysfs_create_group(&netiucv_driver.kobj, &netiucv_drv_attr_group); + if (rc) { + PRINT_ERR("NETIUCV: failed to add driver attributes.\n"); + IUCV_DBF_TEXT_(setup, 2, + "ret %d - netiucv_drv_attr_group\n", rc); + goto out_driver; } - return ret; + netiucv_banner(); + return rc; + +out_driver: + driver_unregister(&netiucv_driver); +out_iucv: + iucv_unregister(&netiucv_handler, 1); +out_dbf: + iucv_unregister_dbf_views(); +out: + return rc; } module_init(netiucv_init); diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h index 53c358c7d36..e95c281f1e3 100644 --- a/drivers/s390/net/qeth.h +++ b/drivers/s390/net/qeth.h @@ -710,7 +710,7 @@ struct qeth_reply { int (*callback)(struct qeth_card *,struct qeth_reply *,unsigned long); u32 seqno; unsigned long offset; - int received; + atomic_t received; int rc; void *param; struct qeth_card *card; diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 6bb558a9a03..7c735e1fe06 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c @@ -49,7 +49,7 @@ qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue, return buffers_needed; } -static inline void +static void qeth_eddp_free_context(struct qeth_eddp_context *ctx) { int i; @@ -91,7 +91,7 @@ qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf) } } -static inline int +static int qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf, struct qeth_eddp_context *ctx) { @@ -196,7 +196,7 @@ out: return flush_cnt; } -static inline void +static void qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, struct qeth_eddp_data *eddp, int data_len) { @@ -256,7 +256,7 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, ctx->offset += eddp->thl; } -static inline void +static void qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, __wsum *hcsum) { @@ -302,7 +302,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, } } -static inline void +static void qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, struct qeth_eddp_data *eddp, int data_len, __wsum hcsum) @@ -349,7 +349,7 @@ qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum); } -static inline __wsum +static __wsum qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) { __wsum phcsum; /* pseudo header checksum */ @@ -363,7 +363,7 @@ qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum); } -static inline __wsum +static __wsum qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) { __be32 proto; @@ -381,7 +381,7 @@ qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) return phcsum; } -static inline struct qeth_eddp_data * +static struct qeth_eddp_data * qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) { struct qeth_eddp_data *eddp; @@ -399,7 +399,7 @@ qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) return eddp; } -static inline void +static void 
__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, struct qeth_eddp_data *eddp) { @@ -464,7 +464,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, } } -static inline int +static int qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, struct sk_buff *skb, struct qeth_hdr *qhdr) { @@ -505,7 +505,7 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, return 0; } -static inline void +static void qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, int hdr_len) { @@ -529,7 +529,7 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, (skb_shinfo(skb)->gso_segs + 1); } -static inline struct qeth_eddp_context * +static struct qeth_eddp_context * qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, int hdr_len) { @@ -581,7 +581,7 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, return ctx; } -static inline struct qeth_eddp_context * +static struct qeth_eddp_context * qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, struct qeth_hdr *qhdr) { @@ -625,5 +625,3 @@ qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb, } return NULL; } - - diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 2bde4f1fb9c..2257e45594b 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -471,7 +471,7 @@ qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) channel->state == CH_STATE_UP) qeth_issue_next_read(card); - tasklet_schedule(&channel->irq_tasklet); + qeth_irq_tasklet((unsigned long)channel); return; out: wake_up(&card->wait_q); @@ -651,7 +651,7 @@ __qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo, return 0; } -static inline int +static int __qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr, int same_type) { @@ -795,7 +795,7 @@ qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr) return rc; } -static inline void +static void __qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags) { struct qeth_ipaddr *addr, *tmp; @@ -882,7 +882,7 @@ static void qeth_layer2_add_multicast(struct qeth_card *); static void qeth_add_multicast_ipv6(struct qeth_card *); #endif -static inline int +static int qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) { unsigned long flags; @@ -920,7 +920,7 @@ qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) wake_up(&card->wait_q); } -static inline int +static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread) { unsigned long flags; @@ -951,40 +951,6 @@ qeth_do_run_thread(struct qeth_card *card, unsigned long thread) } static int -qeth_register_ip_addresses(void *ptr) -{ - struct qeth_card *card; - - card = (struct qeth_card *) ptr; - daemonize("qeth_reg_ip"); - QETH_DBF_TEXT(trace,4,"regipth1"); - if (!qeth_do_run_thread(card, QETH_SET_IP_THREAD)) - return 0; - QETH_DBF_TEXT(trace,4,"regipth2"); - qeth_set_ip_addr_list(card); - qeth_clear_thread_running_bit(card, QETH_SET_IP_THREAD); - return 0; -} - -/* - * Drive the SET_PROMISC_MODE thread - */ -static int -qeth_set_promisc_mode(void *ptr) -{ - struct qeth_card *card = (struct qeth_card *) ptr; - - daemonize("qeth_setprm"); - QETH_DBF_TEXT(trace,4,"setprm1"); - if (!qeth_do_run_thread(card, QETH_SET_PROMISC_MODE_THREAD)) - return 0; - QETH_DBF_TEXT(trace,4,"setprm2"); - qeth_setadp_promisc_mode(card); - qeth_clear_thread_running_bit(card, 
QETH_SET_PROMISC_MODE_THREAD); - return 0; -} - -static int qeth_recover(void *ptr) { struct qeth_card *card; @@ -1047,11 +1013,6 @@ qeth_start_kernel_thread(struct work_struct *work) if (card->read.state != CH_STATE_UP && card->write.state != CH_STATE_UP) return; - - if (qeth_do_start_thread(card, QETH_SET_IP_THREAD)) - kernel_thread(qeth_register_ip_addresses, (void *)card,SIGCHLD); - if (qeth_do_start_thread(card, QETH_SET_PROMISC_MODE_THREAD)) - kernel_thread(qeth_set_promisc_mode, (void *)card, SIGCHLD); if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) kernel_thread(qeth_recover, (void *) card, SIGCHLD); } @@ -1074,7 +1035,7 @@ qeth_set_intial_options(struct qeth_card *card) card->options.layer2 = 1; else card->options.layer2 = 0; - card->options.performance_stats = 1; + card->options.performance_stats = 0; } /** @@ -1613,8 +1574,6 @@ qeth_issue_next_read(struct qeth_card *card) return -ENOMEM; } qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); - wait_event(card->wait_q, - atomic_cmpxchg(&card->read.irq_pending, 0, 1) == 0); QETH_DBF_TEXT(trace, 6, "noirqpnd"); rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, (addr_t) iob, 0, 0); @@ -1635,6 +1594,7 @@ qeth_alloc_reply(struct qeth_card *card) reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC); if (reply){ atomic_set(&reply->refcnt, 1); + atomic_set(&reply->received, 0); reply->card = card; }; return reply; @@ -1655,31 +1615,6 @@ qeth_put_reply(struct qeth_reply *reply) kfree(reply); } -static void -qeth_cmd_timeout(unsigned long data) -{ - struct qeth_reply *reply, *list_reply, *r; - unsigned long flags; - - reply = (struct qeth_reply *) data; - spin_lock_irqsave(&reply->card->lock, flags); - list_for_each_entry_safe(list_reply, r, - &reply->card->cmd_waiter_list, list) { - if (reply == list_reply){ - qeth_get_reply(reply); - list_del_init(&reply->list); - spin_unlock_irqrestore(&reply->card->lock, flags); - reply->rc = -ETIME; - reply->received = 1; - wake_up(&reply->wait_q); - qeth_put_reply(reply); - return; - } - } - spin_unlock_irqrestore(&reply->card->lock, flags); -} - - static struct qeth_ipa_cmd * qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) { @@ -1745,7 +1680,7 @@ qeth_clear_ipacmd_list(struct qeth_card *card) list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { qeth_get_reply(reply); reply->rc = -EIO; - reply->received = 1; + atomic_inc(&reply->received); list_del_init(&reply->list); wake_up(&reply->wait_q); qeth_put_reply(reply); @@ -1814,7 +1749,7 @@ qeth_send_control_data_cb(struct qeth_channel *channel, &card->cmd_waiter_list); spin_unlock_irqrestore(&card->lock, flags); } else { - reply->received = 1; + atomic_inc(&reply->received); wake_up(&reply->wait_q); } qeth_put_reply(reply); @@ -1829,9 +1764,9 @@ out: qeth_release_buffer(channel,iob); } -static inline void +static void qeth_prepare_control_data(struct qeth_card *card, int len, -struct qeth_cmd_buffer *iob) + struct qeth_cmd_buffer *iob) { qeth_setup_ccw(&card->write,iob->data,len); iob->callback = qeth_release_buffer; @@ -1858,7 +1793,7 @@ qeth_send_control_data(struct qeth_card *card, int len, int rc; unsigned long flags; struct qeth_reply *reply = NULL; - struct timer_list timer; + unsigned long timeout; QETH_DBF_TEXT(trace, 2, "sendctl"); @@ -1873,21 +1808,20 @@ qeth_send_control_data(struct qeth_card *card, int len, reply->seqno = QETH_IDX_COMMAND_SEQNO; else reply->seqno = card->seqno.ipa++; - init_timer(&timer); - timer.function = qeth_cmd_timeout; - timer.data = (unsigned long) reply; 
init_waitqueue_head(&reply->wait_q); spin_lock_irqsave(&card->lock, flags); list_add_tail(&reply->list, &card->cmd_waiter_list); spin_unlock_irqrestore(&card->lock, flags); QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN); - wait_event(card->wait_q, - atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0); + + while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; qeth_prepare_control_data(card, len, iob); + if (IS_IPA(iob->data)) - timer.expires = jiffies + QETH_IPA_TIMEOUT; + timeout = jiffies + QETH_IPA_TIMEOUT; else - timer.expires = jiffies + QETH_TIMEOUT; + timeout = jiffies + QETH_TIMEOUT; + QETH_DBF_TEXT(trace, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, @@ -1906,9 +1840,16 @@ qeth_send_control_data(struct qeth_card *card, int len, wake_up(&card->wait_q); return rc; } - add_timer(&timer); - wait_event(reply->wait_q, reply->received); - del_timer_sync(&timer); + while (!atomic_read(&reply->received)) { + if (time_after(jiffies, timeout)) { + spin_lock_irqsave(&reply->card->lock, flags); + list_del_init(&reply->list); + spin_unlock_irqrestore(&reply->card->lock, flags); + reply->rc = -ETIME; + atomic_inc(&reply->received); + wake_up(&reply->wait_q); + } + }; rc = reply->rc; qeth_put_reply(reply); return rc; @@ -2219,7 +2160,7 @@ qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, return 0; } -static inline struct sk_buff * +static struct sk_buff * qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) { struct sk_buff* skb; @@ -2238,7 +2179,7 @@ qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) return skb; } -static inline struct sk_buff * +static struct sk_buff * qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, struct qdio_buffer_element **__element, int *__offset, struct qeth_hdr **hdr) @@ -2323,7 +2264,7 @@ no_mem: return NULL; } -static inline __be16 +static __be16 qeth_type_trans(struct sk_buff *skb, struct net_device *dev) { struct qeth_card *card; @@ -2356,7 +2297,7 @@ qeth_type_trans(struct sk_buff *skb, struct net_device *dev) return htons(ETH_P_802_2); } -static inline void +static void qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, struct qeth_hdr *hdr) { @@ -2410,7 +2351,7 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, fake_llc->ethertype = ETH_P_IP; } -static inline void +static void qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb, struct qeth_hdr *hdr) { @@ -2466,35 +2407,20 @@ qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb, qeth_rebuild_skb_fake_ll_eth(card, skb, hdr); } -static inline __u16 +static inline void qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, struct qeth_hdr *hdr) { - unsigned short vlan_id = 0; -#ifdef CONFIG_QETH_VLAN - struct vlan_hdr *vhdr; -#endif - skb->pkt_type = PACKET_HOST; skb->protocol = qeth_type_trans(skb, skb->dev); if (card->options.checksum_type == NO_CHECKSUMMING) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; -#ifdef CONFIG_QETH_VLAN - if (hdr->hdr.l2.flags[2] & (QETH_LAYER2_FLAG_VLAN)) { - vhdr = (struct vlan_hdr *) skb->data; - skb->protocol = - __constant_htons(vhdr->h_vlan_encapsulated_proto); - vlan_id = hdr->hdr.l2.vlan_id; - skb_pull(skb, VLAN_HLEN); - } -#endif *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; - return vlan_id; } -static inline __u16 +static __u16 qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, struct 
qeth_hdr *hdr) { @@ -2550,7 +2476,7 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, return vlan_id; } -static inline void +static void qeth_process_inbound_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf, int index) { @@ -2560,7 +2486,6 @@ qeth_process_inbound_buffer(struct qeth_card *card, int offset; int rxrc; __u16 vlan_tag = 0; - __u16 *vlan_addr; /* get first element of current buffer */ element = (struct qdio_buffer_element *)&buf->buffer->element[0]; @@ -2571,7 +2496,7 @@ qeth_process_inbound_buffer(struct qeth_card *card, &offset, &hdr))) { skb->dev = card->dev; if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) - vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr); + qeth_layer2_rebuild_skb(card, skb, hdr); else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3) vlan_tag = qeth_rebuild_skb(card, skb, hdr); else { /*in case of OSN*/ @@ -2603,7 +2528,7 @@ qeth_process_inbound_buffer(struct qeth_card *card, } } -static inline struct qeth_buffer_pool_entry * +static struct qeth_buffer_pool_entry * qeth_get_buffer_pool_entry(struct qeth_card *card) { struct qeth_buffer_pool_entry *entry; @@ -2618,7 +2543,7 @@ qeth_get_buffer_pool_entry(struct qeth_card *card) return NULL; } -static inline void +static void qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) { struct qeth_buffer_pool_entry *pool_entry; @@ -2645,7 +2570,7 @@ qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) buf->state = QETH_QDIO_BUF_EMPTY; } -static inline void +static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf) { @@ -2670,7 +2595,7 @@ qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); } -static inline void +static void qeth_queue_input_buffer(struct qeth_card *card, int index) { struct qeth_qdio_q *queue = card->qdio.in_q; @@ -2774,7 +2699,7 @@ qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status, card->perf_stats.inbound_start_time; } -static inline int +static int qeth_handle_send_error(struct qeth_card *card, struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err, unsigned int siga_err) @@ -2896,7 +2821,7 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, * Switched to packing state if the number of used buffers on a queue * reaches a certain limit. */ -static inline void +static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) { if (!queue->do_pack) { @@ -2917,7 +2842,7 @@ qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) * In that case 1 is returned to inform the caller. If no buffer * has to be flushed, zero is returned. */ -static inline int +static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) { struct qeth_qdio_out_buffer *buffer; @@ -2952,7 +2877,7 @@ qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) * Checks if there is a packing buffer and prepares it to be flushed. * In that case returns 1, otherwise zero. 
*/ -static inline int +static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) { struct qeth_qdio_out_buffer *buffer; @@ -2969,7 +2894,7 @@ qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) return 0; } -static inline void +static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) { int index; @@ -3669,7 +3594,7 @@ qeth_fake_header(struct sk_buff *skb, struct net_device *dev, } } -static inline int +static int qeth_send_packet(struct qeth_card *, struct sk_buff *); static int @@ -3834,7 +3759,7 @@ qeth_stop(struct net_device *dev) return 0; } -static inline int +static int qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { int cast_type = RTN_UNSPEC; @@ -3881,7 +3806,7 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) return cast_type; } -static inline int +static int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, int ipv, int cast_type) { @@ -3928,7 +3853,7 @@ qeth_get_ip_version(struct sk_buff *skb) } } -static inline struct qeth_hdr * +static struct qeth_hdr * __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) { #ifdef CONFIG_QETH_VLAN @@ -3957,24 +3882,33 @@ __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) qeth_push_skb(card, skb, sizeof(struct qeth_hdr))); } -static inline void +static void __qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb) { if (orig_skb != new_skb) dev_kfree_skb_any(new_skb); } -static inline struct sk_buff * +static struct sk_buff * qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, struct qeth_hdr **hdr, int ipv) { - struct sk_buff *new_skb; + struct sk_buff *new_skb, *new_skb2; QETH_DBF_TEXT(trace, 6, "prepskb"); - - new_skb = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr)); - if (new_skb == NULL) + new_skb = skb; + new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC); + if (!new_skb) return NULL; + new_skb2 = qeth_realloc_headroom(card, new_skb, + sizeof(struct qeth_hdr)); + if (!new_skb2) { + __qeth_free_new_skb(skb, new_skb); + return NULL; + } + if (new_skb != skb) + __qeth_free_new_skb(new_skb2, new_skb); + new_skb = new_skb2; *hdr = __qeth_prepare_skb(card, new_skb, ipv); if (*hdr == NULL) { __qeth_free_new_skb(skb, new_skb); @@ -4006,7 +3940,7 @@ qeth_get_qeth_hdr_flags6(int cast_type) return ct | QETH_CAST_UNICAST; } -static inline void +static void qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr, struct sk_buff *skb) { @@ -4043,7 +3977,7 @@ qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr, } } -static inline void +static void qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, struct sk_buff *skb, int cast_type) { @@ -4134,7 +4068,7 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, } } -static inline void +static void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill) { @@ -4178,7 +4112,7 @@ __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, *next_element_to_fill = element; } -static inline int +static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, struct sk_buff *skb) @@ -4237,7 +4171,7 @@ qeth_fill_buffer(struct qeth_qdio_out_q *queue, return flush_cnt; } -static inline int +static int qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, int elements_needed, @@ -4288,7 +4222,7 @@ out: return -EBUSY; } -static inline int +static 
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, int elements_needed, struct qeth_eddp_context *ctx) @@ -4394,7 +4328,7 @@ out: return rc; } -static inline int +static int qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb, int elems) { @@ -4415,7 +4349,7 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr, } -static inline int +static int qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) { int ipv = 0; @@ -4602,7 +4536,7 @@ qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) } -static inline const char * +static const char * qeth_arp_get_error_cause(int *rc) { switch (*rc) { @@ -4663,7 +4597,7 @@ qeth_arp_set_no_entries(struct qeth_card *card, int no_entries) return rc; } -static inline void +static void qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo, struct qeth_arp_query_data *qdata, int entry_size, int uentry_size) @@ -4844,9 +4778,11 @@ qeth_arp_query(struct qeth_card *card, char __user *udata) "(0x%x/%d)\n", QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc), tmp, tmp); - copy_to_user(udata, qinfo.udata, 4); + if (copy_to_user(udata, qinfo.udata, 4)) + rc = -EFAULT; } else { - copy_to_user(udata, qinfo.udata, qinfo.udata_len); + if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) + rc = -EFAULT; } kfree(qinfo.udata); return rc; @@ -4992,8 +4928,10 @@ qeth_snmp_command(struct qeth_card *card, char __user *udata) if (rc) PRINT_WARN("SNMP command failed on %s: (0x%x)\n", QETH_CARD_IFNAME(card), rc); - else - copy_to_user(udata, qinfo.udata, qinfo.udata_len); + else { + if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) + rc = -EFAULT; + } kfree(ureq); kfree(qinfo.udata); @@ -5276,7 +5214,7 @@ qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) spin_unlock_irqrestore(&card->vlanlock, flags); } -static inline void +static void qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf, unsigned short vid) { @@ -5544,12 +5482,10 @@ qeth_set_multicast_list(struct net_device *dev) qeth_add_multicast_ipv6(card); #endif out: - if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) - schedule_work(&card->kernel_thread_starter); + qeth_set_ip_addr_list(card); if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) return; - if (qeth_set_thread_start_bit(card, QETH_SET_PROMISC_MODE_THREAD)==0) - schedule_work(&card->kernel_thread_starter); + qeth_setadp_promisc_mode(card); } static int @@ -5689,7 +5625,7 @@ qeth_delete_mc_addresses(struct qeth_card *card) spin_unlock_irqrestore(&card->ip_lock, flags); } -static inline void +static void qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev) { struct qeth_ipaddr *ipm; @@ -5775,7 +5711,7 @@ qeth_layer2_add_multicast(struct qeth_card *card) } #ifdef CONFIG_QETH_IPV6 -static inline void +static void qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) { struct qeth_ipaddr *ipm; @@ -6086,7 +6022,7 @@ qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd) return rc; } -static inline void +static void qeth_fill_netmask(u8 *netmask, unsigned int len) { int i,j; @@ -6351,6 +6287,42 @@ static struct ethtool_ops qeth_ethtool_ops = { }; static int +qeth_hard_header_parse(struct sk_buff *skb, unsigned char *haddr) +{ + struct qeth_card *card; + struct ethhdr *eth; + + card = qeth_get_card_from_dev(skb->dev); + if (card->options.layer2) + goto haveheader; +#ifdef CONFIG_QETH_IPV6 + /* cause of the manipulated arp 
constructor and the ARP + flag for OSAE devices we have some nasty exceptions */ + if (card->info.type == QETH_CARD_TYPE_OSAE) { + if (!card->options.fake_ll) { + if ((skb->pkt_type==PACKET_OUTGOING) && + (skb->protocol==ETH_P_IPV6)) + goto haveheader; + else + return 0; + } else { + if ((skb->pkt_type==PACKET_OUTGOING) && + (skb->protocol==ETH_P_IP)) + return 0; + else + goto haveheader; + } + } +#endif + if (!card->options.fake_ll) + return 0; +haveheader: + eth = eth_hdr(skb); + memcpy(haddr, eth->h_source, ETH_ALEN); + return ETH_ALEN; +} + +static int qeth_netdev_init(struct net_device *dev) { struct qeth_card *card; @@ -6388,7 +6360,10 @@ qeth_netdev_init(struct net_device *dev) if (card->options.fake_ll && (qeth_get_netdev_flags(card) & IFF_NOARP)) dev->hard_header = qeth_fake_header; - dev->hard_header_parse = NULL; + if (dev->type == ARPHRD_IEEE802_TR) + dev->hard_header_parse = NULL; + else + dev->hard_header_parse = qeth_hard_header_parse; dev->set_mac_address = qeth_layer2_set_mac_address; dev->flags |= qeth_get_netdev_flags(card); if ((card->options.fake_broadcast) || @@ -6651,7 +6626,7 @@ qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode) return rc; } -static inline int +static int qeth_setadapter_hstr(struct qeth_card *card) { int rc; @@ -6914,7 +6889,7 @@ qeth_send_simple_setassparms(struct qeth_card *card, return rc; } -static inline int +static int qeth_start_ipa_arp_processing(struct qeth_card *card) { int rc; @@ -7554,7 +7529,7 @@ qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, wake_up(&card->wait_q); } -static inline int +static int qeth_threads_running(struct qeth_card *card, unsigned long threads) { unsigned long flags; @@ -8143,7 +8118,7 @@ qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto, spin_unlock_irqrestore(&card->ip_lock, flags); } -static inline void +static void qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len) { int i, j; @@ -8235,8 +8210,7 @@ qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, } if (!qeth_add_ip(card, ipaddr)) kfree(ipaddr); - if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) - schedule_work(&card->kernel_thread_starter); + qeth_set_ip_addr_list(card); return rc; } @@ -8264,8 +8238,7 @@ qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto, return; if (!qeth_delete_ip(card, ipaddr)) kfree(ipaddr); - if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) - schedule_work(&card->kernel_thread_starter); + qeth_set_ip_addr_list(card); } /* @@ -8308,8 +8281,7 @@ qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, } if (!qeth_add_ip(card, ipaddr)) kfree(ipaddr); - if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) - schedule_work(&card->kernel_thread_starter); + qeth_set_ip_addr_list(card); return 0; } @@ -8337,8 +8309,7 @@ qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, return; if (!qeth_delete_ip(card, ipaddr)) kfree(ipaddr); - if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) - schedule_work(&card->kernel_thread_starter); + qeth_set_ip_addr_list(card); } /** @@ -8380,8 +8351,7 @@ qeth_ip_event(struct notifier_block *this, default: break; } - if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) - schedule_work(&card->kernel_thread_starter); + qeth_set_ip_addr_list(card); out: return NOTIFY_DONE; } @@ -8433,8 +8403,7 @@ qeth_ip6_event(struct notifier_block *this, default: break; } - if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) - 
schedule_work(&card->kernel_thread_starter); + qeth_set_ip_addr_list(card); out: return NOTIFY_DONE; } diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c index 5836737ac58..d518419cd0c 100644 --- a/drivers/s390/net/qeth_sys.c +++ b/drivers/s390/net/qeth_sys.c @@ -328,7 +328,7 @@ qeth_dev_bufcnt_store(struct device *dev, struct device_attribute *attr, const c static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show, qeth_dev_bufcnt_store); -static inline ssize_t +static ssize_t qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route, char *buf) { @@ -368,7 +368,7 @@ qeth_dev_route4_show(struct device *dev, struct device_attribute *attr, char *bu return qeth_dev_route_show(card, &card->options.route4, buf); } -static inline ssize_t +static ssize_t qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route, enum qeth_prot_versions prot, const char *buf, size_t count) { @@ -998,7 +998,7 @@ struct device_attribute dev_attr_##_id = { \ .store = _store, \ }; -int +static int qeth_check_layer2(struct qeth_card *card) { if (card->options.layer2) @@ -1100,7 +1100,7 @@ static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644, qeth_dev_ipato_invert4_show, qeth_dev_ipato_invert4_store); -static inline ssize_t +static ssize_t qeth_dev_ipato_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { @@ -1146,7 +1146,7 @@ qeth_dev_ipato_add4_show(struct device *dev, struct device_attribute *attr, char return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4); } -static inline int +static int qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, u8 *addr, int *mask_bits) { @@ -1178,7 +1178,7 @@ qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, return 0; } -static inline ssize_t +static ssize_t qeth_dev_ipato_add_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { @@ -1223,7 +1223,7 @@ static QETH_DEVICE_ATTR(ipato_add4, add4, 0644, qeth_dev_ipato_add4_show, qeth_dev_ipato_add4_store); -static inline ssize_t +static ssize_t qeth_dev_ipato_del_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { @@ -1361,7 +1361,7 @@ static struct attribute_group qeth_device_ipato_group = { .attrs = (struct attribute **)qeth_ipato_device_attrs, }; -static inline ssize_t +static ssize_t qeth_dev_vipa_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { @@ -1407,7 +1407,7 @@ qeth_dev_vipa_add4_show(struct device *dev, struct device_attribute *attr, char return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4); } -static inline int +static int qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto, u8 *addr) { @@ -1418,7 +1418,7 @@ qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto, return 0; } -static inline ssize_t +static ssize_t qeth_dev_vipa_add_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { @@ -1451,7 +1451,7 @@ static QETH_DEVICE_ATTR(vipa_add4, add4, 0644, qeth_dev_vipa_add4_show, qeth_dev_vipa_add4_store); -static inline ssize_t +static ssize_t qeth_dev_vipa_del_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { @@ -1542,7 +1542,7 @@ static struct attribute_group qeth_device_vipa_group = { .attrs = (struct attribute **)qeth_vipa_device_attrs, }; -static inline ssize_t +static ssize_t qeth_dev_rxip_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { @@ -1588,7 +1588,7 @@ 
qeth_dev_rxip_add4_show(struct device *dev, struct device_attribute *attr, char return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4); } -static inline int +static int qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto, u8 *addr) { @@ -1599,7 +1599,7 @@ qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto, return 0; } -static inline ssize_t +static ssize_t qeth_dev_rxip_add_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { @@ -1632,7 +1632,7 @@ static QETH_DEVICE_ATTR(rxip_add4, add4, 0644, qeth_dev_rxip_add4_show, qeth_dev_rxip_add4_store); -static inline ssize_t +static ssize_t qeth_dev_rxip_del_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index b8179c27ceb..3ccca5871fd 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c @@ -1,7 +1,7 @@ /* * IUCV special message driver * - * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) * * This program is free software; you can redistribute it and/or modify @@ -23,10 +23,10 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/device.h> +#include <net/iucv/iucv.h> #include <asm/cpcmd.h> #include <asm/ebcdic.h> - -#include "iucv.h" +#include "smsgiucv.h" struct smsg_callback { struct list_head list; @@ -39,38 +39,46 @@ MODULE_AUTHOR ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)"); MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver"); -static iucv_handle_t smsg_handle; -static unsigned short smsg_pathid; +static struct iucv_path *smsg_path; + static DEFINE_SPINLOCK(smsg_list_lock); static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list); -static void -smsg_connection_complete(iucv_ConnectionComplete *eib, void *pgm_data) +static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); +static void smsg_message_pending(struct iucv_path *, struct iucv_message *); + +static struct iucv_handler smsg_handler = { + .path_pending = smsg_path_pending, + .message_pending = smsg_message_pending, +}; + +static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8], + u8 ipuser[16]) { + if (strncmp(ipvmid, "*MSG ", sizeof(ipvmid)) != 0) + return -EINVAL; + /* Path pending from *MSG. */ + return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL); } - -static void -smsg_message_pending(iucv_MessagePending *eib, void *pgm_data) +static void smsg_message_pending(struct iucv_path *path, + struct iucv_message *msg) { struct smsg_callback *cb; - unsigned char *msg; + unsigned char *buffer; unsigned char sender[9]; - unsigned short len; int rc, i; - len = eib->ln1msg2.ipbfln1f; - msg = kmalloc(len + 1, GFP_ATOMIC|GFP_DMA); - if (!msg) { - iucv_reject(eib->ippathid, eib->ipmsgid, eib->iptrgcls); + buffer = kmalloc(msg->length + 1, GFP_ATOMIC | GFP_DMA); + if (!buffer) { + iucv_message_reject(path, msg); return; } - rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls, - msg, len, NULL, NULL, NULL); + rc = iucv_message_receive(path, msg, 0, buffer, msg->length, NULL); if (rc == 0) { - msg[len] = 0; - EBCASC(msg, len); - memcpy(sender, msg, 8); + buffer[msg->length] = 0; + EBCASC(buffer, msg->length); + memcpy(sender, buffer, 8); sender[8] = 0; /* Remove trailing whitespace from the sender name. 
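For readers unfamiliar with the *MSG payload handled in smsg_message_pending() above: the received buffer carries the blank-padded eight-character VM user id of the sender followed by the message text, and after the EBCDIC-to-ASCII conversion the driver trims the padding and dispatches on a registered prefix. A stand-alone sketch of that layout and dispatch, with a purely hypothetical callback and prefix, might look like this:

/* Minimal userspace sketch of the sender/prefix handling in
 * smsg_message_pending(): the first 8 bytes of the (already
 * EBCDIC->ASCII converted) buffer are the blank-padded VM user id,
 * the message text follows at offset 8.  The callback and the
 * "DEMO" prefix are hypothetical, not part of the driver.
 */
#include <stdio.h>
#include <string.h>

static void demo_callback(const char *from, const char *str)
{
	printf("from %s: %s\n", from, str);
}

static void dispatch_smsg(const char *buffer, size_t length)
{
	char sender[9];
	int i;

	if (length < 8)
		return;
	memcpy(sender, buffer, 8);
	sender[8] = 0;
	/* Remove trailing whitespace from the sender name. */
	for (i = 7; i >= 0; i--) {
		if (sender[i] != ' ' && sender[i] != '\t')
			break;
		sender[i] = 0;
	}
	/* The real driver walks smsg_list and compares each prefix. */
	if (strncmp(buffer + 8, "DEMO", 4) == 0)
		demo_callback(sender, buffer + 8);
}

int main(void)
{
	dispatch_smsg("LINUX1  DEMO hello", 18);
	return 0;
}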
*/ for (i = 7; i >= 0; i--) { @@ -80,27 +88,17 @@ smsg_message_pending(iucv_MessagePending *eib, void *pgm_data) } spin_lock(&smsg_list_lock); list_for_each_entry(cb, &smsg_list, list) - if (strncmp(msg + 8, cb->prefix, cb->len) == 0) { - cb->callback(sender, msg + 8); + if (strncmp(buffer + 8, cb->prefix, cb->len) == 0) { + cb->callback(sender, buffer + 8); break; } spin_unlock(&smsg_list_lock); } - kfree(msg); + kfree(buffer); } -static iucv_interrupt_ops_t smsg_ops = { - .ConnectionComplete = smsg_connection_complete, - .MessagePending = smsg_message_pending, -}; - -static struct device_driver smsg_driver = { - .name = "SMSGIUCV", - .bus = &iucv_bus, -}; - -int -smsg_register_callback(char *prefix, void (*callback)(char *from, char *str)) +int smsg_register_callback(char *prefix, + void (*callback)(char *from, char *str)) { struct smsg_callback *cb; @@ -110,18 +108,18 @@ smsg_register_callback(char *prefix, void (*callback)(char *from, char *str)) cb->prefix = prefix; cb->len = strlen(prefix); cb->callback = callback; - spin_lock(&smsg_list_lock); + spin_lock_bh(&smsg_list_lock); list_add_tail(&cb->list, &smsg_list); - spin_unlock(&smsg_list_lock); + spin_unlock_bh(&smsg_list_lock); return 0; } -void -smsg_unregister_callback(char *prefix, void (*callback)(char *from, char *str)) +void smsg_unregister_callback(char *prefix, + void (*callback)(char *from, char *str)) { struct smsg_callback *cb, *tmp; - spin_lock(&smsg_list_lock); + spin_lock_bh(&smsg_list_lock); cb = NULL; list_for_each_entry(tmp, &smsg_list, list) if (tmp->callback == callback && @@ -130,55 +128,58 @@ smsg_unregister_callback(char *prefix, void (*callback)(char *from, char *str)) list_del(&cb->list); break; } - spin_unlock(&smsg_list_lock); + spin_unlock_bh(&smsg_list_lock); kfree(cb); } -static void __exit -smsg_exit(void) +static struct device_driver smsg_driver = { + .name = "SMSGIUCV", + .bus = &iucv_bus, +}; + +static void __exit smsg_exit(void) { - if (smsg_handle > 0) { - cpcmd("SET SMSG OFF", NULL, 0, NULL); - iucv_sever(smsg_pathid, NULL); - iucv_unregister_program(smsg_handle); - driver_unregister(&smsg_driver); - } - return; + cpcmd("SET SMSG IUCV", NULL, 0, NULL); + iucv_unregister(&smsg_handler, 1); + driver_unregister(&smsg_driver); } -static int __init -smsg_init(void) +static int __init smsg_init(void) { - static unsigned char pgmmask[24] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff - }; int rc; rc = driver_register(&smsg_driver); - if (rc != 0) { - printk(KERN_ERR "SMSGIUCV: failed to register driver.\n"); - return rc; - } - smsg_handle = iucv_register_program("SMSGIUCV ", "*MSG ", - pgmmask, &smsg_ops, NULL); - if (!smsg_handle) { + if (rc != 0) + goto out; + rc = iucv_register(&smsg_handler, 1); + if (rc) { printk(KERN_ERR "SMSGIUCV: failed to register to iucv"); - driver_unregister(&smsg_driver); - return -EIO; /* better errno ? */ + rc = -EIO; /* better errno ? */ + goto out_driver; + } + smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL); + if (!smsg_path) { + rc = -ENOMEM; + goto out_register; } - rc = iucv_connect (&smsg_pathid, 255, NULL, "*MSG ", NULL, 0, - NULL, NULL, smsg_handle, NULL); + rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", + NULL, NULL, NULL); if (rc) { printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG"); - iucv_unregister_program(smsg_handle); - driver_unregister(&smsg_driver); - smsg_handle = NULL; - return -EIO; + rc = -EIO; /* better errno ? 
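The rewritten smsg_init(), whose unwind labels follow below, uses the usual kernel idiom of one goto label per acquired resource, released in reverse order when a later step fails. A minimal self-contained sketch of the pattern, with stub steps standing in for driver_register(), iucv_register() and the path setup (all names here are illustrative only):

/* Sketch of reverse-order error unwinding as used in smsg_init(). */
#include <stdio.h>

static int step_a(void) { return 0; }   /* e.g. register bus driver   */
static int step_b(void) { return 0; }   /* e.g. register IUCV handler */
static int step_c(void) { return -1; }  /* e.g. connect path - fails  */

static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int demo_init(void)
{
	int rc;

	rc = step_a();
	if (rc)
		goto out;
	rc = step_b();
	if (rc)
		goto out_a;
	rc = step_c();
	if (rc)
		goto out_b;
	return 0;          /* fully initialized */

out_b:
	undo_b();          /* release what step_b acquired */
out_a:
	undo_a();          /* release what step_a acquired */
out:
	return rc;
}

int main(void)
{
	printf("demo_init() = %d\n", demo_init());
	return 0;
}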
*/ + goto out_free; } cpcmd("SET SMSG IUCV", NULL, 0, NULL); return 0; + +out_free: + iucv_path_free(smsg_path); +out_register: + iucv_unregister(&smsg_handler, 1); +out_driver: + driver_unregister(&smsg_driver); +out: + return rc; } module_init(smsg_init); diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c index e088b5e2871..806bb1a921e 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c @@ -13,22 +13,18 @@ #include <linux/errno.h> #include <linux/workqueue.h> #include <linux/time.h> +#include <linux/device.h> #include <linux/kthread.h> - +#include <asm/etr.h> #include <asm/lowcore.h> - +#include <asm/cio.h> +#include "cio/cio.h" +#include "cio/chsc.h" +#include "cio/css.h" #include "s390mach.h" static struct semaphore m_sem; -extern int css_process_crw(int, int); -extern int chsc_process_crw(void); -extern int chp_process_crw(int, int); -extern void css_reiterate_subchannels(void); - -extern struct workqueue_struct *slow_path_wq; -extern struct work_struct slow_path_work; - static NORET_TYPE void s390_handle_damage(char *msg) { @@ -470,6 +466,19 @@ s390_do_machine_check(struct pt_regs *regs) s390_handle_damage("unable to revalidate registers."); } + if (mci->cd) { + /* Timing facility damage */ + s390_handle_damage("TOD clock damaged"); + } + + if (mci->ed && mci->ec) { + /* External damage */ + if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC)) + etr_sync_check(); + if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) + etr_switch_to_local(); + } + if (mci->se) /* Storage error uncorrected */ s390_handle_damage("received storage error uncorrected " @@ -508,7 +517,7 @@ static int machine_check_init(void) { init_MUTEX_LOCKED(&m_sem); - ctl_clear_bit(14, 25); /* disable external damage MCH */ + ctl_set_bit(14, 25); /* enable external damage MCH */ ctl_set_bit(14, 27); /* enable system recovery MCH */ #ifdef CONFIG_MACHCHK_WARNING ctl_set_bit(14, 24); /* enable warning MCH */ @@ -529,7 +538,11 @@ arch_initcall(machine_check_init); static int __init machine_check_crw_init (void) { - kthread_run(s390_collect_crw_info, &m_sem, "kmcheck"); + struct task_struct *task; + + task = kthread_run(s390_collect_crw_info, &m_sem, "kmcheck"); + if (IS_ERR(task)) + return PTR_ERR(task); ctl_set_bit(14, 28); /* enable channel report MCH */ return 0; } diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h index 7abb42a09ae..d3ca4281a49 100644 --- a/drivers/s390/s390mach.h +++ b/drivers/s390/s390mach.h @@ -102,4 +102,7 @@ static inline int stcrw(struct crw *pcrw ) return ccode; } +#define ED_ETR_SYNC 12 /* External damage ETR sync check */ +#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ + #endif /* __s390mach */ diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 5d39b2df0cc..39a88526679 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -47,13 +47,12 @@ static int __init zfcp_module_init(void); static void zfcp_ns_gid_pn_handler(unsigned long); /* miscellaneous */ -static inline int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t); -static inline void zfcp_sg_list_free(struct zfcp_sg_list *); -static inline int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *, - void __user *, size_t); -static inline int zfcp_sg_list_copy_to_user(void __user *, - struct zfcp_sg_list *, size_t); - +static int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t); +static void zfcp_sg_list_free(struct zfcp_sg_list *); +static int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *, + void __user *, 
size_t); +static int zfcp_sg_list_copy_to_user(void __user *, + struct zfcp_sg_list *, size_t); static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long); #define ZFCP_CFDC_IOC_MAGIC 0xDD @@ -237,7 +236,7 @@ zfcp_device_setup(char *devstr) return 0; len = strlen(devstr) + 1; - str = (char *) kmalloc(len, GFP_KERNEL); + str = kmalloc(len, GFP_KERNEL); if (!str) goto err_out; memcpy(str, devstr, len); @@ -605,7 +604,7 @@ zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, * elements of the scatter-gather list. The maximum size of a single element * in the scatter-gather list is PAGE_SIZE. */ -static inline int +static int zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) { struct scatterlist *sg; @@ -652,7 +651,7 @@ zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) * Memory for each element in the scatter-gather list is freed. * Finally sg_list->sg is freed itself and sg_list->count is reset. */ -static inline void +static void zfcp_sg_list_free(struct zfcp_sg_list *sg_list) { struct scatterlist *sg; @@ -697,7 +696,7 @@ zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count) * @size: number of bytes to be copied * Return: 0 on success, -EFAULT if copy_from_user fails. */ -static inline int +static int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list, void __user *user_buffer, size_t size) @@ -735,7 +734,7 @@ zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list, * @size: number of bytes to be copied * Return: 0 on success, -EFAULT if copy_to_user fails */ -static inline int +static int zfcp_sg_list_copy_to_user(void __user *user_buffer, struct zfcp_sg_list *sg_list, size_t size) @@ -1799,7 +1798,7 @@ static const struct zfcp_rc_entry zfcp_p_rjt_rc[] = { * @code: reason code * @rc_table: table of reason codes and descriptions */ -static inline const char * +static const char * zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table) { const char *descr = "unknown reason code"; @@ -1847,7 +1846,7 @@ zfcp_check_ct_response(struct ct_hdr *rjt) * @rjt_par: reject parameter acc. 
to FC-PH/FC-FS * @rc_table: table of reason codes and descriptions */ -static inline void +static void zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par, const struct zfcp_rc_entry *rc_table) { diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 0aa3b1ac76a..d8191d115c1 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -31,7 +31,7 @@ MODULE_PARM_DESC(dbfsize, #define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER -static inline int +static int zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck) { unsigned long long sec; @@ -106,7 +106,7 @@ zfcp_dbf_view_dump(char *out_buf, const char *label, return len; } -static inline int +static int zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area, debug_entry_t * entry, char *out_buf) { @@ -130,7 +130,7 @@ zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area, return len; } -inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) +void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) { struct zfcp_adapter *adapter = fsf_req->adapter; struct fsf_qtcb *qtcb = fsf_req->qtcb; @@ -241,7 +241,7 @@ inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); } -inline void +void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, struct fsf_status_read_buffer *status_buffer) { @@ -295,7 +295,7 @@ zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); } -inline void +void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, unsigned int qdio_error, unsigned int siga_error, int sbal_index, int sbal_count) @@ -316,7 +316,7 @@ zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); } -static inline int +static int zfcp_hba_dbf_view_response(char *out_buf, struct zfcp_hba_dbf_record_response *rec) { @@ -403,7 +403,7 @@ zfcp_hba_dbf_view_response(char *out_buf, return len; } -static inline int +static int zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec) { int len = 0; @@ -424,7 +424,7 @@ zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec) return len; } -static inline int +static int zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec) { int len = 0; @@ -469,7 +469,7 @@ zfcp_hba_dbf_view_format(debug_info_t * id, struct debug_view *view, return len; } -struct debug_view zfcp_hba_dbf_view = { +static struct debug_view zfcp_hba_dbf_view = { "structured", NULL, &zfcp_dbf_view_header, @@ -478,7 +478,7 @@ struct debug_view zfcp_hba_dbf_view = { NULL }; -inline void +void _zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, u32 s_id, u32 d_id, void *buffer, int buflen) { @@ -519,7 +519,7 @@ _zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); } -inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) +void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; struct zfcp_port *port = ct->port; @@ -531,7 +531,7 @@ inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) ct->req->length); } -inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) +void 
zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; struct zfcp_port *port = ct->port; @@ -543,7 +543,7 @@ inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) ct->resp->length); } -static inline void +static void _zfcp_san_dbf_event_common_els(const char *tag, int level, struct zfcp_fsf_req *fsf_req, u32 s_id, u32 d_id, u8 ls_code, void *buffer, int buflen) @@ -585,7 +585,7 @@ _zfcp_san_dbf_event_common_els(const char *tag, int level, spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); } -inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) +void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; @@ -597,7 +597,7 @@ inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) els->req->length); } -inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) +void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; @@ -608,7 +608,7 @@ inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) els->resp->length); } -inline void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) +void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) { struct zfcp_adapter *adapter = fsf_req->adapter; struct fsf_status_read_buffer *status_buffer = @@ -693,7 +693,7 @@ zfcp_san_dbf_view_format(debug_info_t * id, struct debug_view *view, return len; } -struct debug_view zfcp_san_dbf_view = { +static struct debug_view zfcp_san_dbf_view = { "structured", NULL, &zfcp_dbf_view_header, @@ -702,7 +702,7 @@ struct debug_view zfcp_san_dbf_view = { NULL }; -static inline void +static void _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, struct zfcp_adapter *adapter, struct scsi_cmnd *scsi_cmnd, @@ -786,7 +786,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); } -inline void +void zfcp_scsi_dbf_event_result(const char *tag, int level, struct zfcp_adapter *adapter, struct scsi_cmnd *scsi_cmnd, @@ -796,7 +796,7 @@ zfcp_scsi_dbf_event_result(const char *tag, int level, adapter, scsi_cmnd, fsf_req, 0); } -inline void +void zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, struct scsi_cmnd *scsi_cmnd, struct zfcp_fsf_req *new_fsf_req, @@ -806,7 +806,7 @@ zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, adapter, scsi_cmnd, new_fsf_req, old_req_id); } -inline void +void zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, struct scsi_cmnd *scsi_cmnd) { @@ -884,7 +884,7 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view, return len; } -struct debug_view zfcp_scsi_dbf_view = { +static struct debug_view zfcp_scsi_dbf_view = { "structured", NULL, &zfcp_dbf_view_header, diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index c88babce9bc..88642dec080 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -200,7 +200,7 @@ void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout) * returns: 0 - initiated action successfully * <0 - failed to initiate action */ -int +static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask) { int retval; @@ -295,7 +295,7 @@ zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int 
clear_mask) * zfcp_erp_adisc - send ADISC ELS command * @port: port structure */ -int +static int zfcp_erp_adisc(struct zfcp_port *port) { struct zfcp_adapter *adapter = port->adapter; @@ -380,7 +380,7 @@ zfcp_erp_adisc(struct zfcp_port *port) * * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered. */ -void +static void zfcp_erp_adisc_handler(unsigned long data) { struct zfcp_send_els *send_els; @@ -3141,7 +3141,6 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter, break; case ZFCP_ERP_ACTION_REOPEN_ADAPTER: if (result != ZFCP_ERP_SUCCEEDED) { - struct zfcp_port *port; list_for_each_entry(port, &adapter->port_list_head, list) if (port->rport && !atomic_test_mask(ZFCP_STATUS_PORT_WKA, diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index b8794d77285..cda0cc095ad 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -119,8 +119,8 @@ extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t); extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *); -extern void set_host_byte(u32 *, char); -extern void set_driver_byte(u32 *, char); +extern void set_host_byte(int *, char); +extern void set_driver_byte(int *, char); extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 067f1519eb0..4b3ae3f22e7 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -4563,7 +4563,7 @@ zfcp_fsf_req_sbal_check(unsigned long *flags, /* * set qtcb pointer in fsf_req and initialize QTCB */ -static inline void +static void zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) { if (likely(fsf_req->qtcb != NULL)) { diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index dbd9f48e863..1e12a78e8ed 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -21,22 +21,22 @@ #include "zfcp_ext.h" -static inline void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); +static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get (struct zfcp_qdio_queue *, int, int); static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp (struct zfcp_fsf_req *, int, int); -static inline volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain +static volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain (struct zfcp_fsf_req *, unsigned long); -static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_next +static volatile struct qdio_buffer_element *zfcp_qdio_sbale_next (struct zfcp_fsf_req *, unsigned long); -static inline int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int); +static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int); static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *); -static inline void zfcp_qdio_sbale_fill +static void zfcp_qdio_sbale_fill (struct zfcp_fsf_req *, unsigned long, void *, int); -static inline int zfcp_qdio_sbals_from_segment +static int zfcp_qdio_sbals_from_segment (struct zfcp_fsf_req *, unsigned long, void *, unsigned long); -static inline int zfcp_qdio_sbals_from_buffer +static int zfcp_qdio_sbals_from_buffer (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int); static qdio_handler_t zfcp_qdio_request_handler; @@ -201,7 +201,7 @@ 
zfcp_qdio_allocate(struct zfcp_adapter *adapter) * returns: error flag * */ -static inline int +static int zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status, unsigned int qdio_error, unsigned int siga_error, int first_element, int elements_processed) @@ -462,7 +462,7 @@ zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale) * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for * a struct zfcp_fsf_req */ -inline volatile struct qdio_buffer_element * +volatile struct qdio_buffer_element * zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) { return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue, @@ -484,7 +484,7 @@ zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) * zfcp_qdio_sbale_curr - return current SBALE on request_queue for * a struct zfcp_fsf_req */ -inline volatile struct qdio_buffer_element * +volatile struct qdio_buffer_element * zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) { return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, @@ -499,7 +499,7 @@ zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) * * Note: We can assume at least one free SBAL in the request_queue when called. */ -static inline void +static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) { int count = atomic_read(&fsf_req->adapter->request_queue.free_count); @@ -517,7 +517,7 @@ zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) * * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req. */ -static inline volatile struct qdio_buffer_element * +static volatile struct qdio_buffer_element * zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) { volatile struct qdio_buffer_element *sbale; @@ -554,7 +554,7 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) /** * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed */ -static inline volatile struct qdio_buffer_element * +static volatile struct qdio_buffer_element * zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) { if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) @@ -569,7 +569,7 @@ zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue * with zero from */ -static inline int +static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last) { struct qdio_buffer **buf = queue->buffer; @@ -603,7 +603,7 @@ zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req) * zfcp_qdio_sbale_fill - set address and lenght in current SBALE * on request_queue */ -static inline void +static void zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, void *addr, int length) { @@ -624,7 +624,7 @@ zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, * Alignment and length of the segment determine how many SBALEs are needed * for the memory segment. 
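The comment above notes that alignment and length of a memory segment together determine how many SBALEs are needed, since each SBALE addresses at most one page and the segment may start at any offset within a page. A rough stand-alone illustration of that count, assuming 4 KiB pages (not driver code):

/* Each SBALE covers at most one page, and the segment may begin at
 * any offset into a page, so both the start address and the length
 * influence the SBALE count.  Purely illustrative.
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

static unsigned long sbales_needed(unsigned long addr, unsigned long length)
{
	unsigned long offset = addr & (DEMO_PAGE_SIZE - 1);

	return (offset + length + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE;
}

int main(void)
{
	/* 8 KiB starting on a page boundary fits in 2 SBALEs ... */
	printf("%lu\n", sbales_needed(0x10000, 8192));
	/* ... but the same 8 KiB starting mid-page needs 3. */
	printf("%lu\n", sbales_needed(0x10800, 8192));
	return 0;
}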
*/ -static inline int +static int zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, void *start_addr, unsigned long total_length) { @@ -659,7 +659,7 @@ zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, * @sg_count: number of elements in scatter-gather list * @max_sbals: upper bound for number of SBALs to be used */ -inline int +int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, struct scatterlist *sg, int sg_count, int max_sbals) { @@ -707,7 +707,7 @@ out: * @length: length of buffer * @max_sbals: upper bound for number of SBALs to be used */ -static inline int +static int zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, void *buffer, unsigned long length, int max_sbals) { @@ -728,7 +728,7 @@ zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, * @scsi_cmnd: either scatter-gather list or buffer contained herein is used * to fill SBALs */ -inline int +int zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, struct scsi_cmnd *scsi_cmnd) { diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 452d96f92a1..99db02062c3 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -90,7 +90,7 @@ zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) return fcp_sns_info_ptr; } -fcp_dl_t * +static fcp_dl_t * zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd) { int additional_length = fcp_cmd->add_fcp_cdb_length << 2; @@ -124,19 +124,19 @@ zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl) * regarding the specified byte */ static inline void -set_byte(u32 * result, char status, char pos) +set_byte(int *result, char status, char pos) { *result |= status << (pos * 8); } void -set_host_byte(u32 * result, char status) +set_host_byte(int *result, char status) { set_byte(result, status, 2); } void -set_driver_byte(u32 * result, char status) +set_driver_byte(int *result, char status) { set_byte(result, status, 3); } @@ -280,7 +280,7 @@ out: return retval; } -void +static void zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt) { struct completion *wait = (struct completion *) scpnt->SCp.ptr; @@ -324,7 +324,7 @@ zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt, * returns: 0 - success, SCSI command enqueued * !0 - failure */ -int +static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, void (*done) (struct scsi_cmnd *)) { @@ -380,7 +380,7 @@ zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, unsigned int id, * will handle late commands. (Usually, the normal completion of late * commands is ignored with respect to the running abort operation.) 
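The set_host_byte()/set_driver_byte() helpers whose parameter type changes above (u32 * to int *) pack their status into fixed byte positions of the SCSI midlayer result word: byte 2 for the host byte, byte 3 for the driver byte. A small stand-alone sketch of that packing; the example status values are assumptions rather than taken from the driver:

/* How the host and driver bytes land in the 32-bit SCSI result word. */
#include <stdio.h>

static void set_byte(int *result, char status, char pos)
{
	*result |= status << (pos * 8);
}

int main(void)
{
	int result = 0;

	set_byte(&result, 0x02, 0);   /* SCSI status byte, e.g. CHECK CONDITION */
	set_byte(&result, 0x07, 2);   /* host byte, e.g. DID_ERROR */
	printf("result = 0x%08x\n", result);   /* prints 0x00070002 */
	return 0;
}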
*/ -int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) +static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) { struct Scsi_Host *scsi_host; struct zfcp_adapter *adapter; @@ -445,7 +445,7 @@ int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) return retval; } -int +static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) { int retval; @@ -541,7 +541,7 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags, /** * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset */ -int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) +static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) { struct zfcp_unit *unit; struct zfcp_adapter *adapter; diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c index 1e788e815ce..090743d2f91 100644 --- a/drivers/s390/sysinfo.c +++ b/drivers/s390/sysinfo.c @@ -9,8 +9,14 @@ #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/init.h> +#include <linux/delay.h> #include <asm/ebcdic.h> +/* Sigh, math-emu. Don't ask. */ +#include <asm/sfp-util.h> +#include <math-emu/soft-fp.h> +#include <math-emu/single.h> + struct sysinfo_1_1_1 { char reserved_0[32]; char manufacturer[16]; @@ -198,7 +204,7 @@ static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len) * if the higher order 8 bits are not zero. Printing * a floating point number in the kernel is a no-no, * always print the number as 32 bit unsigned integer. - * The user-space needs to know about the stange + * The user-space needs to know about the strange * encoding of the alternate cpu capability. */ len += sprintf(page + len, "Capability: %u %u\n", @@ -351,3 +357,58 @@ static __init int create_proc_sysinfo(void) __initcall(create_proc_sysinfo); +/* + * CPU capability might have changed. Therefore recalculate loops_per_jiffy. + */ +void s390_adjust_jiffies(void) +{ + struct sysinfo_1_2_2 *info; + const unsigned int fmil = 0x4b189680; /* 1e7 as 32-bit float. */ + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); + FP_DECL_EX; + unsigned int capability; + + info = (void *) get_zeroed_page(GFP_KERNEL); + if (!info) + return; + + if (stsi(info, 1, 2, 2) != -ENOSYS) { + /* + * Major sigh. The cpu capability encoding is "special". + * If the first 9 bits of info->capability are 0 then it + * is a 32 bit unsigned integer in the range 0 .. 2^23. + * If the first 9 bits are != 0 then it is a 32 bit float. + * In addition a lower value indicates a proportionally + * higher cpu capacity. Bogomips are the other way round. + * To get to a halfway suitable number we divide 1e7 + * by the cpu capability number. Yes, that means a floating + * point division .. math-emu here we come :-) + */ + FP_UNPACK_SP(SA, &fmil); + if ((info->capability >> 23) == 0) + FP_FROM_INT_S(SB, info->capability, 32, int); + else + FP_UNPACK_SP(SB, &info->capability); + FP_DIV_S(SR, SA, SB); + FP_TO_INT_S(capability, SR, 32, 0); + } else + /* + * Really old machine without stsi block for basic + * cpu information. Report 42.0 bogomips. + */ + capability = 42; + loops_per_jiffy = capability * (500000/HZ); + free_page((unsigned long) info); +} + +/* + * calibrate the delay loop + */ +void __init calibrate_delay(void) +{ + s390_adjust_jiffies(); + /* Print the good old Bogomips line .. */ + printk(KERN_DEBUG "Calibrating delay loop (skipped)... " + "%lu.%02lu BogoMIPS preset\n", loops_per_jiffy/(500000/HZ), + (loops_per_jiffy/(5000/HZ)) % 100); +} |
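The s390_adjust_jiffies() code added above divides 1e7 by the machine-reported capability, going through the math-emu soft-float macros because the kernel cannot use hardware floating point. A userspace sketch of the same conversion with ordinary C float arithmetic may make the encoding easier to follow; the HZ value and the sample capability figures below are assumptions:

/* Userspace illustration of the capability -> loops_per_jiffy
 * conversion performed in s390_adjust_jiffies().  Plain float math
 * is used here instead of the kernel's soft-fp macros.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define DEMO_HZ 100UL

static unsigned long capability_to_lpj(uint32_t raw)
{
	float cap;
	unsigned int capability;

	if ((raw >> 23) == 0) {
		/* High 9 bits zero: plain 32-bit unsigned integer. */
		cap = (float) raw;
	} else {
		/* Otherwise the bit pattern is an IEEE single precision float. */
		memcpy(&cap, &raw, sizeof(cap));
	}
	/* A lower capability value means a faster CPU, so invert via 1e7. */
	capability = (unsigned int) (10000000.0f / cap);
	return capability * (500000 / DEMO_HZ);
}

int main(void)
{
	/* 0x4b189680 is 1e7 as a float, so this reports capability 1. */
	printf("lpj = %lu\n", capability_to_lpj(0x4b189680));
	/* 2000 is integer-encoded (high 9 bits zero); 1e7/2000 = 5000. */
	printf("lpj = %lu\n", capability_to_lpj(2000));
	return 0;
}

The constant 0x4b189680 used in the kernel code is simply 1e7 in IEEE single precision, so a float-encoded capability equal to that value maps to a reported capability of 1 before the HZ scaling.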