Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--   drivers/s390/cio/css.c        |   3
-rw-r--r--   drivers/s390/cio/css.h        |   2
-rw-r--r--   drivers/s390/cio/device.c     |   4
-rw-r--r--   drivers/s390/cio/device_ops.c |  11
-rw-r--r--   drivers/s390/cio/qdio.c       | 278
-rw-r--r--   drivers/s390/cio/qdio.h       |  56
6 files changed, 219 insertions, 135 deletions
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 27c6d9e55b2..dfca0ef139f 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -191,8 +191,7 @@ static int css_register_subchannel(struct subchannel *sch)
     return ret;
 }
 
-int
-css_probe_device(struct subchannel_id schid)
+static int css_probe_device(struct subchannel_id schid)
 {
     int ret;
     struct subchannel *sch;
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 71fcfdc4280..ed7977531c3 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -138,9 +138,7 @@ struct css_driver {
  * all css_drivers have the css_bus_type
  */
 extern struct bus_type css_bus_type;
-extern struct css_driver io_subchannel_driver;
 
-extern int css_probe_device(struct subchannel_id);
 extern int css_sch_device_register(struct subchannel *);
 extern void css_sch_device_unregister(struct subchannel *);
 extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index a23ff582db9..a8b373f69cf 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -129,7 +129,7 @@ static void io_subchannel_verify(struct device *);
 static void io_subchannel_ioterm(struct device *);
 static void io_subchannel_shutdown(struct subchannel *);
 
-struct css_driver io_subchannel_driver = {
+static struct css_driver io_subchannel_driver = {
     .subchannel_type = SUBCHANNEL_TYPE_IO,
     .drv = {
         .name = "io_subchannel",
@@ -546,7 +546,7 @@ static struct attribute_group ccwdev_attr_group = {
     .attrs = ccwdev_attrs,
 };
 
-struct attribute_group *ccwdev_attr_groups[] = {
+static struct attribute_group *ccwdev_attr_groups[] = {
     &ccwdev_attr_group,
     NULL,
 };
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 16f59fcb66b..a5d263fb55a 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -616,6 +616,17 @@ ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
     return chp_get_chp_desc(chpid);
 }
 
+/**
+ * ccw_device_get_id - obtain a ccw device id
+ * @cdev: device to obtain the id for
+ * @dev_id: where to fill in the values
+ */
+void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
+{
+    *dev_id = cdev->private->dev_id;
+}
+EXPORT_SYMBOL(ccw_device_get_id);
+
 // FIXME: these have to go:
 
 int
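The ccw_device_get_id() helper exported above copies the bus id out of the driver-private data, so callers no longer need to reach into cdev->private themselves. A minimal sketch of how a CCW device driver might use it from its probe callback (foo_probe and the dev_info() message are made up for illustration; only ccw_device_get_id() and the ssid/devno members of struct ccw_dev_id come from the s390 common I/O headers):

#include <asm/ccwdev.h>

/* hypothetical probe callback of a struct ccw_driver */
static int foo_probe(struct ccw_device *cdev)
{
    struct ccw_dev_id dev_id;

    /* fills in subchannel set id and device number */
    ccw_device_get_id(cdev, &dev_id);
    dev_info(&cdev->dev, "probed device 0.%x.%04x\n",
             dev_id.ssid, dev_id.devno);
    return 0;
}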
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 05fac0733f3..e70aeb7a378 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -69,7 +69,6 @@ static const char version[] = "QDIO base support version 2";
 
 static int qdio_performance_stats = 0;
 static int proc_perf_file_registration;
-static unsigned long i_p_c, i_p_nc, o_p_c, o_p_nc, ii_p_c, ii_p_nc;
 static struct qdio_perf_stats perf_stats;
 
 static int hydra_thinints;
@@ -111,6 +110,31 @@ qdio_min(int a,int b)
 }
 
 /***************** SCRUBBER HELPER ROUTINES **********************/
+#ifdef CONFIG_64BIT
+static inline void qdio_perf_stat_inc(atomic64_t *count)
+{
+    if (qdio_performance_stats)
+        atomic64_inc(count);
+}
+
+static inline void qdio_perf_stat_dec(atomic64_t *count)
+{
+    if (qdio_performance_stats)
+        atomic64_dec(count);
+}
+#else /* CONFIG_64BIT */
+static inline void qdio_perf_stat_inc(atomic_t *count)
+{
+    if (qdio_performance_stats)
+        atomic_inc(count);
+}
+
+static inline void qdio_perf_stat_dec(atomic_t *count)
+{
+    if (qdio_performance_stats)
+        atomic_dec(count);
+}
+#endif /* CONFIG_64BIT */
 
 static inline __u64
 qdio_get_micros(void)
@@ -277,8 +301,7 @@ qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
     QDIO_DBF_TEXT4(0,trace,"sigasync");
     QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
-    if (qdio_performance_stats)
-        perf_stats.siga_syncs++;
+    qdio_perf_stat_inc(&perf_stats.siga_syncs);
 
     cc = do_siga_sync(q->schid, gpr2, gpr3);
     if (cc)
@@ -323,8 +346,7 @@ qdio_siga_output(struct qdio_q *q)
     __u32 busy_bit;
     __u64 start_time=0;
 
-    if (qdio_performance_stats)
-        perf_stats.siga_outs++;
+    qdio_perf_stat_inc(&perf_stats.siga_outs);
 
     QDIO_DBF_TEXT4(0,trace,"sigaout");
     QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
@@ -358,8 +380,7 @@ qdio_siga_input(struct qdio_q *q)
     QDIO_DBF_TEXT4(0,trace,"sigain");
     QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
-    if (qdio_performance_stats)
-        perf_stats.siga_ins++;
+    qdio_perf_stat_inc(&perf_stats.siga_ins);
 
     cc = do_siga_input(q->schid, q->mask);
 
@@ -953,8 +974,7 @@ __qdio_outbound_processing(struct qdio_q *q)
 
     if (unlikely(qdio_reserve_q(q))) {
         qdio_release_q(q);
-        if (qdio_performance_stats)
-            o_p_c++;
+        qdio_perf_stat_inc(&perf_stats.outbound_tl_runs_resched);
         /* as we're sissies, we'll check next time */
         if (likely(!atomic_read(&q->is_in_shutdown))) {
             qdio_mark_q(q);
@@ -962,10 +982,8 @@ __qdio_outbound_processing(struct qdio_q *q)
         }
         return;
     }
-    if (qdio_performance_stats) {
-        o_p_nc++;
-        perf_stats.tl_runs++;
-    }
+    qdio_perf_stat_inc(&perf_stats.outbound_tl_runs);
+    qdio_perf_stat_inc(&perf_stats.tl_runs);
 
     /* see comment in qdio_kick_outbound_q */
     siga_attempts=atomic_read(&q->busy_siga_counter);
@@ -978,18 +996,25 @@ __qdio_outbound_processing(struct qdio_q *q)
     if (qdio_has_outbound_q_moved(q))
         qdio_kick_outbound_handler(q);
 
-    if (q->is_iqdio_q) {
+    if (q->queue_type == QDIO_ZFCP_QFMT) {
+        if ((!q->hydra_gives_outbound_pcis) &&
+            (!qdio_is_outbound_q_done(q)))
+            qdio_mark_q(q);
+    }
+    else if (((!q->is_iqdio_q) && (!q->is_pci_out)) ||
+         (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH)) {
         /*
-         * for asynchronous queues, we better check, if the sent
-         * buffer is already switched from PRIMED to EMPTY.
+         * make sure buffer switch from PRIMED to EMPTY is noticed
+         * and outbound_handler is called
          */
-        if ((q->queue_type == QDIO_IQDIO_QFMT_ASYNCH) &&
-            !qdio_is_outbound_q_done(q))
-            qdio_mark_q(q);
-
-    } else if (!q->hydra_gives_outbound_pcis)
-        if (!qdio_is_outbound_q_done(q))
-            qdio_mark_q(q);
+        if (qdio_is_outbound_q_done(q)) {
+            del_timer(&q->timer);
+        } else {
+            if (!timer_pending(&q->timer))
+                mod_timer(&q->timer, jiffies +
+                      QDIO_FORCE_CHECK_TIMEOUT);
+        }
+    }
 
     qdio_release_q(q);
 }
@@ -1139,17 +1164,6 @@ qdio_has_inbound_q_moved(struct qdio_q *q)
 {
     int i;
 
-    static int old_pcis=0;
-    static int old_thinints=0;
-
-    if (qdio_performance_stats) {
-        if ((old_pcis==perf_stats.pcis)&&
-            (old_thinints==perf_stats.thinints))
-            perf_stats.start_time_inbound=NOW;
-        else
-            old_pcis=perf_stats.pcis;
-    }
-
     i=qdio_get_inbound_buffer_frontier(q);
     if ( (i!=GET_SAVED_FRONTIER(q)) ||
          (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
@@ -1337,10 +1351,7 @@ qdio_kick_inbound_handler(struct qdio_q *q)
     q->siga_error=0;
     q->error_status_flags=0;
 
-    if (qdio_performance_stats) {
-        perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound;
-        perf_stats.inbound_cnt++;
-    }
+    qdio_perf_stat_inc(&perf_stats.inbound_cnt);
 }
 
 static void
@@ -1360,8 +1371,7 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
      */
     if (unlikely(qdio_reserve_q(q))) {
         qdio_release_q(q);
-        if (qdio_performance_stats)
-            ii_p_c++;
+        qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
         /*
          * as we might just be about to stop polling, we make
          * sure that we check again at least once more
@@ -1369,8 +1379,7 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
         tiqdio_sched_tl();
         return;
     }
-    if (qdio_performance_stats)
-        ii_p_nc++;
+    qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs);
     if (unlikely(atomic_read(&q->is_in_shutdown))) {
         qdio_unmark_q(q);
         goto out;
@@ -1412,8 +1421,7 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
         for (i=0;i<irq_ptr->no_output_qs;i++) {
             oq = irq_ptr->output_qs[i];
             if (!qdio_is_outbound_q_done(oq)) {
-                if (qdio_performance_stats)
-                    perf_stats.tl_runs--;
+                qdio_perf_stat_dec(&perf_stats.tl_runs);
                 __qdio_outbound_processing(oq);
             }
         }
@@ -1452,8 +1460,7 @@ __qdio_inbound_processing(struct qdio_q *q)
 
     if (unlikely(qdio_reserve_q(q))) {
         qdio_release_q(q);
-        if (qdio_performance_stats)
-            i_p_c++;
+        qdio_perf_stat_inc(&perf_stats.inbound_tl_runs_resched);
         /* as we're sissies, we'll check next time */
         if (likely(!atomic_read(&q->is_in_shutdown))) {
             qdio_mark_q(q);
@@ -1461,10 +1468,8 @@ __qdio_inbound_processing(struct qdio_q *q)
         }
         return;
     }
-    if (qdio_performance_stats) {
-        i_p_nc++;
-        perf_stats.tl_runs++;
-    }
+    qdio_perf_stat_inc(&perf_stats.inbound_tl_runs);
+    qdio_perf_stat_inc(&perf_stats.tl_runs);
 
 again:
     if (qdio_has_inbound_q_moved(q)) {
@@ -1510,8 +1515,7 @@ tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
 
     if (unlikely(qdio_reserve_q(q))) {
         qdio_release_q(q);
-        if (qdio_performance_stats)
-            ii_p_c++;
+        qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
         /*
          * as we might just be about to stop polling, we make
          * sure that we check again at least once more
@@ -1602,8 +1606,7 @@ tiqdio_tl(unsigned long data)
 {
     QDIO_DBF_TEXT4(0,trace,"iqdio_tl");
 
-    if (qdio_performance_stats)
-        perf_stats.tl_runs++;
+    qdio_perf_stat_inc(&perf_stats.tl_runs);
 
     tiqdio_inbound_checks();
 }
@@ -1830,6 +1833,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
             q->queue_type = QDIO_IQDIO_QFMT_ASYNCH;
         q->int_parm=int_parm;
         q->is_input_q=0;
+        q->is_pci_out = 0;
         q->schid = irq_ptr->schid;
         q->cdev = cdev;
         q->irq_ptr = irq_ptr;
@@ -1842,6 +1846,10 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
         q->tasklet.data=(unsigned long)q;
         q->tasklet.func=(void(*)(unsigned long))
             &qdio_outbound_processing;
+        q->timer.function=(void(*)(unsigned long))
+            &qdio_outbound_processing;
+        q->timer.data = (long)q;
+        init_timer(&q->timer);
 
         atomic_set(&q->busy_siga_counter,0);
         q->timing.busy_start=0;
@@ -1914,10 +1922,7 @@ tiqdio_thinint_handler(void)
 {
     QDIO_DBF_TEXT4(0,trace,"thin_int");
 
-    if (qdio_performance_stats) {
-        perf_stats.thinints++;
-        perf_stats.start_time_inbound=NOW;
-    }
+    qdio_perf_stat_inc(&perf_stats.thinints);
 
     /* SVS only when needed:
      * issue SVS to benefit from iqdio interrupt avoidance
@@ -1972,17 +1977,13 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
     int i;
     struct qdio_q *q;
 
-    if (qdio_performance_stats) {
-        perf_stats.pcis++;
-        perf_stats.start_time_inbound=NOW;
-    }
+    qdio_perf_stat_inc(&perf_stats.pcis);
     for (i=0;i<irq_ptr->no_input_qs;i++) {
         q=irq_ptr->input_qs[i];
         if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
             qdio_mark_q(q);
         else {
-            if (qdio_performance_stats)
-                perf_stats.tl_runs--;
+            qdio_perf_stat_dec(&perf_stats.tl_runs);
             __qdio_inbound_processing(q);
         }
     }
@@ -1992,8 +1993,7 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
         q=irq_ptr->output_qs[i];
         if (qdio_is_outbound_q_done(q))
             continue;
-        if (qdio_performance_stats)
-            perf_stats.tl_runs--;
+        qdio_perf_stat_dec(&perf_stats.tl_runs);
         if (!irq_ptr->sync_done_on_outb_pcis)
             SYNC_MEMORY;
         __qdio_outbound_processing(q);
@@ -2648,6 +2648,7 @@ qdio_shutdown(struct ccw_device *cdev, int how)
 
     for (i=0;i<irq_ptr->no_output_qs;i++) {
         tasklet_kill(&irq_ptr->output_qs[i]->tasklet);
+        del_timer(&irq_ptr->output_qs[i]->timer);
         wait_event_interruptible_timeout(cdev->private->wait_q,
                          !atomic_read(&irq_ptr->
                                   output_qs[i]->
@@ -3463,20 +3464,18 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
     struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
 
     /* This is the outbound handling of queues */
-    if (qdio_performance_stats)
-        perf_stats.start_time_outbound=NOW;
-
     qdio_do_qdio_fill_output(q,qidx,count,buffers);
 
     used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
 
     if (callflags&QDIO_FLAG_DONT_SIGA) {
-        if (qdio_performance_stats) {
-            perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
-            perf_stats.outbound_cnt++;
-        }
+        qdio_perf_stat_inc(&perf_stats.outbound_cnt);
         return;
     }
+    if (callflags & QDIO_FLAG_PCI_OUT)
+        q->is_pci_out = 1;
+    else
+        q->is_pci_out = 0;
     if (q->is_iqdio_q) {
         /* one siga for every sbal */
         while (count--)
@@ -3504,8 +3503,7 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
             qdio_kick_outbound_q(q);
         } else {
             QDIO_DBF_TEXT3(0,trace, "fast-req");
-            if (qdio_performance_stats)
-                perf_stats.fast_reqs++;
+            qdio_perf_stat_inc(&perf_stats.fast_reqs);
         }
     }
     /*
@@ -3516,10 +3514,7 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
         __qdio_outbound_processing(q);
     }
 
-    if (qdio_performance_stats) {
-        perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
-        perf_stats.outbound_cnt++;
-    }
+    qdio_perf_stat_inc(&perf_stats.outbound_cnt);
 }
 
 /* count must be 1 in iqdio */
@@ -3589,33 +3584,67 @@ qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
         return 0;
 
 #define _OUTP_IT(x...) c+=sprintf(buffer+c,x)
-    _OUTP_IT("i_p_nc/c=%lu/%lu\n",i_p_nc,i_p_c);
-    _OUTP_IT("ii_p_nc/c=%lu/%lu\n",ii_p_nc,ii_p_c);
-    _OUTP_IT("o_p_nc/c=%lu/%lu\n",o_p_nc,o_p_c);
-    _OUTP_IT("Number of tasklet runs (total) : %lu\n",
-         perf_stats.tl_runs);
+#ifdef CONFIG_64BIT
+    _OUTP_IT("Number of tasklet runs (total) : %li\n",
+         (long)atomic64_read(&perf_stats.tl_runs));
+    _OUTP_IT("Inbound tasklet runs tried/retried : %li/%li\n",
+         (long)atomic64_read(&perf_stats.inbound_tl_runs),
+         (long)atomic64_read(&perf_stats.inbound_tl_runs_resched));
+    _OUTP_IT("Inbound-thin tasklet runs tried/retried : %li/%li\n",
+         (long)atomic64_read(&perf_stats.inbound_thin_tl_runs),
+         (long)atomic64_read(&perf_stats.inbound_thin_tl_runs_resched));
+    _OUTP_IT("Outbound tasklet runs tried/retried : %li/%li\n",
+         (long)atomic64_read(&perf_stats.outbound_tl_runs),
+         (long)atomic64_read(&perf_stats.outbound_tl_runs_resched));
+    _OUTP_IT("\n");
+    _OUTP_IT("Number of SIGA sync's issued : %li\n",
+         (long)atomic64_read(&perf_stats.siga_syncs));
+    _OUTP_IT("Number of SIGA in's issued : %li\n",
+         (long)atomic64_read(&perf_stats.siga_ins));
+    _OUTP_IT("Number of SIGA out's issued : %li\n",
+         (long)atomic64_read(&perf_stats.siga_outs));
+    _OUTP_IT("Number of PCIs caught : %li\n",
+         (long)atomic64_read(&perf_stats.pcis));
+    _OUTP_IT("Number of adapter interrupts caught : %li\n",
+         (long)atomic64_read(&perf_stats.thinints));
+    _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %li\n",
+         (long)atomic64_read(&perf_stats.fast_reqs));
     _OUTP_IT("\n");
-    _OUTP_IT("Number of SIGA sync's issued : %lu\n",
-         perf_stats.siga_syncs);
-    _OUTP_IT("Number of SIGA in's issued : %lu\n",
-         perf_stats.siga_ins);
-    _OUTP_IT("Number of SIGA out's issued : %lu\n",
-         perf_stats.siga_outs);
-    _OUTP_IT("Number of PCIs caught : %lu\n",
-         perf_stats.pcis);
-    _OUTP_IT("Number of adapter interrupts caught : %lu\n",
-         perf_stats.thinints);
-    _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %lu\n",
-         perf_stats.fast_reqs);
+    _OUTP_IT("Number of inbound transfers : %li\n",
+         (long)atomic64_read(&perf_stats.inbound_cnt));
+    _OUTP_IT("Number of do_QDIOs outbound : %li\n",
+         (long)atomic64_read(&perf_stats.outbound_cnt));
+#else /* CONFIG_64BIT */
+    _OUTP_IT("Number of tasklet runs (total) : %i\n",
+         atomic_read(&perf_stats.tl_runs));
+    _OUTP_IT("Inbound tasklet runs tried/retried : %i/%i\n",
+         atomic_read(&perf_stats.inbound_tl_runs),
+         atomic_read(&perf_stats.inbound_tl_runs_resched));
+    _OUTP_IT("Inbound-thin tasklet runs tried/retried : %i/%i\n",
+         atomic_read(&perf_stats.inbound_thin_tl_runs),
+         atomic_read(&perf_stats.inbound_thin_tl_runs_resched));
+    _OUTP_IT("Outbound tasklet runs tried/retried : %i/%i\n",
+         atomic_read(&perf_stats.outbound_tl_runs),
+         atomic_read(&perf_stats.outbound_tl_runs_resched));
     _OUTP_IT("\n");
-    _OUTP_IT("Total time of all inbound actions (us) incl. UL : %lu\n",
-         perf_stats.inbound_time);
-    _OUTP_IT("Number of inbound transfers : %lu\n",
-         perf_stats.inbound_cnt);
-    _OUTP_IT("Total time of all outbound do_QDIOs (us) : %lu\n",
-         perf_stats.outbound_time);
-    _OUTP_IT("Number of do_QDIOs outbound : %lu\n",
-         perf_stats.outbound_cnt);
+    _OUTP_IT("Number of SIGA sync's issued : %i\n",
+         atomic_read(&perf_stats.siga_syncs));
+    _OUTP_IT("Number of SIGA in's issued : %i\n",
+         atomic_read(&perf_stats.siga_ins));
+    _OUTP_IT("Number of SIGA out's issued : %i\n",
+         atomic_read(&perf_stats.siga_outs));
+    _OUTP_IT("Number of PCIs caught : %i\n",
+         atomic_read(&perf_stats.pcis));
+    _OUTP_IT("Number of adapter interrupts caught : %i\n",
+         atomic_read(&perf_stats.thinints));
+    _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %i\n",
+         atomic_read(&perf_stats.fast_reqs));
+    _OUTP_IT("\n");
+    _OUTP_IT("Number of inbound transfers : %i\n",
+         atomic_read(&perf_stats.inbound_cnt));
+    _OUTP_IT("Number of do_QDIOs outbound : %i\n",
+         atomic_read(&perf_stats.outbound_cnt));
+#endif /* CONFIG_64BIT */
     _OUTP_IT("\n");
 
     return c;
@@ -3642,8 +3671,6 @@ qdio_add_procfs_entry(void)
 static void
 qdio_remove_procfs_entry(void)
 {
-    perf_stats.tl_runs=0;
-
     if (!proc_perf_file_registration) /* means if it went ok earlier */
         remove_proc_entry(QDIO_PERF,&proc_root);
 }
@@ -3671,13 +3698,38 @@ qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count
         qdio_performance_stats = i;
         if (i==0) {
             /* reset perf. stat. info */
-            i_p_nc = 0;
-            i_p_c = 0;
-            ii_p_nc = 0;
-            ii_p_c = 0;
-            o_p_nc = 0;
-            o_p_c = 0;
-            memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
+#ifdef CONFIG_64BIT
+            atomic64_set(&perf_stats.tl_runs, 0);
+            atomic64_set(&perf_stats.outbound_tl_runs, 0);
+            atomic64_set(&perf_stats.inbound_tl_runs, 0);
+            atomic64_set(&perf_stats.inbound_tl_runs_resched, 0);
+            atomic64_set(&perf_stats.inbound_thin_tl_runs, 0);
+            atomic64_set(&perf_stats.inbound_thin_tl_runs_resched,
+                     0);
+            atomic64_set(&perf_stats.siga_outs, 0);
+            atomic64_set(&perf_stats.siga_ins, 0);
+            atomic64_set(&perf_stats.siga_syncs, 0);
+            atomic64_set(&perf_stats.pcis, 0);
+            atomic64_set(&perf_stats.thinints, 0);
+            atomic64_set(&perf_stats.fast_reqs, 0);
+            atomic64_set(&perf_stats.outbound_cnt, 0);
+            atomic64_set(&perf_stats.inbound_cnt, 0);
+#else /* CONFIG_64BIT */
+            atomic_set(&perf_stats.tl_runs, 0);
+            atomic_set(&perf_stats.outbound_tl_runs, 0);
+            atomic_set(&perf_stats.inbound_tl_runs, 0);
+            atomic_set(&perf_stats.inbound_tl_runs_resched, 0);
+            atomic_set(&perf_stats.inbound_thin_tl_runs, 0);
+            atomic_set(&perf_stats.inbound_thin_tl_runs_resched, 0);
+            atomic_set(&perf_stats.siga_outs, 0);
+            atomic_set(&perf_stats.siga_ins, 0);
+            atomic_set(&perf_stats.siga_syncs, 0);
+            atomic_set(&perf_stats.pcis, 0);
+            atomic_set(&perf_stats.thinints, 0);
+            atomic_set(&perf_stats.fast_reqs, 0);
+            atomic_set(&perf_stats.outbound_cnt, 0);
+            atomic_set(&perf_stats.inbound_cnt, 0);
+#endif /* CONFIG_64BIT */
         }
     } else {
         QDIO_PRINT_WARN("QDIO performance_stats: write 0 or 1 to this file!\n");
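All of the open-coded "if (qdio_performance_stats) counter++" sites above collapse into qdio_perf_stat_inc()/qdio_perf_stat_dec(), which touch a counter only while statistics are enabled and use atomic operations so tasklets and interrupt handlers need no extra locking. A small self-contained sketch of the same guarded-atomic-counter pattern, written with userspace C11 atomics purely for illustration (the kernel code uses atomic64_t/atomic_t as shown in the patch):

#include <stdatomic.h>
#include <stdio.h>

static int stats_enabled = 1;   /* plays the role of qdio_performance_stats */
static atomic_long siga_outs;   /* one counter, e.g. perf_stats.siga_outs */

/* bump a counter only while statistics collection is switched on */
static inline void perf_stat_inc(atomic_long *count)
{
    if (stats_enabled)
        atomic_fetch_add_explicit(count, 1, memory_order_relaxed);
}

int main(void)
{
    for (int i = 0; i < 1000; i++)
        perf_stat_inc(&siga_outs);  /* hot path: one branch plus one atomic add */
    printf("siga_outs=%ld\n", atomic_load(&siga_outs));
    return 0;
}

The CONFIG_64BIT split in the patch presumably exists because a generic 64-bit atomic type was not available to 31-bit s390 builds at the time, so those builds fall back to plain atomic_t counters.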
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index ec9af72b2af..6d7aad18f6f 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -60,6 +60,7 @@
 #define QDIO_ACTIVATE_TIMEOUT ((5*HZ)>>10)
 #define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ)
 #define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ)
+#define QDIO_FORCE_CHECK_TIMEOUT (10*HZ)
 
 enum qdio_irq_states {
     QDIO_IRQ_STATE_INACTIVE,
@@ -406,21 +407,43 @@ do_clear_global_summary(void)
 #define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
 
 struct qdio_perf_stats {
-    unsigned long tl_runs;
-
-    unsigned long siga_outs;
-    unsigned long siga_ins;
-    unsigned long siga_syncs;
-    unsigned long pcis;
-    unsigned long thinints;
-    unsigned long fast_reqs;
-
-    __u64 start_time_outbound;
-    unsigned long outbound_cnt;
-    unsigned long outbound_time;
-    __u64 start_time_inbound;
-    unsigned long inbound_cnt;
-    unsigned long inbound_time;
+#ifdef CONFIG_64BIT
+    atomic64_t tl_runs;
+    atomic64_t outbound_tl_runs;
+    atomic64_t outbound_tl_runs_resched;
+    atomic64_t inbound_tl_runs;
+    atomic64_t inbound_tl_runs_resched;
+    atomic64_t inbound_thin_tl_runs;
+    atomic64_t inbound_thin_tl_runs_resched;
+
+    atomic64_t siga_outs;
+    atomic64_t siga_ins;
+    atomic64_t siga_syncs;
+    atomic64_t pcis;
+    atomic64_t thinints;
+    atomic64_t fast_reqs;
+
+    atomic64_t outbound_cnt;
+    atomic64_t inbound_cnt;
+#else /* CONFIG_64BIT */
+    atomic_t tl_runs;
+    atomic_t outbound_tl_runs;
+    atomic_t outbound_tl_runs_resched;
+    atomic_t inbound_tl_runs;
+    atomic_t inbound_tl_runs_resched;
+    atomic_t inbound_thin_tl_runs;
+    atomic_t inbound_thin_tl_runs_resched;
+
+    atomic_t siga_outs;
+    atomic_t siga_ins;
+    atomic_t siga_syncs;
+    atomic_t pcis;
+    atomic_t thinints;
+    atomic_t fast_reqs;
+
+    atomic_t outbound_cnt;
+    atomic_t inbound_cnt;
+#endif /* CONFIG_64BIT */
 };
 
 /* unlikely as the later the better */
@@ -489,8 +512,8 @@ struct qdio_q {
 
     void *irq_ptr;
 
-#ifdef QDIO_USE_TIMERS_FOR_POLLING
     struct timer_list timer;
+#ifdef QDIO_USE_TIMERS_FOR_POLLING
     atomic_t timer_already_set;
     spinlock_t timer_lock;
 #else /* QDIO_USE_TIMERS_FOR_POLLING */
@@ -536,6 +559,7 @@ struct qdio_q {
     } timing;
     atomic_t busy_siga_counter;
     unsigned int queue_type;
+    unsigned int is_pci_out;
 
     /* leave this member at the end. won't be cleared in qdio_fill_qs */
     struct slib *slib; /* a page is allocated under this pointer,
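The qdio.h hunks add the per-queue timer and the QDIO_FORCE_CHECK_TIMEOUT interval that __qdio_outbound_processing() uses to re-check outbound queues which will not receive a PCI interrupt. A stripped-down sketch of that arm/re-arm/cancel pattern with the pre-timer_setup() timer API of this kernel generation (struct out_queue and the function names are invented for illustration):

#include <linux/timer.h>
#include <linux/jiffies.h>

#define FORCE_CHECK_TIMEOUT (10 * HZ)   /* same interval as QDIO_FORCE_CHECK_TIMEOUT */

struct out_queue {
    struct timer_list timer;
    /* real queue state would live here */
};

/* re-arm the timer while work is pending, stop it once the queue is drained */
static void out_queue_rearm_or_stop(struct out_queue *q, int done)
{
    if (done)
        del_timer(&q->timer);
    else if (!timer_pending(&q->timer))
        mod_timer(&q->timer, jiffies + FORCE_CHECK_TIMEOUT);
}

/* timer (and tasklet) body: process the queue, then decide whether to check again */
static void out_queue_process(unsigned long data)
{
    struct out_queue *q = (struct out_queue *)data;
    int done = 1;   /* placeholder: a real queue would report its own state */

    out_queue_rearm_or_stop(q, done);
}

/* one-time setup per queue, as qdio_fill_qs() does it */
static void out_queue_init(struct out_queue *q)
{
    init_timer(&q->timer);
    q->timer.function = out_queue_process;
    q->timer.data = (unsigned long)q;
}

As in the patch's qdio_shutdown() hunk, teardown must pair this with del_timer() before the queue memory goes away.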