| author | Jan Glauber <jang@linux.vnet.ibm.com> | 2009-06-22 12:08:11 +0200 |
|---|---|---|
| committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2009-06-22 12:08:20 +0200 |
| commit | 9a2c160a8cbd5b3253672b3bac462c64d0d2eef7 | |
| tree | 72090561a0cfc35eaebc8630a213badaf129e196 /drivers/s390/cio | |
| parent | 60b5df2f12f2ab54bfa7c1f0f0ce3f5953e73c0b | |
[S390] qdio: fix check for running under z/VM
The check for whether qdio is running under z/VM was incorrect, since SIGA-Sync is not
set if the device runs with QIOASSIST. Use MACHINE_IS_VM instead to prevent
polling under z/VM (see the sketch below the sign-offs).
Merge qdio_inbound_q_done and tiqdio_inbound_q_done.
Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
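To make the reasoning above concrete, here is a small, self-contained C model of the old and the new "may we keep polling this inbound queue?" decision. It is a sketch, not kernel code: struct qcfg, old_may_poll() and new_may_poll() are hypothetical names invented for the example, and modelling "needs SIGA-Sync" as "z/VM guest without QIOASSIST" is an assumption made for illustration; only need_siga_sync(), pci_out_supported(), is_thinint_irq() and MACHINE_IS_VM correspond to names in the patch.

#include <stdbool.h>
#include <stdio.h>

/* One device/queue configuration, reduced to the flags that matter here. */
struct qcfg {
        const char *name;
        bool machine_is_vm;     /* stand-in for MACHINE_IS_VM */
        bool qioassist;         /* device uses QIOASSIST under z/VM */
        bool thinint;           /* stand-in for is_thinint_irq() */
        bool pci_out;           /* stand-in for pci_out_supported() */
};

/*
 * Old heuristic: "needs SIGA-Sync or has PCI-capable outbound queues"
 * served as a proxy for "running under z/VM".  With QIOASSIST the
 * device does not need SIGA-Sync, so a z/VM guest is misclassified
 * and polling is (wrongly) enabled.
 */
static bool old_may_poll(const struct qcfg *c)
{
        /* assumption for this model: SIGA-Sync only without QIOASSIST */
        bool need_siga_sync = c->machine_is_vm && !c->qioassist;

        return !(need_siga_sync || c->pci_out);
}

/* New check: test MACHINE_IS_VM directly; never poll under z/VM. */
static bool new_may_poll(const struct qcfg *c)
{
        if (c->thinint)
                return false;
        if (c->machine_is_vm)
                return false;   /* don't poll under z/VM */
        return true;
}

int main(void)
{
        const struct qcfg cfgs[] = {
                { "LPAR",                false, false, false, false },
                { "z/VM, no QIOASSIST",  true,  false, false, false },
                { "z/VM with QIOASSIST", true,  true,  false, false },
        };

        for (size_t i = 0; i < sizeof(cfgs) / sizeof(cfgs[0]); i++)
                printf("%-20s old_may_poll=%d new_may_poll=%d\n",
                       cfgs[i].name, old_may_poll(&cfgs[i]),
                       new_may_poll(&cfgs[i]));
        return 0;
}

For the third configuration the old heuristic reports old_may_poll=1 even though the guest runs under z/VM; that is the misclassification the patch removes by asking MACHINE_IS_VM directly instead of inferring it from the SIGA requirements.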
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r-- | drivers/s390/cio/qdio_main.c | 48
1 file changed, 13 insertions, 35 deletions
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 377d881385c..127e78eef65 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -499,7 +499,7 @@ check_next:
                 /*
                  * No siga-sync needed for non-qebsm here, as the inbound queue
                  * will be synced on the next siga-r, resp.
-                 * tiqdio_is_inbound_q_done will do the siga-sync.
+                 * qdio_inbound_q_done will do the siga-sync.
                  */
                 q->first_to_check = add_buf(q->first_to_check, count);
                 atomic_sub(count, &q->nr_buf_used);
@@ -530,35 +530,32 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
 
         if ((bufnr != q->last_move) || q->qdio_error) {
                 q->last_move = bufnr;
-                if (!need_siga_sync(q) && !pci_out_supported(q))
+                if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
                         q->u.in.timestamp = get_usecs();
-
-                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
                 return 1;
         } else
                 return 0;
 }
 
-static int qdio_inbound_q_done(struct qdio_q *q)
+static inline int qdio_inbound_q_done(struct qdio_q *q)
 {
         unsigned char state = 0;
 
         if (!atomic_read(&q->nr_buf_used))
                 return 1;
 
-        /*
-         * We need that one for synchronization with the adapter, as it
-         * does a kind of PCI avoidance.
-         */
         qdio_siga_sync_q(q);
-
         get_buf_state(q, q->first_to_check, &state, 0);
+
         if (state == SLSB_P_INPUT_PRIMED)
-                /* we got something to do */
+                /* more work coming */
                 return 0;
 
-        /* on VM, we don't poll, so the q is always done here */
-        if (need_siga_sync(q) || pci_out_supported(q))
+        if (is_thinint_irq(q->irq_ptr))
+                return 1;
+
+        /* don't poll under z/VM */
+        if (MACHINE_IS_VM)
                 return 1;
 
         /*
@@ -569,27 +566,8 @@ static int qdio_inbound_q_done(struct qdio_q *q)
                 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
                               q->first_to_check);
                 return 1;
-        } else {
-                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
-                              q->first_to_check);
-                return 0;
-        }
-}
-
-static inline int tiqdio_inbound_q_done(struct qdio_q *q)
-{
-        unsigned char state = 0;
-
-        if (!atomic_read(&q->nr_buf_used))
-                return 1;
-
-        qdio_siga_sync_q(q);
-        get_buf_state(q, q->first_to_check, &state, 0);
-
-        if (state == SLSB_P_INPUT_PRIMED)
-                /* more work coming */
+        } else
                 return 0;
-        return 1;
 }
 
 static void qdio_kick_handler(struct qdio_q *q)
@@ -847,7 +825,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
 
         qdio_kick_handler(q);
 
-        if (!tiqdio_inbound_q_done(q)) {
+        if (!qdio_inbound_q_done(q)) {
                 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
                 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
                         tasklet_schedule(&q->tasklet);
@@ -858,7 +836,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
          * We need to check again to not lose initiative after
          * resetting the ACK state.
          */
-        if (!tiqdio_inbound_q_done(q)) {
+        if (!qdio_inbound_q_done(q)) {
                 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
                 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
                         tasklet_schedule(&q->tasklet);