From e23d6d2b090658007732770720a44375cba23200 Mon Sep 17 00:00:00 2001
From: Roland Dreier
Date: Thu, 6 Oct 2005 13:25:16 -0700
Subject: [IB] mthca: detect SRQ overflow

The hardware relies on us keeping one extra work request that never
gets used in SRQs.  Add checks to the SRQ work request posting
functions so that they fail when someone is about to use up that
extra work request, rather than when someone uses the very last work
request.

Signed-off-by: Roland Dreier
---
 drivers/infiniband/hw/mthca/mthca_srq.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 13d2290261d..e464321a7aa 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -438,6 +438,14 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		wqe       = get_wqe(srq, ind);
 		next_ind  = *wqe_to_link(wqe);
+
+		if (next_ind < 0) {
+			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
+			err = -ENOMEM;
+			*bad_wr = wr;
+			break;
+		}
+
 		prev_wqe  = srq->last;
 		srq->last = wqe;
 
@@ -529,6 +537,13 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		wqe       = get_wqe(srq, ind);
 		next_ind  = *wqe_to_link(wqe);
 
+		if (next_ind < 0) {
+			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
+			err = -ENOMEM;
+			*bad_wr = wr;
+			break;
+		}
+
 		((struct mthca_next_seg *) wqe)->nda_op =
 			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
 		((struct mthca_next_seg *) wqe)->ee_nds = 0;
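
Not part of the patch above: a minimal, self-contained C sketch of the idea
behind the new check.  It assumes (as the patch suggests) that free SRQ WQEs
are chained through a per-entry "next" index whose tail is marked with a
negative sentinel, so refusing a post when the entry about to be consumed
links to that sentinel leaves one spare WQE for the hardware.  All names here
(srq_model, post_recv, MAX_WQES) are hypothetical, not driver code, and the
model ignores completion processing that would return WQEs to the free list.

/*
 * Illustrative model only: a free list of WQEs linked by index,
 * with -1 as the end-of-list sentinel, as in the patch's check.
 */
#include <stdio.h>
#include <errno.h>

#define MAX_WQES 8

struct srq_model {
	int next[MAX_WQES];	/* free-list link per WQE; -1 marks the tail */
	int first_free;		/* head of the free list */
};

static void srq_init(struct srq_model *srq, int nwqes)
{
	int i;

	/* Chain all WQEs together; the last one gets the -1 sentinel. */
	for (i = 0; i < nwqes - 1; ++i)
		srq->next[i] = i + 1;
	srq->next[nwqes - 1] = -1;
	srq->first_free = 0;
}

/* Returns the WQE index handed to "hardware", or -ENOMEM if full. */
static int post_recv(struct srq_model *srq)
{
	int ind      = srq->first_free;
	int next_ind = srq->next[ind];

	/*
	 * Mirrors the check added by the patch: if the WQE we are about
	 * to use links to the sentinel, fail now so that one spare WQE
	 * is always left unused for the hardware.
	 */
	if (next_ind < 0) {
		fprintf(stderr, "SRQ full\n");
		return -ENOMEM;
	}

	srq->first_free = next_ind;
	return ind;
}

int main(void)
{
	struct srq_model srq;
	int i, posted = 0;

	srq_init(&srq, MAX_WQES);

	/* Only MAX_WQES - 1 posts succeed; the last WQE stays in reserve. */
	for (i = 0; i < MAX_WQES; ++i) {
		if (post_recv(&srq) < 0)
			break;
		++posted;
	}
	printf("posted %d of %d WQEs\n", posted, MAX_WQES);
	return 0;
}

Running the sketch posts 7 of 8 WQEs before reporting "SRQ full", which is the
behavior the patch introduces: overflow is detected one entry early instead of
only after the very last work request has been handed out.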