From a8b56f296d7d977fea2512e353a131f8da490aa5 Mon Sep 17 00:00:00 2001
From: Ralph Campbell
Date: Mon, 27 Oct 2008 15:31:25 -0700
Subject: IB/ipath: Fix RDMA write with immediate copy of last packet

When the last packet of a RDMA write with immediate is received, the
next receive work queue entry ID should be used to generate a
completion entry.  The code was incorrectly resetting part of the
state used to copy the last packet.

Signed-off-by: Ralph Campbell
Signed-off-by: Roland Dreier
---
 drivers/infiniband/hw/ipath/ipath_ruc.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index fc0f6d9e603..2296832f94d 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -156,7 +156,7 @@ bail:
 /**
  * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
  * @qp: the QP
- * @wr_id_only: update wr_id only, not SGEs
+ * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
  *
  * Return 0 if no RWQE is available, otherwise return 1.
  *
@@ -173,8 +173,6 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
        u32 tail;
        int ret;

-       qp->r_sge.sg_list = qp->r_sg_list;
-
        if (qp->ibqp.srq) {
                srq = to_isrq(qp->ibqp.srq);
                handler = srq->ibsrq.event_handler;
@@ -206,8 +204,10 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
                wqe = get_rwqe_ptr(rq, tail);
                if (++tail >= rq->size)
                        tail = 0;
-       } while (!wr_id_only && !ipath_init_sge(qp, wqe, &qp->r_len,
-                                               &qp->r_sge));
+               if (wr_id_only)
+                       break;
+               qp->r_sge.sg_list = qp->r_sg_list;
+       } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
        qp->r_wr_id = wqe->wr_id;
        wq->tail = tail;
--
cgit v1.2.3
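
The subtle point in the last hunk is ordering: when the caller asks for the
work request ID only, the loop must break out before the scatter/gather state
is rebuilt, otherwise the state still being used to copy the last packet is
clobbered.  The fragment below is a minimal user-space sketch of that loop
shape, not the driver code; the types and names (rwqe, sge_state) are invented
stand-ins.

    #include <stdio.h>

    struct rwqe { unsigned long wr_id; int num_sge; };

    /* Simplified analogue of ipath_get_rwqe(): return 1 if an entry was
     * consumed, 0 if the queue is empty.  When wr_id_only is set, only the
     * completion ID may change; the in-progress copy state must survive. */
    static int get_rwqe(struct rwqe *q, unsigned *tail, unsigned size,
                        int wr_id_only, unsigned long *r_wr_id, int *sge_state)
    {
        struct rwqe *wqe;

        do {
            if (*tail >= size)
                return 0;                 /* nothing posted */
            wqe = &q[(*tail)++];
            if (wr_id_only)
                break;                    /* leave *sge_state untouched */
            *sge_state = wqe->num_sge;    /* rebuild SGEs only on this path */
        } while (*sge_state == 0);        /* skip entries with no data segments */

        *r_wr_id = wqe->wr_id;            /* completion reports the new entry's ID */
        return 1;
    }

    int main(void)
    {
        struct rwqe q[] = { { 1, 0 }, { 2, 3 } };
        unsigned tail = 0;
        unsigned long wr_id = 0;
        int sge_state = 42;               /* pretend a copy is in progress */

        get_rwqe(q, &tail, 2, 1, &wr_id, &sge_state);
        printf("wr_id=%lu, copy state still %d\n", wr_id, sge_state);
        return 0;
    }

In the patch itself the same effect comes from moving the qp->r_sge.sg_list
assignment inside the loop, after the wr_id_only break.
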
From 5880ff19fa29466cb9d7e293710e6aebecfecdd1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ilpo=20J=C3=A4rvinen?=
Date: Thu, 30 Oct 2008 13:39:43 +0200
Subject: RDMA/nes: Reindent mis-indented spinlocks
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Ilpo Järvinen
Signed-off-by: Roland Dreier
---
 drivers/infiniband/hw/nes/nes_verbs.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 932e56fcf77..ffdd141efe3 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -220,14 +220,14 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
        if (nesqp->ibqp_state > IB_QPS_RTS)
                return -EINVAL;

-       spin_lock_irqsave(&nesqp->lock, flags);
+       spin_lock_irqsave(&nesqp->lock, flags);

        head = nesqp->hwqp.sq_head;
        qsize = nesqp->hwqp.sq_tail;

        /* Check for SQ overflow */
        if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
-               spin_unlock_irqrestore(&nesqp->lock, flags);
+               spin_unlock_irqrestore(&nesqp->lock, flags);
                return -EINVAL;
        }

@@ -269,7 +269,7 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
        nes_write32(nesdev->regs+NES_WQE_ALLOC,
                        (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);

-       spin_unlock_irqrestore(&nesqp->lock, flags);
+       spin_unlock_irqrestore(&nesqp->lock, flags);

        return 0;
 }
@@ -3212,7 +3212,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
        if (nesqp->ibqp_state > IB_QPS_RTS)
                return -EINVAL;

-       spin_lock_irqsave(&nesqp->lock, flags);
+       spin_lock_irqsave(&nesqp->lock, flags);

        head = nesqp->hwqp.sq_head;

@@ -3337,7 +3337,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
                        (counter << 24) | 0x00800000 | nesqp->hwqp.qp_id);
        }

-       spin_unlock_irqrestore(&nesqp->lock, flags);
+       spin_unlock_irqrestore(&nesqp->lock, flags);

        if (err)
                *bad_wr = ib_wr;
@@ -3368,7 +3368,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
        if (nesqp->ibqp_state > IB_QPS_RTS)
                return -EINVAL;

-       spin_lock_irqsave(&nesqp->lock, flags);
+       spin_lock_irqsave(&nesqp->lock, flags);

        head = nesqp->hwqp.rq_head;

@@ -3421,7 +3421,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
                nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter<<24) | nesqp->hwqp.qp_id);
        }

-       spin_unlock_irqrestore(&nesqp->lock, flags);
+       spin_unlock_irqrestore(&nesqp->lock, flags);

        if (err)
                *bad_wr = ib_wr;
@@ -3453,7 +3453,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
        nes_debug(NES_DBG_CQ, "\n");

-       spin_lock_irqsave(&nescq->lock, flags);
+       spin_lock_irqsave(&nescq->lock, flags);

        head = nescq->hw_cq.cq_head;
        cq_size = nescq->hw_cq.cq_size;

@@ -3562,7 +3562,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
        nes_debug(NES_DBG_CQ, "Reporting %u completions for CQ%u.\n",
                        cqe_count, nescq->hw_cq.cq_number);

-       spin_unlock_irqrestore(&nescq->lock, flags);
+       spin_unlock_irqrestore(&nescq->lock, flags);

        return cqe_count;
 }
--
cgit v1.2.3
From 35c6d6942c966e6d74ea801d8b5007d7f900ce92 Mon Sep 17 00:00:00 2001
From: Chien Tung
Date: Sun, 2 Nov 2008 21:37:35 -0800
Subject: RDMA/nes: Correct handling of PBL resources

* Roll back allocated structures on failures.
* Use GFP_ATOMIC instead of GFP_KERNEL since we are holding a lock.
* Acquire nesadapter->pbl_lock when modifying PBL counters.
* Decrement PBL counters on deallocation.

Signed-off-by: Chien Tung
Signed-off-by: Roland Dreier
---
 drivers/infiniband/hw/nes/nes_verbs.c | 44 ++++++++++++++++++++++++++---------
 1 file changed, 33 insertions(+), 11 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index ffdd141efe3..a8c2193a026 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -349,7 +349,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
                if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) {
                        spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
                        ret = -ENOMEM;
-                       goto failed_vpbl_alloc;
+                       goto failed_vpbl_avail;
                } else {
                        nesadapter->free_4kpbl -= nesfmr->nesmr.pbls_used;
                }
@@ -357,7 +357,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
                if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) {
                        spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
                        ret = -ENOMEM;
-                       goto failed_vpbl_alloc;
+                       goto failed_vpbl_avail;
                } else {
                        nesadapter->free_256pbl -= nesfmr->nesmr.pbls_used;
                }
@@ -391,14 +391,14 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
                        goto failed_vpbl_alloc;
                }

-               nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_KERNEL);
+               nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
+               nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_ATOMIC);
                if (!nesfmr->root_vpbl.leaf_vpbl) {
                        spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
                        ret = -ENOMEM;
                        goto failed_leaf_vpbl_alloc;
                }

-               nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
                nes_debug(NES_DBG_MR, "two level pbl, root_vpbl.pbl_vbase=%p"
                                " leaf_pbl_cnt=%d root_vpbl.leaf_vpbl=%p\n",
                                nesfmr->root_vpbl.pbl_vbase, nesfmr->leaf_pbl_cnt, nesfmr->root_vpbl.leaf_vpbl);
@@ -519,6 +519,16 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
                        nesfmr->root_vpbl.pbl_pbase);

 failed_vpbl_alloc:
+       if (nesfmr->nesmr.pbls_used != 0) {
+               spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+               if (nesfmr->nesmr.pbl_4k)
+                       nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
+               else
+                       nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
+               spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+       }
+
+failed_vpbl_avail:
        kfree(nesfmr);

 failed_fmr_alloc:
@@ -534,18 +544,14 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
  */
 static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
 {
+       unsigned long flags;
        struct nes_mr *nesmr = to_nesmr_from_ibfmr(ibfmr);
        struct nes_fmr *nesfmr = to_nesfmr(nesmr);
        struct nes_vnic *nesvnic = to_nesvnic(ibfmr->device);
        struct nes_device *nesdev = nesvnic->nesdev;
-       struct nes_mr temp_nesmr = *nesmr;
+       struct nes_adapter *nesadapter = nesdev->nesadapter;
        int i = 0;

-       temp_nesmr.ibmw.device = ibfmr->device;
-       temp_nesmr.ibmw.pd = ibfmr->pd;
-       temp_nesmr.ibmw.rkey = ibfmr->rkey;
-       temp_nesmr.ibmw.uobject = NULL;
-
        /* free the resources */
        if (nesfmr->leaf_pbl_cnt == 0) {
                /* single PBL case */
@@ -561,8 +567,24 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
                pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
                                nesfmr->root_vpbl.pbl_pbase);
        }
+       nesmr->ibmw.device = ibfmr->device;
+       nesmr->ibmw.pd = ibfmr->pd;
+       nesmr->ibmw.rkey = ibfmr->rkey;
+       nesmr->ibmw.uobject = NULL;
+
+       if (nesfmr->nesmr.pbls_used != 0) {
+               spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+               if (nesfmr->nesmr.pbl_4k) {
+                       nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
+                       WARN_ON(nesadapter->free_4kpbl > nesadapter->max_4kpbl);
+               } else {
+                       nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
+                       WARN_ON(nesadapter->free_256pbl > nesadapter->max_256pbl);
+               }
+               spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+       }

-       return nes_dealloc_mw(&temp_nesmr.ibmw);
+       return nes_dealloc_mw(&nesmr->ibmw);
 }
--
cgit v1.2.3

From 2e369544ac14de7bd0d76b369c1f6110eefbea8a Mon Sep 17 00:00:00 2001
From: Vadim Makhervaks
Date: Sun, 2 Nov 2008 21:39:17 -0800
Subject: RDMA/nes: Fix CQ allocation scheme for multicast receive queue apps

Fix CQ allocation for multicast receive queue applications.  Before
this patch, the CQ was not lined up with the right NIC.

Signed-off-by: Vadim Makhervaks
Signed-off-by: Chien Tung
Signed-off-by: Roland Dreier
---
 drivers/infiniband/hw/nes/nes_verbs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index a8c2193a026..d36c9a0bf1b 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1617,7 +1617,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
                        nes_ucontext->mcrqf = req.mcrqf;
                        if (nes_ucontext->mcrqf) {
                                if (nes_ucontext->mcrqf & 0x80000000)
-                                       nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 12 + (nes_ucontext->mcrqf & 0xf) - 1;
+                                       nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 28 + 2 * ((nes_ucontext->mcrqf & 0xf) - 1);
                                else if (nes_ucontext->mcrqf & 0x40000000)
                                        nescq->hw_cq.cq_number = nes_ucontext->mcrqf & 0xffff;
                                else
--
cgit v1.2.3
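
The one-line change above only swaps the arithmetic that maps a multicast
receive queue slot (the low bits of mcrqf) onto a hardware CQ number.  The
small stand-alone calculation below shows how the two formulas diverge; the
qp_id value is a made-up placeholder, and the commit message does not explain
why 28 and a stride of 2 match the hardware's per-NIC CQ layout, so take the
constants as given.

    #include <stdio.h>

    int main(void)
    {
        unsigned int qp_id = 0;   /* hypothetical nesvnic->nic.qp_id */
        unsigned int mcrqf;

        for (mcrqf = 1; mcrqf <= 4; mcrqf++) {
            unsigned int old_cq = qp_id + 12 + (mcrqf & 0xf) - 1;
            unsigned int new_cq = qp_id + 28 + 2 * ((mcrqf & 0xf) - 1);
            printf("slot %u: old CQ %2u, new CQ %2u\n", mcrqf, old_cq, new_cq);
        }
        return 0;
    }

The old formula packed the CQ numbers consecutively; the new one spaces them
two apart, which, per the commit message, is what lines each multicast receive
queue up with the right NIC.
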
From 633693660045b3e46a63ed618eb38a54339fbcc0 Mon Sep 17 00:00:00 2001
From: Chien Tung
Date: Sun, 2 Nov 2008 21:40:55 -0800
Subject: RDMA/nes: Mitigate compatibility issue regarding PCIe write credits

Under heavy load, there is a compatibility issue regarding PCIe write
credits with certain chipsets.  It can be mitigated by limiting read
requests to 256 bytes.  This workaround is always enabled for Tbird2
on Gladius.  We also add a module parameter to enable the workaround
for non-Gladius cards.

Signed-off-by: Chien Tung
Signed-off-by: Roland Dreier
---
 drivers/infiniband/hw/nes/nes.c    | 16 ++++++++++++++++
 drivers/infiniband/hw/nes/nes_hw.h |  1 +
 2 files changed, 17 insertions(+)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index a2b04d62b1a..aa1dc41f04c 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -95,6 +95,10 @@ unsigned int wqm_quanta = 0x10000;
 module_param(wqm_quanta, int, 0644);
 MODULE_PARM_DESC(wqm_quanta, "WQM quanta");

+static unsigned int limit_maxrdreqsz;
+module_param(limit_maxrdreqsz, bool, 0644);
+MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
+
 LIST_HEAD(nes_adapter_list);
 static LIST_HEAD(nes_dev_list);

@@ -588,6 +592,18 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
                        nesdev->nesadapter->port_count;
        }

+       if ((limit_maxrdreqsz ||
+            ((nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_GLADIUS) &&
+             (hw_rev == NE020_REV1))) &&
+           (pcie_get_readrq(pcidev) > 256)) {
+               if (pcie_set_readrq(pcidev, 256))
+                       printk(KERN_ERR PFX "Unable to set max read request"
+                               " to 256 bytes\n");
+               else
+                       nes_debug(NES_DBG_INIT, "Max read request size set"
+                               " to 256 bytes\n");
+       }
+
        tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);

        /* bring up the Control QP */
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 610b9d85959..bc0b4de0445 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -40,6 +40,7 @@
 #define NES_PHY_TYPE_ARGUS      4
 #define NES_PHY_TYPE_PUMA_1G    5
 #define NES_PHY_TYPE_PUMA_10G   6
+#define NES_PHY_TYPE_GLADIUS    7

 #define NES_MULTICAST_PF_MAX 8
--
cgit v1.2.3

From 7f3abf5c7c9a9febdd643b9d4005382144525475 Mon Sep 17 00:00:00 2001
From: Vladimir Sokolovsky
Date: Wed, 5 Nov 2008 10:56:52 -0800
Subject: IB/mlx4: Set umem field to NULL in mlx4_ib_alloc_fast_reg_mr()

Set mr->umem to NULL in mlx4_ib_alloc_fast_reg_mr().  Otherwise
ib_dereg_mr() may invoke ib_umem_release() on a random pointer value
and get an oops.

Signed-off-by: Vladimir Sokolovsky
Signed-off-by: Roland Dreier
---
 drivers/infiniband/hw/mlx4/mr.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 87f5c5a87b9..8e4d26d56a9 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -205,6 +205,7 @@ struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
+       mr->umem = NULL;

        return &mr->ibmr;
--
cgit v1.2.3
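
The mlx4 fix is a small instance of a general rule: a member that only some
allocation paths fill in must still be initialized on the others, because a
shared teardown path will inspect it.  A user-space sketch of that pattern
follows; struct mr, alloc_fast_reg_mr() and dereg_mr() here are invented
stand-ins for the verbs objects, not the kernel API.

    #include <stdlib.h>

    struct mr {
        void *umem;    /* only used for userspace-backed regions */
        int   key;
    };

    static struct mr *alloc_fast_reg_mr(int key)
    {
        struct mr *mr = malloc(sizeof(*mr));

        if (!mr)
            return NULL;
        mr->key  = key;
        mr->umem = NULL;   /* without this line, dereg below frees garbage */
        return mr;
    }

    static void dereg_mr(struct mr *mr)
    {
        if (mr->umem)      /* teardown checks the field unconditionally */
            free(mr->umem);
        free(mr);
    }

    int main(void)
    {
        struct mr *mr = alloc_fast_reg_mr(7);

        if (mr)
            dereg_mr(mr);  /* safe even though no umem was ever attached */
        return 0;
    }
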
From fad96ab62d38b94efbdb4c3c5fc55cb90d57937d Mon Sep 17 00:00:00 2001
From: Stefan Roscher
Date: Tue, 11 Nov 2008 15:44:22 -0800
Subject: IB/ehca: Remove reference to special QP in case of port activation failure

If the initialization of a special QP (e.g. AQP1) fails due to a
software timeout, we have to remove the reference to that special QP
struct from the port struct to stop the driver from accessing the QP,
since it will be/has been destroyed by the caller, e.g. in this case
ib_mad.

Signed-off-by: Stefan Roscher
Signed-off-by: Roland Dreier
---
 drivers/infiniband/hw/ehca/ehca_irq.c | 44 ++++++++++++++++++++++-------------
 drivers/infiniband/hw/ehca/ehca_qp.c  |  5 ++++
 2 files changed, 33 insertions(+), 16 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index cb55be04442..757035ea246 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -359,36 +359,48 @@ static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
        *old_attr = new_attr;
 }

+/* replay modify_qp for sqps -- return 0 if all is well, 1 if AQP1 destroyed */
+static int replay_modify_qp(struct ehca_sport *sport)
+{
+       int aqp1_destroyed;
+       unsigned long flags;
+
+       spin_lock_irqsave(&sport->mod_sqp_lock, flags);
+
+       aqp1_destroyed = !sport->ibqp_sqp[IB_QPT_GSI];
+
+       if (sport->ibqp_sqp[IB_QPT_SMI])
+               ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
+       if (!aqp1_destroyed)
+               ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
+
+       spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+
+       return aqp1_destroyed;
+}
+
 static void parse_ec(struct ehca_shca *shca, u64 eqe)
 {
        u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
        u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
        u8 spec_event;
        struct ehca_sport *sport = &shca->sport[port - 1];
-       unsigned long flags;

        switch (ec) {
        case 0x30: /* port availability change */
                if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
-                       int suppress_event;
-                       /* replay modify_qp for sqps */
-                       spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-                       suppress_event = !sport->ibqp_sqp[IB_QPT_GSI];
-                       if (sport->ibqp_sqp[IB_QPT_SMI])
-                               ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
-                       if (!suppress_event)
-                               ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
-                       spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-
-                       /* AQP1 was destroyed, ignore this event */
-                       if (suppress_event)
-                               break;
+                       /* only replay modify_qp calls in autodetect mode;
+                        * if AQP1 was destroyed, the port is already down
+                        * again and we can drop the event.
+                        */
+                       if (ehca_nr_ports < 0)
+                               if (replay_modify_qp(sport))
+                                       break;

                        sport->port_state = IB_PORT_ACTIVE;
                        dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
                                            "is active");
-                       ehca_query_sma_attr(shca, port,
-                                           &sport->saved_attr);
+                       ehca_query_sma_attr(shca, port, &sport->saved_attr);
                } else {
                        sport->port_state = IB_PORT_DOWN;
                        dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 4d54b9f6456..9e05ee2db39 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -860,6 +860,11 @@ static struct ehca_qp *internal_create_qp(
        if (qp_type == IB_QPT_GSI) {
                h_ret = ehca_define_sqp(shca, my_qp, init_attr);
                if (h_ret != H_SUCCESS) {
+                       kfree(my_qp->mod_qp_parm);
+                       my_qp->mod_qp_parm = NULL;
+                       /* the QP pointer is no longer valid */
+                       shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
+                               NULL;
                        ret = ehca2ib_return_code(h_ret);
                        goto create_qp_exit6;
                }
--
cgit v1.2.3
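
The ehca_qp.c hunk applies the complementary rule: a pointer that has already
been published (here in sport->ibqp_sqp[]) must be withdrawn on the failure
path, since other code, such as the port-event handling shown in ehca_irq.c,
would otherwise reach a QP that the caller has meanwhile destroyed.  The
sketch below compresses that publish/unpublish pattern into user-space code
with invented names; unlike the real driver, it frees the object itself
instead of leaving that to the caller (e.g. ib_mad).

    #include <stdio.h>
    #include <stdlib.h>

    struct qp   { int id; };
    struct port { struct qp *special_qp; };   /* consulted by event handling */

    static struct qp *create_special_qp(struct port *port, int hw_step_fails)
    {
        struct qp *qp = calloc(1, sizeof(*qp));

        if (!qp)
            return NULL;
        port->special_qp = qp;        /* published early, like ibqp_sqp[] */

        if (hw_step_fails) {
            port->special_qp = NULL;  /* the QP pointer is no longer valid */
            free(qp);                 /* in the driver, the caller destroys it */
            return NULL;
        }
        return qp;
    }

    int main(void)
    {
        struct port port = { NULL };

        if (!create_special_qp(&port, 1) && !port.special_qp)
            printf("failure path left no stale reference behind\n");
        return 0;
    }
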
From fe25c56190bbc0951d7c53b4ccd148e669d69938 Mon Sep 17 00:00:00 2001
From: Yossi Etigin
Date: Wed, 12 Nov 2008 10:24:36 -0800
Subject: IPoIB: Don't enable NAPI when it's already enabled

If a P_Key is not present when an interface is created, ipoib_open()
will return after doing napi_enable().  ipoib_open() will be called
again from ipoib_pkey_poll() when the P_Key appears, after NAPI has
already been enabled, and try to enable it again.  This triggers a
BUG_ON() in napi_enable().

Fix this by moving the call to napi_enable() to after the test for
P_Key presence.

Signed-off-by: Yossi Etigin
Signed-off-by: Roland Dreier
---
 drivers/infiniband/ulp/ipoib/ipoib_main.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index fddded7900d..b1eeb5a427c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -106,12 +106,13 @@ int ipoib_open(struct net_device *dev)

        ipoib_dbg(priv, "bringing up interface\n");

-       napi_enable(&priv->napi);
        set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        if (ipoib_pkey_dev_delay_open(dev))
                return 0;

+       napi_enable(&priv->napi);
+
        if (ipoib_ib_dev_open(dev)) {
                napi_disable(&priv->napi);
                return -EINVAL;
--
cgit v1.2.3

From 93a3ab939ba90e00e193f0bad98f43fbdfbd925d Mon Sep 17 00:00:00 2001
From: Yossi Etigin
Date: Wed, 12 Nov 2008 10:24:38 -0800
Subject: IPoIB: Fix hang in ipoib_flush_paths()

ipoib_flush_paths() can hang during an SM up/down loop: if
path_rec_start() fails (for instance, because there is no sm_ah), the
path is still added to the path list by neigh_add_path().  Then,
ipoib_flush_paths() will wait for path->done, but it will never
complete because the request was not issued at all.

Fix this by completing path->done if issuing the query fails.

This fixes .

Signed-off-by: Yossi Etigin
Signed-off-by: Roland Dreier
---
 drivers/infiniband/ulp/ipoib/ipoib_main.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index b1eeb5a427c..0b2f601e8ca 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -547,6 +547,7 @@ static int path_rec_start(struct net_device *dev,
        if (path->query_id < 0) {
                ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
                path->query = NULL;
+               complete(&path->done);
                return path->query_id;
        }

--
cgit v1.2.3
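
The added complete() call matters because ipoib_flush_paths() waits on
path->done for every path on the list, including paths whose query was never
actually issued.  The toy program below makes the hang condition visible;
struct completion here is a plain flag standing in for the kernel primitive,
and path_query/start_query are invented names.

    #include <stdio.h>

    struct completion { int done; };                /* toy stand-in */
    static void complete(struct completion *c) { c->done = 1; }

    struct path_query {
        struct completion done;
        int issued;
    };

    /* Analogue of path_rec_start(): if the query cannot be issued, signal the
     * completion ourselves, because no completion handler will ever run. */
    static int start_query(struct path_query *q, int sa_available)
    {
        if (!sa_available) {
            complete(&q->done);
            return -1;
        }
        q->issued = 1;   /* the real handler calls complete() when the SA answers */
        return 0;
    }

    static void flush_paths(struct path_query *q)
    {
        /* stands in for wait_for_completion(&path->done) in ipoib_flush_paths() */
        if (q->done.done)
            printf("flush proceeds\n");
        else
            printf("flush would block forever\n");
    }

    int main(void)
    {
        struct path_query q = { { 0 }, 0 };

        start_query(&q, 0);   /* issuing fails, e.g. no sm_ah during an SM bounce */
        flush_paths(&q);
        return 0;
    }
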
From ff79ae80837cf45cb703b34824dd3862d2ddcb24 Mon Sep 17 00:00:00 2001
From: Yossi Etigin
Date: Wed, 12 Nov 2008 10:24:39 -0800
Subject: IPoIB: Fix crash in path_rec_completion()

Fix a crash in path_rec_completion() during an SM up/down loop.  If
more than one path record request is issued, the first completion
releases path->done, allowing ipoib_flush_paths() to free the path,
and thus corrupting it for the second completion.

Commit ee1e2c82 ("IPoIB: Refresh paths instead of flushing them on SM
change events") added the field path->valid and changed the test
"if (!path)" to "if (!path || !path->valid)".  This change made it
possible for a path with an outstanding query to pass the test and
issue another query on the same path.  Having two queries on the same
path leads to a crash.

This fixes .

Signed-off-by: Yossi Etigin
Signed-off-by: Roland Dreier
---
 drivers/infiniband/ulp/ipoib/ipoib_main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 0b2f601e8ca..85257f6b957 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -664,7 +664,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
        skb_push(skb, sizeof *phdr);
        __skb_queue_tail(&path->queue, skb);

-       if (path_rec_start(dev, path)) {
+       if (!path->query && path_rec_start(dev, path)) {
                spin_unlock_irqrestore(&priv->lock, flags);
                path_free(dev, path);
                return;
--
cgit v1.2.3
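
The added "!path->query &&" test encodes the invariant the analysis above
points to: a path may have at most one outstanding path record query, even
when path->valid says its contents are stale.  The toy version below, with
invented names, shows the guard refusing to start a second query while the
first is still in flight.

    #include <stdio.h>

    struct path {
        int   valid;   /* set when the record contents are current */
        void *query;   /* non-NULL while a path record query is outstanding */
    };

    /* Analogue of the fixed test in unicast_arp_send(). */
    static int maybe_start_query(struct path *p)
    {
        if (!p->query) {
            p->query = (void *)1;   /* pretend ib_sa_path_rec_get() succeeded */
            return 1;               /* query issued */
        }
        return 0;                   /* refresh already in flight: just queue the skb */
    }

    int main(void)
    {
        struct path p = { 0, NULL };   /* stale path, e.g. right after an SM change */

        printf("first send issues a query: %d\n", maybe_start_query(&p));
        printf("second send issues another: %d\n", maybe_start_query(&p));
        return 0;
    }

With the old test the second call would have issued another query, and the two
completions would then race over path->done and the freed path.
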