Diffstat (limited to 'drivers/infiniband')
24 files changed, 198 insertions, 157 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d441815a3e0..fde92ce4515 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1821,7 +1821,7 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
 	struct rdma_bind_list *bind_list;
 	int port, ret;
 
-	bind_list = kmalloc(sizeof *bind_list, GFP_KERNEL);
+	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
 	if (!bind_list)
 		return -ENOMEM;
 
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index b516b93b855..c859134c1da 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -266,7 +266,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
 	mutex_lock(&ctx->file->mut);
 	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
 		if (!ctx->backlog) {
-			ret = -EDQUOT;
+			ret = -ENOMEM;
 			kfree(uevent);
 			goto out;
 		}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index d737c738d87..f5e9aeec6f6 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -36,6 +36,7 @@
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 
 #include "cxio_resource.h"
 #include "cxio_hal.h"
@@ -497,9 +498,9 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
 	u64 sge_cmd, ctx0, ctx1;
 	u64 base_addr;
 	struct t3_modify_qp_wr *wqe;
-	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
-
+	struct sk_buff *skb;
+	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
 	if (!skb) {
 		PDBG("%s alloc_skb failed\n", __FUNCTION__);
 		return -ENOMEM;
@@ -507,7 +508,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
 	err = cxio_hal_init_ctrl_cq(rdev_p);
 	if (err) {
 		PDBG("%s err %d initializing ctrl_cq\n", __FUNCTION__, err);
-		return err;
+		goto err;
 	}
 	rdev_p->ctrl_qp.workq = dma_alloc_coherent(
 					&(rdev_p->rnic_info.pdev->dev),
@@ -517,7 +518,8 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
 					GFP_KERNEL);
 	if (!rdev_p->ctrl_qp.workq) {
 		PDBG("%s dma_alloc_coherent failed\n", __FUNCTION__);
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto err;
 	}
 	pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
 			   rdev_p->ctrl_qp.dma_addr);
@@ -555,6 +557,9 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
 	     rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2);
 	skb->priority = CPL_PRIORITY_CONTROL;
 	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
+err:
+	kfree_skb(skb);
+	return err;
 }
 
 static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index b21fde8b659..d0ed1d35ca3 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -305,8 +305,7 @@ static int status2errno(int status)
  */
 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
 {
-	if (skb) {
-		BUG_ON(skb_cloned(skb));
+	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
 		skb_trim(skb, 0);
 		skb_get(skb);
 	} else {
@@ -1415,6 +1414,7 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		wake_up(&ep->com.waitq);
 		break;
 	case FPDU_MODE:
+		start_ep_timer(ep);
 		__state_set(&ep->com, CLOSING);
 		attrs.next_state = IWCH_QP_STATE_CLOSING;
 		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
@@ -1425,7 +1425,6 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		disconnect = 0;
 		break;
 	case CLOSING:
-		start_ep_timer(ep);
 		__state_set(&ep->com, MORIBUND);
 		disconnect = 0;
 		break;
@@ -1487,8 +1486,10 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	case CONNECTING:
 		break;
 	case MPA_REQ_WAIT:
+		stop_ep_timer(ep);
 		break;
 	case MPA_REQ_SENT:
+		stop_ep_timer(ep);
 		connect_reply_upcall(ep, -ECONNRESET);
 		break;
 	case MPA_REP_SENT:
@@ -1507,9 +1508,10 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		get_ep(&ep->com);
 		break;
 	case MORIBUND:
+	case CLOSING:
 		stop_ep_timer(ep);
+		/*FALLTHROUGH*/
 	case FPDU_MODE:
-	case CLOSING:
 		if (ep->com.cm_id && ep->com.qp) {
 			attrs.next_state = IWCH_QP_STATE_ERROR;
 			ret = iwch_modify_qp(ep->com.qp->rhp,
@@ -1570,7 +1572,6 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	spin_lock_irqsave(&ep->com.lock, flags);
 	switch (ep->com.state) {
 	case CLOSING:
-		start_ep_timer(ep);
 		__state_set(&ep->com, MORIBUND);
 		break;
 	case MORIBUND:
@@ -1586,6 +1587,8 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		__state_set(&ep->com, DEAD);
 		release = 1;
 		break;
+	case ABORTING:
+		break;
 	case DEAD:
 	default:
 		BUG_ON(1);
@@ -1659,6 +1662,7 @@ static void ep_timeout(unsigned long arg)
 		break;
 	case MPA_REQ_WAIT:
 		break;
+	case CLOSING:
 	case MORIBUND:
 		if (ep->com.cm_id && ep->com.qp) {
 			attrs.next_state = IWCH_QP_STATE_ERROR;
@@ -1687,12 +1691,11 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 		return -ECONNRESET;
 	}
 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
-	state_set(&ep->com, CLOSING);
 	if (mpa_rev == 0)
 		abort_connection(ep, NULL, GFP_KERNEL);
 	else {
 		err = send_mpa_reject(ep, pdata, pdata_len);
-		err = send_halfclose(ep, GFP_KERNEL);
+		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
 	}
 	return 0;
 }
@@ -1957,11 +1960,11 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
 	case MPA_REQ_RCVD:
 	case MPA_REP_SENT:
 	case FPDU_MODE:
+		start_ep_timer(ep);
 		ep->com.state = CLOSING;
 		close = 1;
 		break;
 	case CLOSING:
-		start_ep_timer(ep);
 		ep->com.state = MORIBUND;
 		close = 1;
 		break;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index 54362afbf72..b40676662a8 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -47,12 +47,6 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 	struct iwch_qp_attributes attrs;
 	struct iwch_qp *qhp;
 
-	printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
-	       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
-	       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
-	       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
-	       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
-
 	spin_lock(&rnicp->lock);
 	qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
 
@@ -73,6 +67,12 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 		return;
 	}
 
+	printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
+	       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
+	       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
+	       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
+	       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
+
 	atomic_inc(&qhp->refcnt);
 	spin_unlock(&rnicp->lock);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 9947a144a92..24e0df04f7d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -331,6 +331,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	int ret = 0;
 	struct iwch_mm_entry *mm;
 	struct iwch_ucontext *ucontext;
+	u64 addr;
 
 	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff,
 	     key, len);
@@ -345,10 +346,11 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	mm = remove_mmap(ucontext, key, len);
 	if (!mm)
 		return -EINVAL;
+	addr = mm->addr;
 	kfree(mm);
 
-	if ((mm->addr >= rdev_p->rnic_info.udbell_physbase) &&
-	    (mm->addr < (rdev_p->rnic_info.udbell_physbase +
+	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
+	    (addr < (rdev_p->rnic_info.udbell_physbase +
 		       rdev_p->rnic_info.udbell_len))) {
 
 		/*
@@ -362,7 +364,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
 		vma->vm_flags &= ~VM_MAYREAD;
 		ret = io_remap_pfn_range(vma, vma->vm_start,
-					 mm->addr >> PAGE_SHIFT,
+					 addr >> PAGE_SHIFT,
 					 len, vma->vm_page_prot);
 	} else {
 
@@ -370,7 +372,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		 * Map WQ or CQ contig dma memory...
 		 */
 		ret = remap_pfn_range(vma, vma->vm_start,
-				      mm->addr >> PAGE_SHIFT,
+				      addr >> PAGE_SHIFT,
 				      len, vma->vm_page_prot);
 	}
 
@@ -463,9 +465,6 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
 	php = to_iwch_pd(pd);
 	rhp = php->rhp;
 
-	acc = iwch_convert_access(acc);
-
-
 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
 	if (!mhp)
 		return ERR_PTR(-ENOMEM);
@@ -491,12 +490,7 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
 	mhp->attr.pdid = php->pdid;
 	mhp->attr.zbva = 0;
 
-	/* NOTE: TPT perms are backwards from BIND WR perms! */
-	mhp->attr.perms = (acc & 0x1) << 3;
-	mhp->attr.perms |= (acc & 0x2) << 1;
-	mhp->attr.perms |= (acc & 0x4) >> 1;
-	mhp->attr.perms |= (acc & 0x8) >> 3;
-
+	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
 	mhp->attr.va_fbo = *iova_start;
 	mhp->attr.page_size = shift - 12;
@@ -525,7 +519,6 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
 	struct iwch_mr mh, *mhp;
 	struct iwch_pd *php;
 	struct iwch_dev *rhp;
-	int new_acc;
 	__be64 *page_list = NULL;
 	int shift = 0;
 	u64 total_size;
@@ -546,19 +539,20 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
 	if (rhp != php->rhp)
 		return -EINVAL;
 
-	new_acc = mhp->attr.perms;
-
 	memcpy(&mh, mhp, sizeof *mhp);
 
 	if (mr_rereg_mask & IB_MR_REREG_PD)
 		php = to_iwch_pd(pd);
 	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
-		mh.attr.perms = iwch_convert_access(acc);
-	if (mr_rereg_mask & IB_MR_REREG_TRANS)
+		mh.attr.perms = iwch_ib_to_tpt_access(acc);
+	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
 		ret = build_phys_page_list(buffer_list, num_phys_buf,
 					   iova_start,
 					   &total_size, &npages,
 					   &shift, &page_list);
+		if (ret)
+			return ret;
+	}
 
 	ret = iwch_reregister_mem(rhp, php, &mh, shift, page_list, npages);
 	kfree(page_list);
@@ -568,7 +562,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
 	if (mr_rereg_mask & IB_MR_REREG_PD)
 		mhp->attr.pdid = php->pdid;
 	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
-		mhp->attr.perms = acc;
+		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
 	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
 		mhp->attr.zbva = 0;
 		mhp->attr.va_fbo = *iova_start;
@@ -613,8 +607,6 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
 		goto err;
 	}
 
-	acc = iwch_convert_access(acc);
-
 	i = n = 0;
 
 	list_for_each_entry(chunk, &region->chunk_list, list)
@@ -630,10 +622,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
 	mhp->rhp = rhp;
 	mhp->attr.pdid = php->pdid;
 	mhp->attr.zbva = 0;
-	mhp->attr.perms = (acc & 0x1) << 3;
-	mhp->attr.perms |= (acc & 0x2) << 1;
-	mhp->attr.perms |= (acc & 0x4) >> 1;
-	mhp->attr.perms |= (acc & 0x8) >> 3;
+	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
 	mhp->attr.va_fbo = region->virt_base;
 	mhp->attr.page_size = shift - 12;
 	mhp->attr.len = (u32) region->length;
@@ -736,10 +725,8 @@ static int iwch_destroy_qp(struct ib_qp *ib_qp)
 	qhp = to_iwch_qp(ib_qp);
 	rhp = qhp->rhp;
 
-	if (qhp->attr.state == IWCH_QP_STATE_RTS) {
-		attrs.next_state = IWCH_QP_STATE_ERROR;
-		iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
-	}
+	attrs.next_state = IWCH_QP_STATE_ERROR;
+	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
 	wait_event(qhp->wait, !qhp->ep);
 
 	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index de0fe1b93a0..93bcc56756b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -286,27 +286,20 @@ static inline int iwch_convert_state(enum ib_qp_state ib_state)
 	}
 }
 
-enum iwch_mem_perms {
-	IWCH_MEM_ACCESS_LOCAL_READ = 1 << 0,
-	IWCH_MEM_ACCESS_LOCAL_WRITE = 1 << 1,
-	IWCH_MEM_ACCESS_REMOTE_READ = 1 << 2,
-	IWCH_MEM_ACCESS_REMOTE_WRITE = 1 << 3,
-	IWCH_MEM_ACCESS_ATOMICS = 1 << 4,
-	IWCH_MEM_ACCESS_BINDING = 1 << 5,
-	IWCH_MEM_ACCESS_LOCAL =
-		(IWCH_MEM_ACCESS_LOCAL_READ | IWCH_MEM_ACCESS_LOCAL_WRITE),
-	IWCH_MEM_ACCESS_REMOTE =
-		(IWCH_MEM_ACCESS_REMOTE_WRITE | IWCH_MEM_ACCESS_REMOTE_READ)
-	/* cannot go beyond 1 << 31 */
-} __attribute__ ((packed));
-
-static inline u32 iwch_convert_access(int acc)
+static inline u32 iwch_ib_to_tpt_access(int acc)
 {
-	return (acc & IB_ACCESS_REMOTE_WRITE ? IWCH_MEM_ACCESS_REMOTE_WRITE : 0)
-	       | (acc & IB_ACCESS_REMOTE_READ ? IWCH_MEM_ACCESS_REMOTE_READ : 0) |
-	       (acc & IB_ACCESS_LOCAL_WRITE ? IWCH_MEM_ACCESS_LOCAL_WRITE : 0) |
-	       (acc & IB_ACCESS_MW_BIND ? IWCH_MEM_ACCESS_BINDING : 0) |
-	       IWCH_MEM_ACCESS_LOCAL_READ;
+	return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
+	       (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
+	       (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
+	       TPT_LOCAL_READ;
+}
+
+static inline u32 iwch_ib_to_mwbind_access(int acc)
+{
+	return (acc & IB_ACCESS_REMOTE_WRITE ? T3_MEM_ACCESS_REM_WRITE : 0) |
+	       (acc & IB_ACCESS_REMOTE_READ ? T3_MEM_ACCESS_REM_READ : 0) |
+	       (acc & IB_ACCESS_LOCAL_WRITE ? T3_MEM_ACCESS_LOCAL_WRITE : 0) |
+	       T3_MEM_ACCESS_LOCAL_READ;
 }
 
 enum iwch_mmid_state {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 9ea00cc4a5f..0a472c9b44d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -439,7 +439,7 @@ int iwch_bind_mw(struct ib_qp *qp,
 	wqe->bind.type = T3_VA_BASED_TO;
 
 	/* TBD: check perms */
-	wqe->bind.perms = iwch_convert_access(mw_bind->mw_access_flags);
+	wqe->bind.perms = iwch_ib_to_mwbind_access(mw_bind->mw_access_flags);
 	wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
 	wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
 	wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 40404c9e281..82ded44c6ce 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -52,6 +52,8 @@ struct ehca_mw;
 struct ehca_pd;
 struct ehca_av;
 
+#include <linux/wait.h>
+
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_user_verbs.h>
 
@@ -153,7 +155,9 @@ struct ehca_cq {
 	spinlock_t cb_lock;
 	struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
 	struct list_head entry;
-	u32 nr_callbacks;
+	u32 nr_callbacks;	/* #events assigned to cpu by scaling code */
+	u32 nr_events;		/* #events seen */
+	wait_queue_head_t wait_completion;
 	spinlock_t task_lock;
 	u32 ownpid;
 	/* mmap counter for resources mapped into user space */
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 6ebfa27e4e1..e2cdc1a16fe 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -146,6 +146,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
 	spin_lock_init(&my_cq->spinlock);
 	spin_lock_init(&my_cq->cb_lock);
 	spin_lock_init(&my_cq->task_lock);
+	init_waitqueue_head(&my_cq->wait_completion);
 	my_cq->ownpid = current->tgid;
 
 	cq = &my_cq->ib_cq;
@@ -302,6 +303,16 @@ create_cq_exit1:
 	return cq;
 }
 
+static int get_cq_nr_events(struct ehca_cq *my_cq)
+{
+	int ret;
+	unsigned long flags;
+	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+	ret = my_cq->nr_events;
+	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+	return ret;
+}
+
 int ehca_destroy_cq(struct ib_cq *cq)
 {
 	u64 h_ret;
@@ -329,10 +340,11 @@ int ehca_destroy_cq(struct ib_cq *cq)
 	}
 
 	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-	while (my_cq->nr_callbacks) {
+	while (my_cq->nr_events) {
 		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-		yield();
+		wait_event(my_cq->wait_completion, !get_cq_nr_events(my_cq));
 		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		/* recheck nr_events to assure no cqe has just arrived */
 	}
 
 	idr_remove(&ehca_cq_idr, my_cq->token);
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 3ec53c687d0..f284be1c916 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -66,7 +66,9 @@ static void queue_comp_task(struct ehca_cq *__cq);
 
 static struct ehca_comp_pool* pool;
+#ifdef CONFIG_HOTPLUG_CPU
 static struct notifier_block comp_pool_callback_nb;
+#endif
 
 static inline void comp_event_callback(struct ehca_cq *cq)
 {
@@ -404,10 +406,11 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 	u32 token;
 	unsigned long flags;
 	struct ehca_cq *cq;
+
 	eqe_value = eqe->entry;
 	ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
 	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
-		ehca_dbg(&shca->ib_device, "... completion event");
+		ehca_dbg(&shca->ib_device, "Got completion event");
 		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
 		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
 		cq = idr_find(&ehca_cq_idr, token);
@@ -419,16 +422,20 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 			return;
 		}
 		reset_eq_pending(cq);
-		if (ehca_scaling_code) {
+		cq->nr_events++;
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		if (ehca_scaling_code)
 			queue_comp_task(cq);
-			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-		} else {
-			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+		else {
 			comp_event_callback(cq);
+			spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+			cq->nr_events--;
+			if (!cq->nr_events)
+				wake_up(&cq->wait_completion);
+			spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 		}
 	} else {
-		ehca_dbg(&shca->ib_device,
-			 "Got non completion event");
+		ehca_dbg(&shca->ib_device, "Got non completion event");
 		parse_identifier(shca, eqe_value);
 	}
 }
@@ -478,6 +485,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 					 "token=%x", token);
 				continue;
 			}
+			eqe_cache[eqe_cnt].cq->nr_events++;
 			spin_unlock(&ehca_cq_idr_lock);
 		} else
 			eqe_cache[eqe_cnt].cq = NULL;
@@ -504,12 +512,18 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 	/* call completion handler for cached eqes */
 	for (i = 0; i < eqe_cnt; i++)
 		if (eq->eqe_cache[i].cq) {
-			if (ehca_scaling_code) {
-				spin_lock(&ehca_cq_idr_lock);
+			if (ehca_scaling_code)
 				queue_comp_task(eq->eqe_cache[i].cq);
-				spin_unlock(&ehca_cq_idr_lock);
-			} else
-				comp_event_callback(eq->eqe_cache[i].cq);
+			else {
+				struct ehca_cq *cq = eq->eqe_cache[i].cq;
+				comp_event_callback(cq);
+				spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+				cq->nr_events--;
+				if (!cq->nr_events)
+					wake_up(&cq->wait_completion);
+				spin_unlock_irqrestore(&ehca_cq_idr_lock,
+						       flags);
+			}
 		} else {
 			ehca_dbg(&shca->ib_device, "Got non completion event");
 			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
@@ -523,7 +537,6 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 		if (!eqe)
 			break;
 		process_eqe(shca, eqe);
-		eqe_cnt++;
 	} while (1);
 
 unlock_irq_spinlock:
@@ -567,8 +580,7 @@ static void __queue_comp_task(struct ehca_cq *__cq,
 		list_add_tail(&__cq->entry, &cct->cq_list);
 		cct->cq_jobs++;
 		wake_up(&cct->wait_queue);
-	}
-	else
+	} else
 		__cq->nr_callbacks++;
 
 	spin_unlock(&__cq->task_lock);
@@ -577,18 +589,21 @@ static void __queue_comp_task(struct ehca_cq *__cq,
 
 static void queue_comp_task(struct ehca_cq *__cq)
 {
-	int cpu;
 	int cpu_id;
 	struct ehca_cpu_comp_task *cct;
+	int cq_jobs;
+	unsigned long flags;
 
-	cpu = get_cpu();
 	cpu_id = find_next_online_cpu(pool);
 	BUG_ON(!cpu_online(cpu_id));
 
 	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
 	BUG_ON(!cct);
 
-	if (cct->cq_jobs > 0) {
+	spin_lock_irqsave(&cct->task_lock, flags);
+	cq_jobs = cct->cq_jobs;
+	spin_unlock_irqrestore(&cct->task_lock, flags);
+	if (cq_jobs > 0) {
 		cpu_id = find_next_online_cpu(pool);
 		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
 		BUG_ON(!cct);
@@ -608,11 +623,17 @@ static void run_comp_task(struct ehca_cpu_comp_task* cct)
 		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
 		spin_unlock_irqrestore(&cct->task_lock, flags);
 		comp_event_callback(cq);
-		spin_lock_irqsave(&cct->task_lock, flags);
+		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+		cq->nr_events--;
+		if (!cq->nr_events)
+			wake_up(&cq->wait_completion);
+		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+		spin_lock_irqsave(&cct->task_lock, flags);
 		spin_lock(&cq->task_lock);
 		cq->nr_callbacks--;
-		if (cq->nr_callbacks == 0) {
+		if (!cq->nr_callbacks) {
 			list_del_init(cct->cq_list.next);
 			cct->cq_jobs--;
 		}
@@ -714,6 +735,7 @@ static void take_over_work(struct ehca_comp_pool *pool,
 
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 static int comp_pool_callback(struct notifier_block *nfb,
 			      unsigned long action,
 			      void *hcpu)
@@ -756,6 +778,7 @@ static int comp_pool_callback(struct notifier_block *nfb,
 
 	return NOTIFY_OK;
 }
+#endif
 
 int ehca_create_comp_pool(void)
 {
@@ -786,9 +809,11 @@ int ehca_create_comp_pool(void)
 		}
 	}
 
+#ifdef CONFIG_HOTPLUG_CPU
 	comp_pool_callback_nb.notifier_call = comp_pool_callback;
 	comp_pool_callback_nb.priority =0;
 	register_cpu_notifier(&comp_pool_callback_nb);
+#endif
 
 	printk(KERN_INFO "eHCA scaling code enabled\n");
 
@@ -802,7 +827,9 @@ void ehca_destroy_comp_pool(void)
 	if (!ehca_scaling_code)
 		return;
 
+#ifdef CONFIG_HOTPLUG_CPU
 	unregister_cpu_notifier(&comp_pool_callback_nb);
+#endif
 
 	for (i = 0; i < NR_CPUS; i++) {
 		if (cpu_online(i))
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index c1835121a82..059da9628bb 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0021");
+MODULE_VERSION("SVNEHCA_0022");
 
 int ehca_open_aqp1     = 0;
 int ehca_debug_level   = 0;
@@ -810,7 +810,7 @@ int __init ehca_module_init(void)
 	int ret;
 
 	printk(KERN_INFO "eHCA Infiniband Device Driver "
-	       "(Rel.: SVNEHCA_0021)\n");
+	       "(Rel.: SVNEHCA_0022)\n");
 	idr_init(&ehca_qp_idr);
 	idr_init(&ehca_cq_idr);
 	spin_lock_init(&ehca_qp_idr_lock);
diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
index f6f94904082..f87f003e3ef 100644
--- a/drivers/infiniband/hw/ipath/ipath_dma.c
+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
@@ -167,7 +167,7 @@ static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size,
 }
 
 static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
-				    void *cpu_addr, dma_addr_t dma_handle)
+				    void *cpu_addr, u64 dma_handle)
 {
 	free_pages((unsigned long) cpu_addr, get_order(size));
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 5b40a846ff9..ed55979bfd3 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -451,12 +451,18 @@ bail:
 	return ret;
 }
 
-static void remove_file(struct dentry *parent, char *name)
+static int remove_file(struct dentry *parent, char *name)
 {
 	struct dentry *tmp;
+	int ret;
 
 	tmp = lookup_one_len(name, parent, strlen(name));
+	if (IS_ERR(tmp)) {
+		ret = PTR_ERR(tmp);
+		goto bail;
+	}
+
 	spin_lock(&dcache_lock);
 	spin_lock(&tmp->d_lock);
 	if (!(d_unhashed(tmp) && tmp->d_inode)) {
@@ -469,6 +475,14 @@ static void remove_file(struct dentry *parent, char *name)
 		spin_unlock(&tmp->d_lock);
 		spin_unlock(&dcache_lock);
 	}
+
+	ret = 0;
+bail:
+	/*
+	 * We don't expect clients to care about the return value, but
+	 * it's there if they need it.
+	 */
+	return ret;
 }
 
 static int remove_device_files(struct super_block *sb,
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 8e4846b5c64..fdb576dcfaa 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -881,8 +881,8 @@ int mthca_init_mr_table(struct mthca_dev *dev)
 		}
 		mpts = mtts = 1 << i;
 	} else {
-		mpts = dev->limits.num_mtt_segs;
-		mtts = dev->limits.num_mpts;
+		mtts = dev->limits.num_mtt_segs;
+		mpts = dev->limits.num_mpts;
 	}
 
 	if (!mthca_is_memfree(dev) &&
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 71dc84bd425..1c6b63aca26 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1088,21 +1088,21 @@ static void mthca_unmap_memfree(struct mthca_dev *dev,
 static int mthca_alloc_memfree(struct mthca_dev *dev,
 			       struct mthca_qp *qp)
 {
-	int ret = 0;
-
 	if (mthca_is_memfree(dev)) {
 		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
 						 qp->qpn, &qp->rq.db);
 		if (qp->rq.db_index < 0)
-			return ret;
+			return -ENOMEM;
 
 		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
 						 qp->qpn, &qp->sq.db);
-		if (qp->sq.db_index < 0)
+		if (qp->sq.db_index < 0) {
 			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
+			return -ENOMEM;
+		}
 	}
 
-	return ret;
+	return 0;
 }
 
 static void mthca_free_memfree(struct mthca_dev *dev,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 3484e8ba24a..2b242a4823f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -131,7 +131,7 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int
 		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
 
 		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
-						 0, PAGE_SIZE, DMA_TO_DEVICE);
+						 0, PAGE_SIZE, DMA_FROM_DEVICE);
 		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
 			goto partial_error;
 	}
@@ -452,7 +452,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 			   skb->len, tx->mtu);
 		++priv->stats.tx_dropped;
 		++priv->stats.tx_errors;
-		ipoib_cm_skb_too_long(dev, skb, tx->mtu - INFINIBAND_ALEN);
+		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
 		return;
 	}
 
@@ -1095,7 +1095,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
 		/* List if sorted by LRU, start from tail,
 		 * stop when we see a recently used entry */
 		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
-		if (time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
+		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
 			break;
 		list_del_init(&p->list);
 		spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f2aa923ddbe..ba0ee5cf2ad 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -328,9 +328,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	struct ipoib_tx_buf *tx_req;
 	u64 addr;
 
-	if (unlikely(skb->len > priv->mcast_mtu + INFINIBAND_ALEN)) {
+	if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
 		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
-			   skb->len, priv->mcast_mtu + INFINIBAND_ALEN);
+			   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
 		++priv->stats.tx_dropped;
 		++priv->stats.tx_errors;
 		ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index f9dbc6f6814..f2a40ae8e7d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -380,7 +380,7 @@ static void path_rec_completion(int status,
 	struct net_device *dev = path->dev;
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_ah *ah = NULL;
-	struct ipoib_neigh *neigh;
+	struct ipoib_neigh *neigh, *tn;
 	struct sk_buff_head skqueue;
 	struct sk_buff *skb;
 	unsigned long flags;
@@ -418,7 +418,7 @@ static void path_rec_completion(int status,
 		while ((skb = __skb_dequeue(&path->queue)))
 			__skb_queue_tail(&skqueue, skb);
 
-		list_for_each_entry(neigh, &path->neigh_list, list) {
+		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
 			kref_get(&path->ah->ref);
 			neigh->ah = path->ah;
 			memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
@@ -814,7 +814,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
 	queue_work(ipoib_workqueue, &priv->restart_task);
 }
 
-static void ipoib_neigh_destructor(struct neighbour *n)
+static void ipoib_neigh_cleanup(struct neighbour *n)
 {
 	struct ipoib_neigh *neigh;
 	struct ipoib_dev_priv *priv = netdev_priv(n->dev);
@@ -822,7 +822,7 @@ static void ipoib_neigh_destructor(struct neighbour *n)
 	struct ipoib_ah *ah = NULL;
 
 	ipoib_dbg(priv,
-		  "neigh_destructor for %06x " IPOIB_GID_FMT "\n",
+		  "neigh_cleanup for %06x " IPOIB_GID_FMT "\n",
 		  IPOIB_QPN(n->ha),
 		  IPOIB_GID_RAW_ARG(n->ha + 4));
 
@@ -874,7 +874,7 @@ void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
 
 static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
 {
-	parms->neigh_destructor = ipoib_neigh_destructor;
+	parms->neigh_cleanup = ipoib_neigh_cleanup;
 
 	return 0;
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index bb2e3d5eee2..54fbead4de0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -407,6 +407,10 @@ static int ipoib_mcast_join_complete(int status,
 		queue_delayed_work(ipoib_workqueue,
 				   &priv->mcast_task, 0);
 	mutex_unlock(&mcast_mutex);
+
+	if (mcast == priv->broadcast)
+		netif_carrier_on(dev);
+
 	return 0;
 }
 
@@ -594,7 +598,6 @@ void ipoib_mcast_join_task(struct work_struct *work)
 	ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");
 
 	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
-	netif_carrier_on(dev);
 }
 
 int ipoib_mcast_start_thread(struct net_device *dev)
@@ -641,6 +644,9 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int ret = 0;
 
+	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+		ib_sa_free_multicast(mcast->mc);
+
 	if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
 		ipoib_dbg_mcast(priv, "leaving MGID " IPOIB_GID_FMT "\n",
 				IPOIB_GID_ARG(mcast->mcmember.mgid));
@@ -652,9 +658,6 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
 			ipoib_warn(priv, "ipoib_mcast_detach failed (result = %d)\n", ret);
 	}
 
-	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-		ib_sa_free_multicast(mcast->mc);
-
 	return 0;
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 3cb551b8875..7f3ec205e35 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -259,12 +259,13 @@ void ipoib_event(struct ib_event_handler *handler,
 	struct ipoib_dev_priv *priv =
 		container_of(handler, struct ipoib_dev_priv, event_handler);
 
-	if (record->event == IB_EVENT_PORT_ERR ||
-	    record->event == IB_EVENT_PKEY_CHANGE ||
-	    record->event == IB_EVENT_PORT_ACTIVE ||
-	    record->event == IB_EVENT_LID_CHANGE ||
-	    record->event == IB_EVENT_SM_CHANGE ||
-	    record->event == IB_EVENT_CLIENT_REREGISTER) {
+	if ((record->event == IB_EVENT_PORT_ERR ||
+	     record->event == IB_EVENT_PKEY_CHANGE ||
+	     record->event == IB_EVENT_PORT_ACTIVE ||
+	     record->event == IB_EVENT_LID_CHANGE ||
+	     record->event == IB_EVENT_SM_CHANGE ||
+	     record->event == IB_EVENT_CLIENT_REREGISTER) &&
+	    record->element.port_num == priv->port) {
 		ipoib_dbg(priv, "Port state change event\n");
 		queue_work(ipoib_workqueue, &priv->flush_task);
 	}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index cae8c96a55f..8960196ffb0 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -245,7 +245,6 @@ struct iser_conn {
 	wait_queue_head_t	     wait;	      /* waitq for conn/disconn */
 	atomic_t		     post_recv_buf_count; /* posted rx count    */
 	atomic_t		     post_send_buf_count; /* posted tx count    */
-	struct work_struct	     comperror_work;  /* conn term sleepable ctx*/
 	char			     name[ISER_OBJECT_NAME_SIZE];
 	struct iser_page_vec	     *page_vec;	      /* represents SG to fmr maps*
						       * maps serialized as tx is*/
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 89e37283c83..278fcbccc2d 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -658,6 +658,7 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
 {
 	int deferred;
 	int is_rdma_aligned = 1;
+	struct iser_regd_buf *regd;
 
 	/* if we were reading, copy back to unaligned sglist,
 	 * anyway dma_unmap and free the copy
@@ -672,20 +673,20 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
 	}
 
 	if (iser_ctask->dir[ISER_DIR_IN]) {
-		deferred = iser_regd_buff_release
-			(&iser_ctask->rdma_regd[ISER_DIR_IN]);
+		regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+		deferred = iser_regd_buff_release(regd);
 		if (deferred) {
-			iser_err("References remain for BUF-IN rdma reg\n");
-			BUG();
+			iser_err("%d references remain for BUF-IN rdma reg\n",
+				 atomic_read(&regd->ref_count));
 		}
 	}
 
 	if (iser_ctask->dir[ISER_DIR_OUT]) {
-		deferred = iser_regd_buff_release
-			(&iser_ctask->rdma_regd[ISER_DIR_OUT]);
+		regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+		deferred = iser_regd_buff_release(regd);
 		if (deferred) {
-			iser_err("References remain for BUF-OUT rdma reg\n");
-			BUG();
+			iser_err("%d references remain for BUF-OUT rdma reg\n",
+				 atomic_read(&regd->ref_count));
 		}
 	}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 693b7700289..1fc967464a2 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -48,7 +48,6 @@
 
 static void iser_cq_tasklet_fn(unsigned long data);
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
-static void iser_comp_error_worker(struct work_struct *work);
 
 static void iser_cq_event_callback(struct ib_event *cause, void *context)
 {
@@ -480,7 +479,6 @@ int iser_conn_init(struct iser_conn **ibconn)
 	init_waitqueue_head(&ib_conn->wait);
 	atomic_set(&ib_conn->post_recv_buf_count, 0);
 	atomic_set(&ib_conn->post_send_buf_count, 0);
-	INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker);
 	INIT_LIST_HEAD(&ib_conn->conn_list);
 	spin_lock_init(&ib_conn->lock);
 
@@ -753,26 +751,6 @@ int iser_post_send(struct iser_desc *tx_desc)
 	return ret_val;
 }
 
-static void iser_comp_error_worker(struct work_struct *work)
-{
-	struct iser_conn *ib_conn =
-		container_of(work, struct iser_conn, comperror_work);
-
-	/* getting here when the state is UP means that the conn is being *
-	 * terminated asynchronously from the iSCSI layer's perspective.  */
-	if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
-				      ISER_CONN_TERMINATING))
-		iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
-				   ISCSI_ERR_CONN_FAILED);
-
-	/* complete the termination process if disconnect event was delivered *
-	 * note there are no more non completed posts to the QP               */
-	if (ib_conn->disc_evt_flag) {
-		ib_conn->state = ISER_CONN_DOWN;
-		wake_up_interruptible(&ib_conn->wait);
-	}
-}
-
 static void iser_handle_comp_error(struct iser_desc *desc)
 {
 	struct iser_dto *dto = &desc->dto;
@@ -791,8 +769,22 @@ static void iser_handle_comp_error(struct iser_desc *desc)
 	}
 
 	if (atomic_read(&ib_conn->post_recv_buf_count) == 0 &&
-	    atomic_read(&ib_conn->post_send_buf_count) == 0)
-		schedule_work(&ib_conn->comperror_work);
+	    atomic_read(&ib_conn->post_send_buf_count) == 0) {
+		/* getting here when the state is UP means that the conn is *
+		 * being terminated asynchronously from the iSCSI layer's   *
+		 * perspective.                                             */
+		if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
+					      ISER_CONN_TERMINATING))
+			iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+					   ISCSI_ERR_CONN_FAILED);
+
+		/* complete the termination process if disconnect event was delivered *
+		 * note there are no more non completed posts to the QP               */
+		if (ib_conn->disc_evt_flag) {
+			ib_conn->state = ISER_CONN_DOWN;
+			wake_up_interruptible(&ib_conn->wait);
+		}
+	}
 }
 
 static void iser_cq_tasklet_fn(unsigned long data)
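Two patterns recur across the hunks above. First, the cxgb3 memory-registration changes drop the open-coded permission bit shuffling ("TPT perms are backwards from BIND WR perms") in favor of per-flag translation helpers, iwch_ib_to_tpt_access() and iwch_ib_to_mwbind_access(). Below is a minimal standalone sketch of that pattern; the flag values are illustrative placeholders, not the kernel's real IB_ACCESS_* or TPT_* constants:

/*
 * Sketch of the per-flag translation style adopted by the patch.
 * All constants here are made-up stand-ins for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define IB_ACCESS_LOCAL_WRITE	(1 << 0)	/* placeholder value */
#define IB_ACCESS_REMOTE_WRITE	(1 << 1)	/* placeholder value */
#define IB_ACCESS_REMOTE_READ	(1 << 2)	/* placeholder value */

#define TPT_LOCAL_READ		(1 << 0)	/* placeholder value */
#define TPT_LOCAL_WRITE		(1 << 1)	/* placeholder value */
#define TPT_REMOTE_READ		(1 << 2)	/* placeholder value */
#define TPT_REMOTE_WRITE	(1 << 3)	/* placeholder value */

/* Test each source flag and set the matching destination flag;
 * no assumption that the two encodings line up bit-for-bit. */
static uint32_t ib_to_tpt_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ  ? TPT_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE  ? TPT_LOCAL_WRITE  : 0) |
	       TPT_LOCAL_READ;	/* local read is always granted */
}

int main(void)
{
	int acc = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
	printf("ib 0x%x -> tpt 0x%x\n", acc, ib_to_tpt_access(acc));
	return 0;
}

Second, the cxio_hal_init_ctrl_qp() and mthca_alloc_memfree() hunks replace early returns with unwinding error paths, so resources acquired before a failure (the pre-allocated skb, the RQ doorbell record) are released. A small sketch of that goto-unwind idiom, with made-up resource names standing in for the driver objects:

/*
 * Sketch of the goto-unwind idiom: each later failure jumps to a
 * label that frees everything acquired so far, in reverse order.
 */
#include <stdlib.h>

struct ctx { void *a; void *b; };

static int ctx_init(struct ctx *c)
{
	c->a = malloc(64);
	if (!c->a)
		return -1;		/* nothing to unwind yet */

	c->b = malloc(64);
	if (!c->b)
		goto err_free_a;	/* unwind what we already hold */

	return 0;

err_free_a:
	free(c->a);
	c->a = NULL;
	return -1;
}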