Diffstat (limited to 'drivers')
67 files changed, 1551 insertions, 1299 deletions
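
Much of the InfiniBand portion of this series converts the mthca CQ/QP/SRQ reference counts from atomic_t to plain integers protected by the corresponding table spinlock (see the mthca_cq.c, mthca_qp.c, mthca_srq.c and mthca_provider.h hunks below). The following is a schematic sketch of that locking pattern only; the struct and function names ("obj", "obj_table") are illustrative stand-ins, not the driver's actual identifiers:

#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/sched.h>

/* Sketch: "obj"/"obj_table" stand in for mthca_cq/qp/srq and their tables. */
struct obj {
	int			refcount;	/* protected by obj_table.lock */
	wait_queue_head_t	wait;
};

struct obj_table {
	spinlock_t	lock;
	/* ... lookup structure keyed by object number ... */
};

/* Event path: hold a reference across the handler. */
static void obj_event(struct obj_table *table, struct obj *obj)
{
	spin_lock(&table->lock);
	++obj->refcount;
	spin_unlock(&table->lock);

	/* ... dispatch the event to the consumer ... */

	spin_lock(&table->lock);
	if (!--obj->refcount)
		wake_up(&obj->wait);
	spin_unlock(&table->lock);
}

/* Locked read of the count, used as the wait_event() condition. */
static int obj_refcount(struct obj_table *table, struct obj *obj)
{
	int c;

	spin_lock_irq(&table->lock);
	c = obj->refcount;
	spin_unlock_irq(&table->lock);

	return c;
}

/* Destroy path: unhook from the table, drop its reference, wait for the rest. */
static void obj_destroy(struct obj_table *table, struct obj *obj)
{
	spin_lock_irq(&table->lock);
	/* ... clear the table entry so new lookups cannot find obj ... */
	--obj->refcount;
	spin_unlock_irq(&table->lock);

	wait_event(obj->wait, !obj_refcount(table, obj));
	/* obj is now unreachable and unreferenced; safe to free */
}

Dropping the table's reference under the same lock that guards lookups means the count can only reach zero once the object is unreachable, which matches the destroy sequence described in the updated mthca_provider.h comment.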
diff --git a/drivers/base/class.c b/drivers/base/class.c index 0e71dff327c..b1ea4df85c7 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -456,6 +456,35 @@ static void class_device_remove_attrs(struct class_device * cd) } } +static int class_device_add_groups(struct class_device * cd) +{ + int i; + int error = 0; + + if (cd->groups) { + for (i = 0; cd->groups[i]; i++) { + error = sysfs_create_group(&cd->kobj, cd->groups[i]); + if (error) { + while (--i >= 0) + sysfs_remove_group(&cd->kobj, cd->groups[i]); + goto out; + } + } + } +out: + return error; +} + +static void class_device_remove_groups(struct class_device * cd) +{ + int i; + if (cd->groups) { + for (i = 0; cd->groups[i]; i++) { + sysfs_remove_group(&cd->kobj, cd->groups[i]); + } + } +} + static ssize_t show_dev(struct class_device *class_dev, char *buf) { return print_dev_t(buf, class_dev->devt); @@ -559,6 +588,8 @@ int class_device_add(struct class_device *class_dev) class_name); } + class_device_add_groups(class_dev); + kobject_uevent(&class_dev->kobj, KOBJ_ADD); /* notify any interfaces this device is now here */ @@ -672,6 +703,7 @@ void class_device_del(struct class_device *class_dev) if (class_dev->devt_attr) class_device_remove_file(class_dev, class_dev->devt_attr); class_device_remove_attrs(class_dev); + class_device_remove_groups(class_dev); kobject_uevent(&class_dev->kobj, KOBJ_REMOVE); kobject_del(&class_dev->kobj); diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index 02114a0bd0d..128b2632512 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c @@ -1981,10 +1981,6 @@ static int __init cmm_init(void) if (!cmm_class) return -1; - rc = pcmcia_register_driver(&cm4000_driver); - if (rc < 0) - return rc; - major = register_chrdev(0, DEVICE_NAME, &cm4000_fops); if (major < 0) { printk(KERN_WARNING MODULE_NAME @@ -1992,6 +1988,12 @@ static int __init cmm_init(void) return -1; } + rc = pcmcia_register_driver(&cm4000_driver); + if (rc < 0) { + unregister_chrdev(major, DEVICE_NAME); + return rc; + } + return 0; } diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c index 29efa64580a..47a8465bf95 100644 --- a/drivers/char/pcmcia/cm4040_cs.c +++ b/drivers/char/pcmcia/cm4040_cs.c @@ -724,16 +724,19 @@ static int __init cm4040_init(void) if (!cmx_class) return -1; - rc = pcmcia_register_driver(&reader_driver); - if (rc < 0) - return rc; - major = register_chrdev(0, DEVICE_NAME, &reader_fops); if (major < 0) { printk(KERN_WARNING MODULE_NAME ": could not get major number\n"); return -1; } + + rc = pcmcia_register_driver(&reader_driver); + if (rc < 0) { + unregister_chrdev(major, DEVICE_NAME); + return rc; + } + return 0; } diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 956d121cb16..3e6ffcaa5af 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -74,6 +74,8 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */ static DEFINE_MUTEX (dbs_mutex); static DECLARE_WORK (dbs_work, do_dbs_timer, NULL); +static struct workqueue_struct *dbs_workq; + struct dbs_tuners { unsigned int sampling_rate; unsigned int sampling_down_factor; @@ -364,23 +366,29 @@ static void do_dbs_timer(void *data) mutex_lock(&dbs_mutex); for_each_online_cpu(i) dbs_check_cpu(i); - schedule_delayed_work(&dbs_work, - usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); + queue_delayed_work(dbs_workq, &dbs_work, + usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); 
mutex_unlock(&dbs_mutex); } static inline void dbs_timer_init(void) { INIT_WORK(&dbs_work, do_dbs_timer, NULL); - schedule_delayed_work(&dbs_work, - usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); + if (!dbs_workq) + dbs_workq = create_singlethread_workqueue("ondemand"); + if (!dbs_workq) { + printk(KERN_ERR "ondemand: Cannot initialize kernel thread\n"); + return; + } + queue_delayed_work(dbs_workq, &dbs_work, + usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); return; } static inline void dbs_timer_exit(void) { - cancel_delayed_work(&dbs_work); - return; + if (dbs_workq) + cancel_rearming_delayed_workqueue(dbs_workq, &dbs_work); } static int cpufreq_governor_dbs(struct cpufreq_policy *policy, @@ -489,8 +497,12 @@ static int __init cpufreq_gov_dbs_init(void) static void __exit cpufreq_gov_dbs_exit(void) { - /* Make sure that the scheduled work is indeed not running */ - flush_scheduled_work(); + /* Make sure that the scheduled work is indeed not running. + Assumes the timer has been cancelled first. */ + if (dbs_workq) { + flush_workqueue(dbs_workq); + destroy_workqueue(dbs_workq); + } cpufreq_unregister_governor(&cpufreq_gov_dbs); } diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 15121cb5a1f..21f9282c1b2 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -336,7 +336,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr, switch (width) { case 4: ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >> - (offset % 4)) & 0xf); + (4 - (offset % 8))) & 0xf); break; case 8: ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]); diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index 312cf90731e..205854e9c66 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c @@ -238,9 +238,9 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn, spin_lock(&dev->cq_table.lock); cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); - if (cq) - atomic_inc(&cq->refcount); + ++cq->refcount; + spin_unlock(&dev->cq_table.lock); if (!cq) { @@ -254,8 +254,10 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn, if (cq->ibcq.event_handler) cq->ibcq.event_handler(&event, cq->ibcq.cq_context); - if (atomic_dec_and_test(&cq->refcount)) + spin_lock(&dev->cq_table.lock); + if (!--cq->refcount) wake_up(&cq->wait); + spin_unlock(&dev->cq_table.lock); } static inline int is_recv_cqe(struct mthca_cqe *cqe) @@ -267,23 +269,13 @@ static inline int is_recv_cqe(struct mthca_cqe *cqe) return !(cqe->is_send & 0x80); } -void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, +void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, struct mthca_srq *srq) { - struct mthca_cq *cq; struct mthca_cqe *cqe; u32 prod_index; int nfreed = 0; - spin_lock_irq(&dev->cq_table.lock); - cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); - if (cq) - atomic_inc(&cq->refcount); - spin_unlock_irq(&dev->cq_table.lock); - - if (!cq) - return; - spin_lock_irq(&cq->lock); /* @@ -301,7 +293,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, if (0) mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n", - qpn, cqn, cq->cons_index, prod_index); + qpn, cq->cqn, cq->cons_index, prod_index); /* * Now sweep backwards through the CQ, removing CQ entries @@ -325,8 +317,6 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, } spin_unlock_irq(&cq->lock); - if 
(atomic_dec_and_test(&cq->refcount)) - wake_up(&cq->wait); } void mthca_cq_resize_copy_cqes(struct mthca_cq *cq) @@ -821,7 +811,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, } spin_lock_init(&cq->lock); - atomic_set(&cq->refcount, 1); + cq->refcount = 1; init_waitqueue_head(&cq->wait); memset(cq_context, 0, sizeof *cq_context); @@ -896,6 +886,17 @@ err_out: return err; } +static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq) +{ + int c; + + spin_lock_irq(&dev->cq_table.lock); + c = cq->refcount; + spin_unlock_irq(&dev->cq_table.lock); + + return c; +} + void mthca_free_cq(struct mthca_dev *dev, struct mthca_cq *cq) { @@ -929,6 +930,7 @@ void mthca_free_cq(struct mthca_dev *dev, spin_lock_irq(&dev->cq_table.lock); mthca_array_clear(&dev->cq_table.cq, cq->cqn & (dev->limits.num_cqs - 1)); + --cq->refcount; spin_unlock_irq(&dev->cq_table.lock); if (dev->mthca_flags & MTHCA_FLAG_MSI_X) @@ -936,8 +938,7 @@ void mthca_free_cq(struct mthca_dev *dev, else synchronize_irq(dev->pdev->irq); - atomic_dec(&cq->refcount); - wait_event(cq->wait, !atomic_read(&cq->refcount)); + wait_event(cq->wait, !get_cq_refcount(dev, cq)); if (cq->is_kernel) { mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index 4c1dcb4c182..f8160b8de09 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -496,7 +496,7 @@ void mthca_free_cq(struct mthca_dev *dev, void mthca_cq_completion(struct mthca_dev *dev, u32 cqn); void mthca_cq_event(struct mthca_dev *dev, u32 cqn, enum ib_event_type event_type); -void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, +void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, struct mthca_srq *srq); void mthca_cq_resize_copy_cqes(struct mthca_cq *cq); int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent); diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index 25e1c1db9a4..a486dec1707 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c @@ -761,6 +761,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) int __devinit mthca_init_mr_table(struct mthca_dev *dev) { + unsigned long addr; int err, i; err = mthca_alloc_init(&dev->mr_table.mpt_alloc, @@ -796,9 +797,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev) goto err_fmr_mpt; } + addr = pci_resource_start(dev->pdev, 4) + + ((pci_resource_len(dev->pdev, 4) - 1) & + dev->mr_table.mpt_base); + dev->mr_table.tavor_fmr.mpt_base = - ioremap(dev->mr_table.mpt_base, - (1 << i) * sizeof (struct mthca_mpt_entry)); + ioremap(addr, (1 << i) * sizeof(struct mthca_mpt_entry)); if (!dev->mr_table.tavor_fmr.mpt_base) { mthca_warn(dev, "MPT ioremap for FMR failed.\n"); @@ -806,9 +810,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev) goto err_fmr_mpt; } + addr = pci_resource_start(dev->pdev, 4) + + ((pci_resource_len(dev->pdev, 4) - 1) & + dev->mr_table.mtt_base); + dev->mr_table.tavor_fmr.mtt_base = - ioremap(dev->mr_table.mtt_base, - (1 << i) * MTHCA_MTT_SEG_SIZE); + ioremap(addr, (1 << i) * MTHCA_MTT_SEG_SIZE); if (!dev->mr_table.tavor_fmr.mtt_base) { mthca_warn(dev, "MTT ioremap for FMR failed.\n"); err = -ENOMEM; diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h index 6676a786d69..179a8f610d0 100644 --- 
a/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/drivers/infiniband/hw/mthca/mthca_provider.h @@ -139,11 +139,12 @@ struct mthca_ah { * a qp may be locked, with the send cq locked first. No other * nesting should be done. * - * Each struct mthca_cq/qp also has an atomic_t ref count. The - * pointer from the cq/qp_table to the struct counts as one reference. - * This reference also is good for access through the consumer API, so - * modifying the CQ/QP etc doesn't need to take another reference. - * Access because of a completion being polled does need a reference. + * Each struct mthca_cq/qp also has an ref count, protected by the + * corresponding table lock. The pointer from the cq/qp_table to the + * struct counts as one reference. This reference also is good for + * access through the consumer API, so modifying the CQ/QP etc doesn't + * need to take another reference. Access to a QP because of a + * completion being polled does not need a reference either. * * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the * destroy function to sleep on. @@ -159,8 +160,9 @@ struct mthca_ah { * - decrement ref count; if zero, wake up waiters * * To destroy a CQ/QP, we can do the following: - * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock - * - decrement ref count + * - lock cq/qp_table + * - remove pointer and decrement ref count + * - unlock cq/qp_table lock * - wait_event until ref count is zero * * It is the consumer's responsibilty to make sure that no QP @@ -197,7 +199,7 @@ struct mthca_cq_resize { struct mthca_cq { struct ib_cq ibcq; spinlock_t lock; - atomic_t refcount; + int refcount; int cqn; u32 cons_index; struct mthca_cq_buf buf; @@ -217,7 +219,7 @@ struct mthca_cq { struct mthca_srq { struct ib_srq ibsrq; spinlock_t lock; - atomic_t refcount; + int refcount; int srqn; int max; int max_gs; @@ -254,7 +256,7 @@ struct mthca_wq { struct mthca_qp { struct ib_qp ibqp; - atomic_t refcount; + int refcount; u32 qpn; int is_direct; u8 port; /* for SQP and memfree use only */ diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index f37b0e36732..19765f6f8d5 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -240,7 +240,7 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn, spin_lock(&dev->qp_table.lock); qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); if (qp) - atomic_inc(&qp->refcount); + ++qp->refcount; spin_unlock(&dev->qp_table.lock); if (!qp) { @@ -257,8 +257,10 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn, if (qp->ibqp.event_handler) qp->ibqp.event_handler(&event, qp->ibqp.qp_context); - if (atomic_dec_and_test(&qp->refcount)) + spin_lock(&dev->qp_table.lock); + if (!--qp->refcount) wake_up(&qp->wait); + spin_unlock(&dev->qp_table.lock); } static int to_mthca_state(enum ib_qp_state ib_state) @@ -833,10 +835,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) * entries and reinitialize the QP. */ if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { - mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, + mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); if (qp->ibqp.send_cq != qp->ibqp.recv_cq) - mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, + mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, qp->ibqp.srq ? 
to_msrq(qp->ibqp.srq) : NULL); mthca_wq_init(&qp->sq); @@ -1096,7 +1098,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev, int ret; int i; - atomic_set(&qp->refcount, 1); + qp->refcount = 1; init_waitqueue_head(&qp->wait); qp->state = IB_QPS_RESET; qp->atomic_rd_en = 0; @@ -1318,6 +1320,17 @@ int mthca_alloc_sqp(struct mthca_dev *dev, return err; } +static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) +{ + int c; + + spin_lock_irq(&dev->qp_table.lock); + c = qp->refcount; + spin_unlock_irq(&dev->qp_table.lock); + + return c; +} + void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp) { @@ -1339,14 +1352,14 @@ void mthca_free_qp(struct mthca_dev *dev, spin_lock(&dev->qp_table.lock); mthca_array_clear(&dev->qp_table.qp, qp->qpn & (dev->limits.num_qps - 1)); + --qp->refcount; spin_unlock(&dev->qp_table.lock); if (send_cq != recv_cq) spin_unlock(&recv_cq->lock); spin_unlock_irq(&send_cq->lock); - atomic_dec(&qp->refcount); - wait_event(qp->wait, !atomic_read(&qp->refcount)); + wait_event(qp->wait, !get_qp_refcount(dev, qp)); if (qp->state != IB_QPS_RESET) mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, @@ -1358,10 +1371,10 @@ void mthca_free_qp(struct mthca_dev *dev, * unref the mem-free tables and free the QPN in our table. */ if (!qp->ibqp.uobject) { - mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, + mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); if (qp->ibqp.send_cq != qp->ibqp.recv_cq) - mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, + mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); mthca_free_memfree(dev, qp); diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c index adcaf85355a..1ea433291fa 100644 --- a/drivers/infiniband/hw/mthca/mthca_srq.c +++ b/drivers/infiniband/hw/mthca/mthca_srq.c @@ -241,7 +241,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, goto err_out_mailbox; spin_lock_init(&srq->lock); - atomic_set(&srq->refcount, 1); + srq->refcount = 1; init_waitqueue_head(&srq->wait); if (mthca_is_memfree(dev)) @@ -308,6 +308,17 @@ err_out: return err; } +static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq) +{ + int c; + + spin_lock_irq(&dev->srq_table.lock); + c = srq->refcount; + spin_unlock_irq(&dev->srq_table.lock); + + return c; +} + void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) { struct mthca_mailbox *mailbox; @@ -329,10 +340,10 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) spin_lock_irq(&dev->srq_table.lock); mthca_array_clear(&dev->srq_table.srq, srq->srqn & (dev->limits.num_srqs - 1)); + --srq->refcount; spin_unlock_irq(&dev->srq_table.lock); - atomic_dec(&srq->refcount); - wait_event(srq->wait, !atomic_read(&srq->refcount)); + wait_event(srq->wait, !get_srq_refcount(dev, srq)); if (!srq->ibsrq.uobject) { mthca_free_srq_buf(dev, srq); @@ -414,7 +425,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn, spin_lock(&dev->srq_table.lock); srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); if (srq) - atomic_inc(&srq->refcount); + ++srq->refcount; spin_unlock(&dev->srq_table.lock); if (!srq) { @@ -431,8 +442,10 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn, srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); out: - if (atomic_dec_and_test(&srq->refcount)) + spin_lock(&dev->srq_table.lock); + if (!--srq->refcount) 
wake_up(&srq->wait); + spin_unlock(&dev->srq_table.lock); } /* diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 4ca175553f9..f887780e809 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -158,10 +158,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) if (priv->pkey == pkey) { unregister_netdev(priv->dev); ipoib_dev_cleanup(priv->dev); - list_del(&priv->list); - - kfree(priv); + free_netdev(priv->dev); ret = 0; break; diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 5bb55742ada..c32ce4348e1 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -409,6 +409,34 @@ static int srp_connect_target(struct srp_target_port *target) } } +static void srp_unmap_data(struct scsi_cmnd *scmnd, + struct srp_target_port *target, + struct srp_request *req) +{ + struct scatterlist *scat; + int nents; + + if (!scmnd->request_buffer || + (scmnd->sc_data_direction != DMA_TO_DEVICE && + scmnd->sc_data_direction != DMA_FROM_DEVICE)) + return; + + /* + * This handling of non-SG commands can be killed when the + * SCSI midlayer no longer generates non-SG commands. + */ + if (likely(scmnd->use_sg)) { + nents = scmnd->use_sg; + scat = scmnd->request_buffer; + } else { + nents = 1; + scat = &req->fake_sg; + } + + dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents, + scmnd->sc_data_direction); +} + static int srp_reconnect_target(struct srp_target_port *target) { struct ib_cm_id *new_cm_id; @@ -455,16 +483,16 @@ static int srp_reconnect_target(struct srp_target_port *target) list_for_each_entry(req, &target->req_queue, list) { req->scmnd->result = DID_RESET << 16; req->scmnd->scsi_done(req->scmnd); + srp_unmap_data(req->scmnd, target, req); } target->rx_head = 0; target->tx_head = 0; target->tx_tail = 0; - target->req_head = 0; - for (i = 0; i < SRP_SQ_SIZE - 1; ++i) - target->req_ring[i].next = i + 1; - target->req_ring[SRP_SQ_SIZE - 1].next = -1; + INIT_LIST_HEAD(&target->free_reqs); INIT_LIST_HEAD(&target->req_queue); + for (i = 0; i < SRP_SQ_SIZE; ++i) + list_add_tail(&target->req_ring[i].list, &target->free_reqs); ret = srp_connect_target(target); if (ret) @@ -589,40 +617,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, return len; } -static void srp_unmap_data(struct scsi_cmnd *scmnd, - struct srp_target_port *target, - struct srp_request *req) -{ - struct scatterlist *scat; - int nents; - - if (!scmnd->request_buffer || - (scmnd->sc_data_direction != DMA_TO_DEVICE && - scmnd->sc_data_direction != DMA_FROM_DEVICE)) - return; - - /* - * This handling of non-SG commands can be killed when the - * SCSI midlayer no longer generates non-SG commands. 
- */ - if (likely(scmnd->use_sg)) { - nents = scmnd->use_sg; - scat = scmnd->request_buffer; - } else { - nents = 1; - scat = &req->fake_sg; - } - - dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents, - scmnd->sc_data_direction); -} - -static void srp_remove_req(struct srp_target_port *target, struct srp_request *req, - int index) +static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) { - list_del(&req->list); - req->next = target->req_head; - target->req_head = index; + srp_unmap_data(req->scmnd, target, req); + list_move_tail(&req->list, &target->free_reqs); } static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) @@ -647,7 +645,7 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) req->tsk_status = rsp->data[3]; complete(&req->done); } else { - scmnd = req->scmnd; + scmnd = req->scmnd; if (!scmnd) printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n", (unsigned long long) rsp->tag); @@ -665,14 +663,11 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt); - srp_unmap_data(scmnd, target, req); - if (!req->tsk_mgmt) { - req->scmnd = NULL; scmnd->host_scribble = (void *) -1L; scmnd->scsi_done(scmnd); - srp_remove_req(target, req, rsp->tag & ~SRP_TAG_TSK_MGMT); + srp_remove_req(target, req); } else req->cmd_done = 1; } @@ -859,7 +854,6 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, struct srp_request *req; struct srp_iu *iu; struct srp_cmd *cmd; - long req_index; int len; if (target->state == SRP_TARGET_CONNECTING) @@ -879,22 +873,20 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma, SRP_MAX_IU_LEN, DMA_TO_DEVICE); - req_index = target->req_head; + req = list_entry(target->free_reqs.next, struct srp_request, list); scmnd->scsi_done = done; scmnd->result = 0; - scmnd->host_scribble = (void *) req_index; + scmnd->host_scribble = (void *) (long) req->index; cmd = iu->buf; memset(cmd, 0, sizeof *cmd); cmd->opcode = SRP_CMD; cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); - cmd->tag = req_index; + cmd->tag = req->index; memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); - req = &target->req_ring[req_index]; - req->scmnd = scmnd; req->cmd = iu; req->cmd_done = 0; @@ -919,8 +911,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, goto err_unmap; } - target->req_head = req->next; - list_add_tail(&req->list, &target->req_queue); + list_move_tail(&req->list, &target->req_queue); return 0; @@ -1143,30 +1134,20 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) return 0; } -static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func) +static int srp_send_tsk_mgmt(struct srp_target_port *target, + struct srp_request *req, u8 func) { - struct srp_target_port *target = host_to_target(scmnd->device->host); - struct srp_request *req; struct srp_iu *iu; struct srp_tsk_mgmt *tsk_mgmt; - int req_index; - int ret = FAILED; spin_lock_irq(target->scsi_host->host_lock); if (target->state == SRP_TARGET_DEAD || target->state == SRP_TARGET_REMOVED) { - scmnd->result = DID_BAD_TARGET << 16; + req->scmnd->result = DID_BAD_TARGET << 16; goto out; } - if (scmnd->host_scribble == (void *) -1L) - goto out; - - req_index = (long) scmnd->host_scribble; - printk(KERN_ERR "Abort for req_index %d\n", req_index); - - req = &target->req_ring[req_index]; 
init_completion(&req->done); iu = __srp_get_tx_iu(target); @@ -1177,10 +1158,10 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func) memset(tsk_mgmt, 0, sizeof *tsk_mgmt); tsk_mgmt->opcode = SRP_TSK_MGMT; - tsk_mgmt->lun = cpu_to_be64((u64) scmnd->device->lun << 48); - tsk_mgmt->tag = req_index | SRP_TAG_TSK_MGMT; + tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48); + tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT; tsk_mgmt->tsk_mgmt_func = func; - tsk_mgmt->task_tag = req_index; + tsk_mgmt->task_tag = req->index; if (__srp_post_send(target, iu, sizeof *tsk_mgmt)) goto out; @@ -1188,37 +1169,85 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func) req->tsk_mgmt = iu; spin_unlock_irq(target->scsi_host->host_lock); + if (!wait_for_completion_timeout(&req->done, msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) - return FAILED; - spin_lock_irq(target->scsi_host->host_lock); + return -1; - if (req->cmd_done) { - srp_remove_req(target, req, req_index); - scmnd->scsi_done(scmnd); - } else if (!req->tsk_status) { - srp_remove_req(target, req, req_index); - scmnd->result = DID_ABORT << 16; - ret = SUCCESS; - } + return 0; out: spin_unlock_irq(target->scsi_host->host_lock); - return ret; + return -1; +} + +static int srp_find_req(struct srp_target_port *target, + struct scsi_cmnd *scmnd, + struct srp_request **req) +{ + if (scmnd->host_scribble == (void *) -1L) + return -1; + + *req = &target->req_ring[(long) scmnd->host_scribble]; + + return 0; } static int srp_abort(struct scsi_cmnd *scmnd) { + struct srp_target_port *target = host_to_target(scmnd->device->host); + struct srp_request *req; + int ret = SUCCESS; + printk(KERN_ERR "SRP abort called\n"); - return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK); + if (srp_find_req(target, scmnd, &req)) + return FAILED; + if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK)) + return FAILED; + + spin_lock_irq(target->scsi_host->host_lock); + + if (req->cmd_done) { + srp_remove_req(target, req); + scmnd->scsi_done(scmnd); + } else if (!req->tsk_status) { + srp_remove_req(target, req); + scmnd->result = DID_ABORT << 16; + } else + ret = FAILED; + + spin_unlock_irq(target->scsi_host->host_lock); + + return ret; } static int srp_reset_device(struct scsi_cmnd *scmnd) { + struct srp_target_port *target = host_to_target(scmnd->device->host); + struct srp_request *req, *tmp; + printk(KERN_ERR "SRP reset_device called\n"); - return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET); + if (srp_find_req(target, scmnd, &req)) + return FAILED; + if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET)) + return FAILED; + if (req->tsk_status) + return FAILED; + + spin_lock_irq(target->scsi_host->host_lock); + + list_for_each_entry_safe(req, tmp, &target->req_queue, list) + if (req->scmnd->device == scmnd->device) { + req->scmnd->result = DID_RESET << 16; + scmnd->scsi_done(scmnd); + srp_remove_req(target, req); + } + + spin_unlock_irq(target->scsi_host->host_lock); + + return SUCCESS; } static int srp_reset_host(struct scsi_cmnd *scmnd) @@ -1518,10 +1547,12 @@ static ssize_t srp_create_target(struct class_device *class_dev, INIT_WORK(&target->work, srp_reconnect_work, target); - for (i = 0; i < SRP_SQ_SIZE - 1; ++i) - target->req_ring[i].next = i + 1; - target->req_ring[SRP_SQ_SIZE - 1].next = -1; + INIT_LIST_HEAD(&target->free_reqs); INIT_LIST_HEAD(&target->req_queue); + for (i = 0; i < SRP_SQ_SIZE; ++i) { + target->req_ring[i].index = i; + list_add_tail(&target->req_ring[i].list, &target->free_reqs); + } ret = srp_parse_options(buf, target); if 
(ret) diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index bd7f7c3115d..c5cd43aae86 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h @@ -101,7 +101,7 @@ struct srp_request { */ struct scatterlist fake_sg; struct completion done; - short next; + short index; u8 cmd_done; u8 tsk_status; }; @@ -133,7 +133,7 @@ struct srp_target_port { unsigned tx_tail; struct srp_iu *tx_ring[SRP_SQ_SIZE + 1]; - int req_head; + struct list_head free_reqs; struct list_head req_queue; struct srp_request req_ring[SRP_SQ_SIZE]; diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 266414ca281..9080853fe28 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -1189,7 +1189,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) ioc->diagPending = 0; spin_lock_init(&ioc->diagLock); spin_lock_init(&ioc->fc_rescan_work_lock); - spin_lock_init(&ioc->fc_rport_lock); spin_lock_init(&ioc->initializing_hba_lock); /* Initialize the event logging. @@ -5736,11 +5735,13 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) return rc; } +# define EVENT_DESCR_STR_SZ 100 + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static void EventDescriptionStr(u8 event, u32 evData0, char *evStr) { - char *ds; + char *ds = NULL; switch(event) { case MPI_EVENT_NONE: @@ -5777,9 +5778,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP) ds = "Loop State(LIP) Change"; else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE) - ds = "Loop State(LPE) Change"; /* ??? */ + ds = "Loop State(LPE) Change"; /* ??? */ else - ds = "Loop State(LPB) Change"; /* ??? */ + ds = "Loop State(LPB) Change"; /* ??? 
*/ break; case MPI_EVENT_LOGOUT: ds = "Logout"; @@ -5841,27 +5842,32 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) break; case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: { - char buf[50]; u8 id = (u8)(evData0); u8 ReasonCode = (u8)(evData0 >> 16); switch (ReasonCode) { case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: - sprintf(buf,"SAS Device Status Change: Added: id=%d", id); + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS Device Status Change: Added: id=%d", id); break; case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: - sprintf(buf,"SAS Device Status Change: Deleted: id=%d", id); + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS Device Status Change: Deleted: id=%d", id); break; case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: - sprintf(buf,"SAS Device Status Change: SMART Data: id=%d", id); + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS Device Status Change: SMART Data: id=%d", + id); break; case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED: - sprintf(buf,"SAS Device Status Change: No Persistancy Added: id=%d", id); + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS Device Status Change: No Persistancy " + "Added: id=%d", id); break; default: - sprintf(buf,"SAS Device Status Change: Unknown: id=%d", id); - break; + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS Device Status Change: Unknown: id=%d", id); + break; } - ds = buf; break; } case MPI_EVENT_ON_BUS_TIMER_EXPIRED: @@ -5878,41 +5884,46 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) break; case MPI_EVENT_SAS_PHY_LINK_STATUS: { - char buf[50]; u8 LinkRates = (u8)(evData0 >> 8); u8 PhyNumber = (u8)(evData0); LinkRates = (LinkRates & MPI_EVENT_SAS_PLS_LR_CURRENT_MASK) >> MPI_EVENT_SAS_PLS_LR_CURRENT_SHIFT; switch (LinkRates) { case MPI_EVENT_SAS_PLS_LR_RATE_UNKNOWN: - sprintf(buf,"SAS PHY Link Status: Phy=%d:" + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS PHY Link Status: Phy=%d:" " Rate Unknown",PhyNumber); break; case MPI_EVENT_SAS_PLS_LR_RATE_PHY_DISABLED: - sprintf(buf,"SAS PHY Link Status: Phy=%d:" + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS PHY Link Status: Phy=%d:" " Phy Disabled",PhyNumber); break; case MPI_EVENT_SAS_PLS_LR_RATE_FAILED_SPEED_NEGOTIATION: - sprintf(buf,"SAS PHY Link Status: Phy=%d:" + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS PHY Link Status: Phy=%d:" " Failed Speed Nego",PhyNumber); break; case MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE: - sprintf(buf,"SAS PHY Link Status: Phy=%d:" + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS PHY Link Status: Phy=%d:" " Sata OOB Completed",PhyNumber); break; case MPI_EVENT_SAS_PLS_LR_RATE_1_5: - sprintf(buf,"SAS PHY Link Status: Phy=%d:" + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS PHY Link Status: Phy=%d:" " Rate 1.5 Gbps",PhyNumber); break; case MPI_EVENT_SAS_PLS_LR_RATE_3_0: - sprintf(buf,"SAS PHY Link Status: Phy=%d:" + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS PHY Link Status: Phy=%d:" " Rate 3.0 Gpbs",PhyNumber); break; default: - sprintf(buf,"SAS PHY Link Status: Phy=%d", PhyNumber); + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS PHY Link Status: Phy=%d", PhyNumber); break; } - ds = buf; break; } case MPI_EVENT_SAS_DISCOVERY_ERROR: @@ -5921,9 +5932,8 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) case MPI_EVENT_IR_RESYNC_UPDATE: { u8 resync_complete = (u8)(evData0 >> 16); - char buf[40]; - sprintf(buf,"IR Resync Update: Complete = %d:",resync_complete); - ds = buf; + snprintf(evStr, EVENT_DESCR_STR_SZ, + "IR Resync Update: Complete = %d:",resync_complete); break; } case MPI_EVENT_IR2: @@ -5976,7 +5986,8 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) ds = "Unknown"; 
break; } - strcpy(evStr,ds); + if (ds) + strncpy(evStr, ds, EVENT_DESCR_STR_SZ); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -5998,7 +6009,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply int ii; int r = 0; int handlers = 0; - char evStr[100]; + char evStr[EVENT_DESCR_STR_SZ]; u8 event; /* diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index be7e8501b53..f673cca507e 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -76,8 +76,8 @@ #define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR #endif -#define MPT_LINUX_VERSION_COMMON "3.03.08" -#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.08" +#define MPT_LINUX_VERSION_COMMON "3.03.09" +#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.09" #define WHAT_MAGIC_STRING "@" "(" "#" ")" #define show_mptmod_ver(s,ver) \ @@ -489,7 +489,6 @@ typedef struct _RaidCfgData { #define MPT_RPORT_INFO_FLAGS_REGISTERED 0x01 /* rport registered */ #define MPT_RPORT_INFO_FLAGS_MISSING 0x02 /* missing from DevPage0 scan */ -#define MPT_RPORT_INFO_FLAGS_MAPPED_VDEV 0x04 /* target mapped in vdev */ /* * data allocated for each fc rport device @@ -501,7 +500,6 @@ struct mptfc_rport_info struct scsi_target *starget; FCDevicePage0_t pg0; u8 flags; - u8 remap_needed; }; /* @@ -628,11 +626,11 @@ typedef struct _MPT_ADAPTER struct work_struct mptscsih_persistTask; struct list_head fc_rports; - spinlock_t fc_rport_lock; /* list and ri flags */ spinlock_t fc_rescan_work_lock; int fc_rescan_work_count; struct work_struct fc_rescan_work; - + char fc_rescan_work_q_name[KOBJ_NAME_LEN]; + struct workqueue_struct *fc_rescan_work_q; } MPT_ADAPTER; /* diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index b343f2a68b1..856487741ef 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c @@ -341,9 +341,6 @@ mptfc_generate_rport_ids(FCDevicePage0_t *pg0, struct fc_rport_identifiers *rid) rid->port_name = ((u64)pg0->WWPN.High) << 32 | (u64)pg0->WWPN.Low; rid->port_id = pg0->PortIdentifier; rid->roles = FC_RPORT_ROLE_UNKNOWN; - rid->roles |= FC_RPORT_ROLE_FCP_TARGET; - if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR) - rid->roles |= FC_RPORT_ROLE_FCP_INITIATOR; return 0; } @@ -355,15 +352,18 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0) struct fc_rport *rport; struct mptfc_rport_info *ri; int new_ri = 1; - u64 pn; - unsigned long flags; + u64 pn, nn; VirtTarget *vtarget; + u32 roles = FC_RPORT_ROLE_UNKNOWN; if (mptfc_generate_rport_ids(pg0, &rport_ids) < 0) return; + roles |= FC_RPORT_ROLE_FCP_TARGET; + if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR) + roles |= FC_RPORT_ROLE_FCP_INITIATOR; + /* scan list looking for a match */ - spin_lock_irqsave(&ioc->fc_rport_lock, flags); list_for_each_entry(ri, &ioc->fc_rports, list) { pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; if (pn == rport_ids.port_name) { /* match */ @@ -373,11 +373,9 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0) } } if (new_ri) { /* allocate one */ - spin_unlock_irqrestore(&ioc->fc_rport_lock, flags); ri = kzalloc(sizeof(struct mptfc_rport_info), GFP_KERNEL); if (!ri) return; - spin_lock_irqsave(&ioc->fc_rport_lock, flags); list_add_tail(&ri->list, &ioc->fc_rports); } @@ -387,14 +385,11 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0) /* MPT_RPORT_INFO_FLAGS_REGISTERED - rport not previously deleted */ if 
(!(ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED)) { ri->flags |= MPT_RPORT_INFO_FLAGS_REGISTERED; - spin_unlock_irqrestore(&ioc->fc_rport_lock, flags); rport = fc_remote_port_add(ioc->sh, channel, &rport_ids); - spin_lock_irqsave(&ioc->fc_rport_lock, flags); if (rport) { ri->rport = rport; if (new_ri) /* may have been reset by user */ rport->dev_loss_tmo = mptfc_dev_loss_tmo; - *((struct mptfc_rport_info **)rport->dd_data) = ri; /* * if already mapped, remap here. If not mapped, * target_alloc will allocate vtarget and map, @@ -406,16 +401,21 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0) vtarget->target_id = pg0->CurrentTargetID; vtarget->bus_id = pg0->CurrentBus; } - ri->remap_needed = 0; } + *((struct mptfc_rport_info **)rport->dd_data) = ri; + /* scan will be scheduled once rport becomes a target */ + fc_remote_port_rolechg(rport,roles); + + pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; + nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low; dfcprintk ((MYIOC_s_INFO_FMT "mptfc_reg_dev.%d: %x, %llx / %llx, tid %d, " "rport tid %d, tmo %d\n", ioc->name, ioc->sh->host_no, pg0->PortIdentifier, - pg0->WWNN, - pg0->WWPN, + (unsigned long long)nn, + (unsigned long long)pn, pg0->CurrentTargetID, ri->rport->scsi_target_id, ri->rport->dev_loss_tmo)); @@ -425,8 +425,6 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0) ri = NULL; } } - spin_unlock_irqrestore(&ioc->fc_rport_lock,flags); - } /* @@ -476,7 +474,6 @@ mptfc_target_alloc(struct scsi_target *starget) vtarget->target_id = ri->pg0.CurrentTargetID; vtarget->bus_id = ri->pg0.CurrentBus; ri->starget = starget; - ri->remap_needed = 0; rc = 0; } } @@ -502,10 +499,10 @@ mptfc_slave_alloc(struct scsi_device *sdev) VirtDevice *vdev; struct scsi_target *starget; struct fc_rport *rport; - unsigned long flags; - rport = starget_to_rport(scsi_target(sdev)); + starget = scsi_target(sdev); + rport = starget_to_rport(starget); if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; @@ -519,10 +516,8 @@ mptfc_slave_alloc(struct scsi_device *sdev) return -ENOMEM; } - spin_lock_irqsave(&hd->ioc->fc_rport_lock,flags); sdev->hostdata = vdev; - starget = scsi_target(sdev); vtarget = starget->hostdata; if (vtarget->num_luns == 0) { @@ -535,14 +530,16 @@ mptfc_slave_alloc(struct scsi_device *sdev) vdev->vtarget = vtarget; vdev->lun = sdev->lun; - spin_unlock_irqrestore(&hd->ioc->fc_rport_lock,flags); - vtarget->num_luns++; + #ifdef DMPT_DEBUG_FC - { + { + u64 nn, pn; struct mptfc_rport_info *ri; ri = *((struct mptfc_rport_info **)rport->dd_data); + pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; + nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low; dfcprintk ((MYIOC_s_INFO_FMT "mptfc_slv_alloc.%d: num_luns %d, sdev.id %d, " "CurrentTargetID %d, %x %llx %llx\n", @@ -550,7 +547,9 @@ mptfc_slave_alloc(struct scsi_device *sdev) sdev->host->host_no, vtarget->num_luns, sdev->id, ri->pg0.CurrentTargetID, - ri->pg0.PortIdentifier, ri->pg0.WWPN, ri->pg0.WWNN)); + ri->pg0.PortIdentifier, + (unsigned long long)pn, + (unsigned long long)nn)); } #endif @@ -570,11 +569,31 @@ mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) done(SCpnt); return 0; } + + /* dd_data is null until finished adding target */ ri = *((struct mptfc_rport_info **)rport->dd_data); - if (unlikely(ri->remap_needed)) - return SCSI_MLQUEUE_HOST_BUSY; + if (unlikely(!ri)) { + dfcprintk ((MYIOC_s_INFO_FMT + "mptfc_qcmd.%d: %d:%d, dd_data is null.\n", + ((MPT_SCSI_HOST *) 
SCpnt->device->host->hostdata)->ioc->name, + ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->sh->host_no, + SCpnt->device->id,SCpnt->device->lun)); + SCpnt->result = DID_IMM_RETRY << 16; + done(SCpnt); + return 0; + } - return mptscsih_qcmd(SCpnt,done); + err = mptscsih_qcmd(SCpnt,done); +#ifdef DMPT_DEBUG_FC + if (unlikely(err)) { + dfcprintk ((MYIOC_s_INFO_FMT + "mptfc_qcmd.%d: %d:%d, mptscsih_qcmd returns non-zero.\n", + ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->name, + ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->sh->host_no, + SCpnt->device->id,SCpnt->device->lun)); + } +#endif + return err; } static void @@ -615,18 +634,17 @@ mptfc_rescan_devices(void *arg) MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; int ii; int work_to_do; + u64 pn; unsigned long flags; struct mptfc_rport_info *ri; do { /* start by tagging all ports as missing */ - spin_lock_irqsave(&ioc->fc_rport_lock,flags); list_for_each_entry(ri, &ioc->fc_rports, list) { if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING; } } - spin_unlock_irqrestore(&ioc->fc_rport_lock,flags); /* * now rescan devices known to adapter, @@ -639,33 +657,24 @@ mptfc_rescan_devices(void *arg) } /* delete devices still missing */ - spin_lock_irqsave(&ioc->fc_rport_lock, flags); list_for_each_entry(ri, &ioc->fc_rports, list) { /* if newly missing, delete it */ - if ((ri->flags & (MPT_RPORT_INFO_FLAGS_REGISTERED | - MPT_RPORT_INFO_FLAGS_MISSING)) - == (MPT_RPORT_INFO_FLAGS_REGISTERED | - MPT_RPORT_INFO_FLAGS_MISSING)) { + if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) { ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED| MPT_RPORT_INFO_FLAGS_MISSING); - ri->remap_needed = 1; - fc_remote_port_delete(ri->rport); - /* - * remote port not really deleted 'cause - * binding is by WWPN and driver only - * registers FCP_TARGETs but cannot trust - * data structures. 
- */ + fc_remote_port_delete(ri->rport); /* won't sleep */ ri->rport = NULL; + + pn = (u64)ri->pg0.WWPN.High << 32 | + (u64)ri->pg0.WWPN.Low; dfcprintk ((MYIOC_s_INFO_FMT "mptfc_rescan.%d: %llx deleted\n", ioc->name, ioc->sh->host_no, - ri->pg0.WWPN)); + (unsigned long long)pn)); } } - spin_unlock_irqrestore(&ioc->fc_rport_lock,flags); /* * allow multiple passes as target state @@ -870,10 +879,23 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out_mptfc_probe; } - for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { - mptfc_init_host_attr(ioc,ii); - mptfc_GetFcDevPage0(ioc,ii,mptfc_register_dev); - } + /* initialize workqueue */ + + snprintf(ioc->fc_rescan_work_q_name, KOBJ_NAME_LEN, "mptfc_wq_%d", + sh->host_no); + ioc->fc_rescan_work_q = + create_singlethread_workqueue(ioc->fc_rescan_work_q_name); + if (!ioc->fc_rescan_work_q) + goto out_mptfc_probe; + + /* + * scan for rports - + * by doing it via the workqueue, some locking is eliminated + */ + + ioc->fc_rescan_work_count = 1; + queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work); + flush_workqueue(ioc->fc_rescan_work_q); return 0; @@ -949,8 +971,18 @@ mptfc_init(void) static void __devexit mptfc_remove(struct pci_dev *pdev) { - MPT_ADAPTER *ioc = pci_get_drvdata(pdev); - struct mptfc_rport_info *p, *n; + MPT_ADAPTER *ioc = pci_get_drvdata(pdev); + struct mptfc_rport_info *p, *n; + struct workqueue_struct *work_q; + unsigned long flags; + + /* destroy workqueue */ + if ((work_q=ioc->fc_rescan_work_q)) { + spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); + ioc->fc_rescan_work_q = NULL; + spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); + destroy_workqueue(work_q); + } fc_remove_host(ioc->sh); diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index e9716b10ace..af6ec553ff7 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -91,6 +91,7 @@ enum mptsas_hotplug_action { MPTSAS_DEL_DEVICE, MPTSAS_ADD_RAID, MPTSAS_DEL_RAID, + MPTSAS_IGNORE_EVENT, }; struct mptsas_hotplug_event { @@ -298,6 +299,26 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle) return rc; } +/* + * Returns true if there is a scsi end device + */ +static inline int +mptsas_is_end_device(struct mptsas_devinfo * attached) +{ + if ((attached->handle) && + (attached->device_info & + MPI_SAS_DEVICE_INFO_END_DEVICE) && + ((attached->device_info & + MPI_SAS_DEVICE_INFO_SSP_TARGET) | + (attached->device_info & + MPI_SAS_DEVICE_INFO_STP_TARGET) | + (attached->device_info & + MPI_SAS_DEVICE_INFO_SATA_DEVICE))) + return 1; + else + return 0; +} + static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure, u32 form, u32 form_specific) @@ -872,7 +893,11 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info, SasDevicePage0_t *buffer; dma_addr_t dma_handle; __le64 sas_address; - int error; + int error=0; + + if (ioc->sas_discovery_runtime && + mptsas_is_end_device(device_info)) + goto out; hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION; hdr.ExtPageLength = 0; @@ -1009,7 +1034,11 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, CONFIGPARMS cfg; SasExpanderPage1_t *buffer; dma_addr_t dma_handle; - int error; + int error=0; + + if (ioc->sas_discovery_runtime && + mptsas_is_end_device(&phy_info->attached)) + goto out; hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION; hdr.ExtPageLength = 0; @@ -1068,26 +1097,6 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, return error; } 
-/* - * Returns true if there is a scsi end device - */ -static inline int -mptsas_is_end_device(struct mptsas_devinfo * attached) -{ - if ((attached->handle) && - (attached->device_info & - MPI_SAS_DEVICE_INFO_END_DEVICE) && - ((attached->device_info & - MPI_SAS_DEVICE_INFO_SSP_TARGET) | - (attached->device_info & - MPI_SAS_DEVICE_INFO_STP_TARGET) | - (attached->device_info & - MPI_SAS_DEVICE_INFO_SATA_DEVICE))) - return 1; - else - return 0; -} - static void mptsas_parse_device_info(struct sas_identify *identify, struct mptsas_devinfo *device_info) @@ -1737,6 +1746,9 @@ mptsas_hotplug_work(void *arg) break; case MPTSAS_ADD_DEVICE: + if (ev->phys_disk_num_valid) + mpt_findImVolumes(ioc); + /* * Refresh sas device pg0 data */ @@ -1868,6 +1880,9 @@ mptsas_hotplug_work(void *arg) scsi_device_put(sdev); mpt_findImVolumes(ioc); break; + case MPTSAS_IGNORE_EVENT: + default: + break; } kfree(ev); @@ -1940,7 +1955,8 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc, EVENT_DATA_RAID *raid_event_data) { struct mptsas_hotplug_event *ev; - RAID_VOL0_STATUS * volumeStatus; + int status = le32_to_cpu(raid_event_data->SettingsStatus); + int state = (status >> 8) & 0xff; if (ioc->bus_type != SAS) return; @@ -1955,6 +1971,7 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc, INIT_WORK(&ev->work, mptsas_hotplug_work, ev); ev->ioc = ioc; ev->id = raid_event_data->VolumeID; + ev->event_type = MPTSAS_IGNORE_EVENT; switch (raid_event_data->ReasonCode) { case MPI_EVENT_RAID_RC_PHYSDISK_DELETED: @@ -1966,6 +1983,25 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc, ev->phys_disk_num = raid_event_data->PhysDiskNum; ev->event_type = MPTSAS_DEL_DEVICE; break; + case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED: + switch (state) { + case MPI_PD_STATE_ONLINE: + ioc->raid_data.isRaid = 1; + ev->phys_disk_num_valid = 1; + ev->phys_disk_num = raid_event_data->PhysDiskNum; + ev->event_type = MPTSAS_ADD_DEVICE; + break; + case MPI_PD_STATE_MISSING: + case MPI_PD_STATE_NOT_COMPATIBLE: + case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST: + case MPI_PD_STATE_FAILED_AT_HOST_REQUEST: + case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON: + ev->event_type = MPTSAS_DEL_DEVICE; + break; + default: + break; + } + break; case MPI_EVENT_RAID_RC_VOLUME_DELETED: ev->event_type = MPTSAS_DEL_RAID; break; @@ -1973,11 +2009,18 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc, ev->event_type = MPTSAS_ADD_RAID; break; case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED: - volumeStatus = (RAID_VOL0_STATUS *) & - raid_event_data->SettingsStatus; - ev->event_type = (volumeStatus->State == - MPI_RAIDVOL0_STATUS_STATE_FAILED) ? - MPTSAS_DEL_RAID : MPTSAS_ADD_RAID; + switch (state) { + case MPI_RAIDVOL0_STATUS_STATE_FAILED: + case MPI_RAIDVOL0_STATUS_STATE_MISSING: + ev->event_type = MPTSAS_DEL_RAID; + break; + case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL: + case MPI_RAIDVOL0_STATUS_STATE_DEGRADED: + ev->event_type = MPTSAS_ADD_RAID; + break; + default: + break; + } break; default: break; diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 3729062db31..84fa271eb8f 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -632,7 +632,11 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */ /* Spoof to SCSI Selection Timeout! 
*/ - sc->result = DID_NO_CONNECT << 16; + if (ioc->bus_type != FC) + sc->result = DID_NO_CONNECT << 16; + /* else fibre, just stall until rescan event */ + else + sc->result = DID_REQUEUE << 16; if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF) hd->sel_timeout[pScsiReq->TargetID]++; @@ -877,7 +881,7 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice) struct scsi_cmnd *sc; dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n", - vdevice->target_id, vdevice->lun, max)); + vdevice->vtarget->target_id, vdevice->lun, max)); for (ii=0; ii < max; ii++) { if ((sc = hd->ScsiLookup[ii]) != NULL) { @@ -1645,7 +1649,6 @@ int mptscsih_abort(struct scsi_cmnd * SCpnt) { MPT_SCSI_HOST *hd; - MPT_ADAPTER *ioc; MPT_FRAME_HDR *mf; u32 ctx2abort; int scpnt_idx; @@ -1663,14 +1666,6 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) return FAILED; } - ioc = hd->ioc; - if (hd->resetPending) { - return FAILED; - } - - if (hd->timeouts < -1) - hd->timeouts++; - /* Find this command */ if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) { @@ -1684,6 +1679,13 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) return SUCCESS; } + if (hd->resetPending) { + return FAILED; + } + + if (hd->timeouts < -1) + hd->timeouts++; + printk(KERN_WARNING MYNAM ": %s: attempting task abort! (sc=%p)\n", hd->ioc->name, SCpnt); scsi_print_command(SCpnt); @@ -1703,7 +1705,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) vdev = SCpnt->device->hostdata; retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun, - ctx2abort, mptscsih_get_tm_timeout(ioc)); + ctx2abort, mptscsih_get_tm_timeout(hd->ioc)); printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n", hd->ioc->name, @@ -2521,15 +2523,15 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) /* 7. FC: Rescan for blocked rports which might have returned. 
*/ - else if (ioc->bus_type == FC) { - int work_count; - unsigned long flags; - + if (ioc->bus_type == FC) { spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); - work_count = ++ioc->fc_rescan_work_count; + if (ioc->fc_rescan_work_q) { + if (ioc->fc_rescan_work_count++ == 0) { + queue_work(ioc->fc_rescan_work_q, + &ioc->fc_rescan_work); + } + } spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); - if (work_count == 1) - schedule_work(&ioc->fc_rescan_work); } dtmprintk((MYIOC_s_WARN_FMT "Post-Reset complete.\n", ioc->name)); @@ -2544,7 +2546,6 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) { MPT_SCSI_HOST *hd; u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; - int work_count; unsigned long flags; devtverboseprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", @@ -2569,10 +2570,13 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) case MPI_EVENT_RESCAN: /* 06 */ spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); - work_count = ++ioc->fc_rescan_work_count; + if (ioc->fc_rescan_work_q) { + if (ioc->fc_rescan_work_count++ == 0) { + queue_work(ioc->fc_rescan_work_q, + &ioc->fc_rescan_work); + } + } spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); - if (work_count == 1) - schedule_work(&ioc->fc_rescan_work); break; /* diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index 09c745b19cc..f2a4d382ea1 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c @@ -783,6 +783,70 @@ static struct pci_device_id mptspi_pci_table[] = { }; MODULE_DEVICE_TABLE(pci, mptspi_pci_table); + +/* + * renegotiate for a given target + */ +static void +mptspi_dv_renegotiate_work(void *data) +{ + struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; + struct _MPT_SCSI_HOST *hd = wqw->hd; + struct scsi_device *sdev; + + kfree(wqw); + + shost_for_each_device(sdev, hd->ioc->sh) + mptspi_dv_device(hd, sdev); +} + +static void +mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd) +{ + struct work_queue_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_ATOMIC); + + if (!wqw) + return; + + INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw); + wqw->hd = hd; + + schedule_work(&wqw->work); +} + +/* + * spi module reset handler + */ +static int +mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) +{ + struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)ioc->sh->hostdata; + int rc; + + rc = mptscsih_ioc_reset(ioc, reset_phase); + + if (reset_phase == MPT_IOC_POST_RESET) + mptspi_dv_renegotiate(hd); + + return rc; +} + +/* + * spi module resume handler + */ +static int +mptspi_resume(struct pci_dev *pdev) +{ + MPT_ADAPTER *ioc = pci_get_drvdata(pdev); + struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)ioc->sh->hostdata; + int rc; + + rc = mptscsih_resume(pdev); + mptspi_dv_renegotiate(hd); + + return rc; +} + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -1032,7 +1096,7 @@ static struct pci_driver mptspi_driver = { .shutdown = mptscsih_shutdown, #ifdef CONFIG_PM .suspend = mptscsih_suspend, - .resume = mptscsih_resume, + .resume = mptspi_resume, #endif }; @@ -1061,7 +1125,7 @@ mptspi_init(void) ": Registered for IOC event notifications\n")); } - if (mpt_reset_register(mptspiDoneCtx, mptscsih_ioc_reset) == 0) { + if (mpt_reset_register(mptspiDoneCtx, mptspi_ioc_reset) == 0) { dprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n")); } 
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 1363083b4d8..14dbad14afb 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -52,6 +52,7 @@ #include <linux/mii.h> #include <linux/skbuff.h> #include <linux/delay.h> +#include <linux/crc32.h> #include <asm/mipsregs.h> #include <asm/irq.h> #include <asm/io.h> @@ -2070,23 +2071,6 @@ static void au1000_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } - -static unsigned const ethernet_polynomial = 0x04c11db7U; -static inline u32 ether_crc(int length, unsigned char *data) -{ - int crc = -1; - - while(--length >= 0) { - unsigned char current_octet = *data++; - int bit; - for (bit = 0; bit < 8; bit++, current_octet >>= 1) - crc = (crc << 1) ^ - ((crc < 0) ^ (current_octet & 1) ? - ethernet_polynomial : 0); - } - return crc; -} - static void set_rx_mode(struct net_device *dev) { struct au1000_private *aup = (struct au1000_private *) dev->priv; diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index 1f3627470c9..1ddefd28121 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c @@ -765,7 +765,7 @@ rio_free_tx (struct net_device *dev, int irq) break; skb = np->tx_skbuff[entry]; pci_unmap_single (np->pdev, - np->tx_ring[entry].fraginfo & 0xffffffffffff, + np->tx_ring[entry].fraginfo & DMA_48BIT_MASK, skb->len, PCI_DMA_TODEVICE); if (irq) dev_kfree_skb_irq (skb); @@ -893,7 +893,7 @@ receive_packet (struct net_device *dev) /* Small skbuffs for short packets */ if (pkt_len > copy_thresh) { pci_unmap_single (np->pdev, - desc->fraginfo & 0xffffffffffff, + desc->fraginfo & DMA_48BIT_MASK, np->rx_buf_sz, PCI_DMA_FROMDEVICE); skb_put (skb = np->rx_skbuff[entry], pkt_len); @@ -901,7 +901,7 @@ receive_packet (struct net_device *dev) } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) { pci_dma_sync_single_for_cpu(np->pdev, desc->fraginfo & - 0xffffffffffff, + DMA_48BIT_MASK, np->rx_buf_sz, PCI_DMA_FROMDEVICE); skb->dev = dev; @@ -913,7 +913,7 @@ receive_packet (struct net_device *dev) skb_put (skb, pkt_len); pci_dma_sync_single_for_device(np->pdev, desc->fraginfo & - 0xffffffffffff, + DMA_48BIT_MASK, np->rx_buf_sz, PCI_DMA_FROMDEVICE); } @@ -1800,7 +1800,7 @@ rio_close (struct net_device *dev) skb = np->rx_skbuff[i]; if (skb) { pci_unmap_single(np->pdev, - np->rx_ring[i].fraginfo & 0xffffffffffff, + np->rx_ring[i].fraginfo & DMA_48BIT_MASK, skb->len, PCI_DMA_FROMDEVICE); dev_kfree_skb (skb); np->rx_skbuff[i] = NULL; @@ -1810,7 +1810,7 @@ rio_close (struct net_device *dev) skb = np->tx_skbuff[i]; if (skb) { pci_unmap_single(np->pdev, - np->tx_ring[i].fraginfo & 0xffffffffffff, + np->tx_ring[i].fraginfo & DMA_48BIT_MASK, skb->len, PCI_DMA_TODEVICE); dev_kfree_skb (skb); np->tx_skbuff[i] = NULL; diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile index 27ab75f2079..c1ce2398efe 100644 --- a/drivers/net/irda/Makefile +++ b/drivers/net/irda/Makefile @@ -46,4 +46,4 @@ obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o # The SIR helper module -sir-dev-objs := sir_dev.o sir_dongle.o sir_kthread.o +sir-dev-objs := sir_dev.o sir_dongle.o diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 96bdb73c228..cd87593e4e8 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c @@ -1778,7 +1778,7 @@ static int irda_usb_probe(struct usb_interface *intf, if (self->needspatch) { ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0), - 0x02, 0x40, 0, 0, 0, 0, msecs_to_jiffies(500)); + 0x02, 0x40, 0, 0, NULL, 0, 
500); if (ret < 0) { IRDA_DEBUG (0, "usb_control_msg failed %d\n", ret); goto err_out_3; diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h index f69fb4cec76..9fa294a546d 100644 --- a/drivers/net/irda/sir-dev.h +++ b/drivers/net/irda/sir-dev.h @@ -15,23 +15,14 @@ #define IRDA_SIR_H #include <linux/netdevice.h> +#include <linux/workqueue.h> #include <net/irda/irda.h> #include <net/irda/irda_device.h> // iobuff_t -/* FIXME: unify irda_request with sir_fsm! */ - -struct irda_request { - struct list_head lh_request; - unsigned long pending; - void (*func)(void *); - void *data; - struct timer_list timer; -}; - struct sir_fsm { struct semaphore sem; - struct irda_request rq; + struct work_struct work; unsigned state, substate; int param; int result; diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index ea7c9464d46..3b5854d10c1 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c @@ -23,6 +23,298 @@ #include "sir-dev.h" + +static struct workqueue_struct *irda_sir_wq; + +/* STATE MACHINE */ + +/* substate handler of the config-fsm to handle the cases where we want + * to wait for transmit completion before changing the port configuration + */ + +static int sirdev_tx_complete_fsm(struct sir_dev *dev) +{ + struct sir_fsm *fsm = &dev->fsm; + unsigned next_state, delay; + unsigned bytes_left; + + do { + next_state = fsm->substate; /* default: stay in current substate */ + delay = 0; + + switch(fsm->substate) { + + case SIRDEV_STATE_WAIT_XMIT: + if (dev->drv->chars_in_buffer) + bytes_left = dev->drv->chars_in_buffer(dev); + else + bytes_left = 0; + if (!bytes_left) { + next_state = SIRDEV_STATE_WAIT_UNTIL_SENT; + break; + } + + if (dev->speed > 115200) + delay = (bytes_left*8*10000) / (dev->speed/100); + else if (dev->speed > 0) + delay = (bytes_left*10*10000) / (dev->speed/100); + else + delay = 0; + /* expected delay (usec) until remaining bytes are sent */ + if (delay < 100) { + udelay(delay); + delay = 0; + break; + } + /* sleep some longer delay (msec) */ + delay = (delay+999) / 1000; + break; + + case SIRDEV_STATE_WAIT_UNTIL_SENT: + /* block until underlaying hardware buffer are empty */ + if (dev->drv->wait_until_sent) + dev->drv->wait_until_sent(dev); + next_state = SIRDEV_STATE_TX_DONE; + break; + + case SIRDEV_STATE_TX_DONE: + return 0; + + default: + IRDA_ERROR("%s - undefined state\n", __FUNCTION__); + return -EINVAL; + } + fsm->substate = next_state; + } while (delay == 0); + return delay; +} + +/* + * Function sirdev_config_fsm + * + * State machine to handle the configuration of the device (and attached dongle, if any). + * This handler is scheduled for execution in kIrDAd context, so we can sleep. + * however, kIrDAd is shared by all sir_dev devices so we better don't sleep there too + * long. Instead, for longer delays we start a timer to reschedule us later. + * On entry, fsm->sem is always locked and the netdev xmit queue stopped. + * Both must be unlocked/restarted on completion - but only on final exit. 
+ */ + +static void sirdev_config_fsm(void *data) +{ + struct sir_dev *dev = data; + struct sir_fsm *fsm = &dev->fsm; + int next_state; + int ret = -1; + unsigned delay; + + IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies); + + do { + IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n", + __FUNCTION__, fsm->state, fsm->substate); + + next_state = fsm->state; + delay = 0; + + switch(fsm->state) { + + case SIRDEV_STATE_DONGLE_OPEN: + if (dev->dongle_drv != NULL) { + ret = sirdev_put_dongle(dev); + if (ret) { + fsm->result = -EINVAL; + next_state = SIRDEV_STATE_ERROR; + break; + } + } + + /* Initialize dongle */ + ret = sirdev_get_dongle(dev, fsm->param); + if (ret) { + fsm->result = ret; + next_state = SIRDEV_STATE_ERROR; + break; + } + + /* Dongles are powered through the modem control lines which + * were just set during open. Before resetting, let's wait for + * the power to stabilize. This is what some dongle drivers did + * in open before, while others didn't - should be safe anyway. + */ + + delay = 50; + fsm->substate = SIRDEV_STATE_DONGLE_RESET; + next_state = SIRDEV_STATE_DONGLE_RESET; + + fsm->param = 9600; + + break; + + case SIRDEV_STATE_DONGLE_CLOSE: + /* shouldn't we just treat this as success=? */ + if (dev->dongle_drv == NULL) { + fsm->result = -EINVAL; + next_state = SIRDEV_STATE_ERROR; + break; + } + + ret = sirdev_put_dongle(dev); + if (ret) { + fsm->result = ret; + next_state = SIRDEV_STATE_ERROR; + break; + } + next_state = SIRDEV_STATE_DONE; + break; + + case SIRDEV_STATE_SET_DTR_RTS: + ret = sirdev_set_dtr_rts(dev, + (fsm->param&0x02) ? TRUE : FALSE, + (fsm->param&0x01) ? TRUE : FALSE); + next_state = SIRDEV_STATE_DONE; + break; + + case SIRDEV_STATE_SET_SPEED: + fsm->substate = SIRDEV_STATE_WAIT_XMIT; + next_state = SIRDEV_STATE_DONGLE_CHECK; + break; + + case SIRDEV_STATE_DONGLE_CHECK: + ret = sirdev_tx_complete_fsm(dev); + if (ret < 0) { + fsm->result = ret; + next_state = SIRDEV_STATE_ERROR; + break; + } + if ((delay=ret) != 0) + break; + + if (dev->dongle_drv) { + fsm->substate = SIRDEV_STATE_DONGLE_RESET; + next_state = SIRDEV_STATE_DONGLE_RESET; + } + else { + dev->speed = fsm->param; + next_state = SIRDEV_STATE_PORT_SPEED; + } + break; + + case SIRDEV_STATE_DONGLE_RESET: + if (dev->dongle_drv->reset) { + ret = dev->dongle_drv->reset(dev); + if (ret < 0) { + fsm->result = ret; + next_state = SIRDEV_STATE_ERROR; + break; + } + } + else + ret = 0; + if ((delay=ret) == 0) { + /* set serial port according to dongle default speed */ + if (dev->drv->set_speed) + dev->drv->set_speed(dev, dev->speed); + fsm->substate = SIRDEV_STATE_DONGLE_SPEED; + next_state = SIRDEV_STATE_DONGLE_SPEED; + } + break; + + case SIRDEV_STATE_DONGLE_SPEED: + if (dev->dongle_drv->reset) { + ret = dev->dongle_drv->set_speed(dev, fsm->param); + if (ret < 0) { + fsm->result = ret; + next_state = SIRDEV_STATE_ERROR; + break; + } + } + else + ret = 0; + if ((delay=ret) == 0) + next_state = SIRDEV_STATE_PORT_SPEED; + break; + + case SIRDEV_STATE_PORT_SPEED: + /* Finally we are ready to change the serial port speed */ + if (dev->drv->set_speed) + dev->drv->set_speed(dev, dev->speed); + dev->new_speed = 0; + next_state = SIRDEV_STATE_DONE; + break; + + case SIRDEV_STATE_DONE: + /* Signal network layer so it can send more frames */ + netif_wake_queue(dev->netdev); + next_state = SIRDEV_STATE_COMPLETE; + break; + + default: + IRDA_ERROR("%s - undefined state\n", __FUNCTION__); + fsm->result = -EINVAL; + /* fall thru */ + + case SIRDEV_STATE_ERROR: + IRDA_ERROR("%s - error: %d\n", __FUNCTION__, 
fsm->result); + +#if 0 /* don't enable this before we have netdev->tx_timeout to recover */ + netif_stop_queue(dev->netdev); +#else + netif_wake_queue(dev->netdev); +#endif + /* fall thru */ + + case SIRDEV_STATE_COMPLETE: + /* config change finished, so we are not busy any longer */ + sirdev_enable_rx(dev); + up(&fsm->sem); + return; + } + fsm->state = next_state; + } while(!delay); + + queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay)); +} + +/* schedule some device configuration task for execution by kIrDAd + * on behalf of the above state machine. + * can be called from process or interrupt/tasklet context. + */ + +int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param) +{ + struct sir_fsm *fsm = &dev->fsm; + + IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param); + + if (down_trylock(&fsm->sem)) { + if (in_interrupt() || in_atomic() || irqs_disabled()) { + IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__); + return -EWOULDBLOCK; + } else + down(&fsm->sem); + } + + if (fsm->state == SIRDEV_STATE_DEAD) { + /* race with sirdev_close should never happen */ + IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__); + up(&fsm->sem); + return -ESTALE; /* or better EPIPE? */ + } + + netif_stop_queue(dev->netdev); + atomic_set(&dev->enable_rx, 0); + + fsm->state = initial_state; + fsm->param = param; + fsm->result = 0; + + INIT_WORK(&fsm->work, sirdev_config_fsm, dev); + queue_work(irda_sir_wq, &fsm->work); + return 0; +} + + /***************************************************************************/ void sirdev_enable_rx(struct sir_dev *dev) @@ -619,10 +911,6 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n spin_lock_init(&dev->tx_lock); init_MUTEX(&dev->fsm.sem); - INIT_LIST_HEAD(&dev->fsm.rq.lh_request); - dev->fsm.rq.pending = 0; - init_timer(&dev->fsm.rq.timer); - dev->drv = drv; dev->netdev = ndev; @@ -682,3 +970,22 @@ int sirdev_put_instance(struct sir_dev *dev) } EXPORT_SYMBOL(sirdev_put_instance); +static int __init sir_wq_init(void) +{ + irda_sir_wq = create_singlethread_workqueue("irda_sir_wq"); + if (!irda_sir_wq) + return -ENOMEM; + return 0; +} + +static void __exit sir_wq_exit(void) +{ + destroy_workqueue(irda_sir_wq); +} + +module_init(sir_wq_init); +module_exit(sir_wq_exit); + +MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>"); +MODULE_DESCRIPTION("IrDA SIR core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/irda/sir_kthread.c b/drivers/net/irda/sir_kthread.c deleted file mode 100644 index e3904d6bfec..00000000000 --- a/drivers/net/irda/sir_kthread.c +++ /dev/null @@ -1,508 +0,0 @@ -/********************************************************************* - * - * sir_kthread.c: dedicated thread to process scheduled - * sir device setup requests - * - * Copyright (c) 2002 Martin Diehl - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of - * the License, or (at your option) any later version. 
- * - ********************************************************************/ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/version.h> -#include <linux/init.h> -#include <linux/smp_lock.h> -#include <linux/completion.h> -#include <linux/delay.h> - -#include <net/irda/irda.h> - -#include "sir-dev.h" - -/************************************************************************** - * - * kIrDAd kernel thread and config state machine - * - */ - -struct irda_request_queue { - struct list_head request_list; - spinlock_t lock; - task_t *thread; - struct completion exit; - wait_queue_head_t kick, done; - atomic_t num_pending; -}; - -static struct irda_request_queue irda_rq_queue; - -static int irda_queue_request(struct irda_request *rq) -{ - int ret = 0; - unsigned long flags; - - if (!test_and_set_bit(0, &rq->pending)) { - spin_lock_irqsave(&irda_rq_queue.lock, flags); - list_add_tail(&rq->lh_request, &irda_rq_queue.request_list); - wake_up(&irda_rq_queue.kick); - atomic_inc(&irda_rq_queue.num_pending); - spin_unlock_irqrestore(&irda_rq_queue.lock, flags); - ret = 1; - } - return ret; -} - -static void irda_request_timer(unsigned long data) -{ - struct irda_request *rq = (struct irda_request *)data; - unsigned long flags; - - spin_lock_irqsave(&irda_rq_queue.lock, flags); - list_add_tail(&rq->lh_request, &irda_rq_queue.request_list); - wake_up(&irda_rq_queue.kick); - spin_unlock_irqrestore(&irda_rq_queue.lock, flags); -} - -static int irda_queue_delayed_request(struct irda_request *rq, unsigned long delay) -{ - int ret = 0; - struct timer_list *timer = &rq->timer; - - if (!test_and_set_bit(0, &rq->pending)) { - timer->expires = jiffies + delay; - timer->function = irda_request_timer; - timer->data = (unsigned long)rq; - atomic_inc(&irda_rq_queue.num_pending); - add_timer(timer); - ret = 1; - } - return ret; -} - -static void run_irda_queue(void) -{ - unsigned long flags; - struct list_head *entry, *tmp; - struct irda_request *rq; - - spin_lock_irqsave(&irda_rq_queue.lock, flags); - list_for_each_safe(entry, tmp, &irda_rq_queue.request_list) { - rq = list_entry(entry, struct irda_request, lh_request); - list_del_init(entry); - spin_unlock_irqrestore(&irda_rq_queue.lock, flags); - - clear_bit(0, &rq->pending); - rq->func(rq->data); - - if (atomic_dec_and_test(&irda_rq_queue.num_pending)) - wake_up(&irda_rq_queue.done); - - spin_lock_irqsave(&irda_rq_queue.lock, flags); - } - spin_unlock_irqrestore(&irda_rq_queue.lock, flags); -} - -static int irda_thread(void *startup) -{ - DECLARE_WAITQUEUE(wait, current); - - daemonize("kIrDAd"); - - irda_rq_queue.thread = current; - - complete((struct completion *)startup); - - while (irda_rq_queue.thread != NULL) { - - /* We use TASK_INTERRUPTIBLE, rather than - * TASK_UNINTERRUPTIBLE. Andrew Morton made this - * change ; he told me that it is safe, because "signal - * blocking is now handled in daemonize()", he added - * that the problem is that "uninterruptible sleep - * contributes to load average", making user worry. 
- * Jean II */ - set_task_state(current, TASK_INTERRUPTIBLE); - add_wait_queue(&irda_rq_queue.kick, &wait); - if (list_empty(&irda_rq_queue.request_list)) - schedule(); - else - __set_task_state(current, TASK_RUNNING); - remove_wait_queue(&irda_rq_queue.kick, &wait); - - /* make swsusp happy with our thread */ - try_to_freeze(); - - run_irda_queue(); - } - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,35) - reparent_to_init(); -#endif - complete_and_exit(&irda_rq_queue.exit, 0); - /* never reached */ - return 0; -} - - -static void flush_irda_queue(void) -{ - if (atomic_read(&irda_rq_queue.num_pending)) { - - DECLARE_WAITQUEUE(wait, current); - - if (!list_empty(&irda_rq_queue.request_list)) - run_irda_queue(); - - set_task_state(current, TASK_UNINTERRUPTIBLE); - add_wait_queue(&irda_rq_queue.done, &wait); - if (atomic_read(&irda_rq_queue.num_pending)) - schedule(); - else - __set_task_state(current, TASK_RUNNING); - remove_wait_queue(&irda_rq_queue.done, &wait); - } -} - -/* substate handler of the config-fsm to handle the cases where we want - * to wait for transmit completion before changing the port configuration - */ - -static int irda_tx_complete_fsm(struct sir_dev *dev) -{ - struct sir_fsm *fsm = &dev->fsm; - unsigned next_state, delay; - unsigned bytes_left; - - do { - next_state = fsm->substate; /* default: stay in current substate */ - delay = 0; - - switch(fsm->substate) { - - case SIRDEV_STATE_WAIT_XMIT: - if (dev->drv->chars_in_buffer) - bytes_left = dev->drv->chars_in_buffer(dev); - else - bytes_left = 0; - if (!bytes_left) { - next_state = SIRDEV_STATE_WAIT_UNTIL_SENT; - break; - } - - if (dev->speed > 115200) - delay = (bytes_left*8*10000) / (dev->speed/100); - else if (dev->speed > 0) - delay = (bytes_left*10*10000) / (dev->speed/100); - else - delay = 0; - /* expected delay (usec) until remaining bytes are sent */ - if (delay < 100) { - udelay(delay); - delay = 0; - break; - } - /* sleep some longer delay (msec) */ - delay = (delay+999) / 1000; - break; - - case SIRDEV_STATE_WAIT_UNTIL_SENT: - /* block until underlaying hardware buffer are empty */ - if (dev->drv->wait_until_sent) - dev->drv->wait_until_sent(dev); - next_state = SIRDEV_STATE_TX_DONE; - break; - - case SIRDEV_STATE_TX_DONE: - return 0; - - default: - IRDA_ERROR("%s - undefined state\n", __FUNCTION__); - return -EINVAL; - } - fsm->substate = next_state; - } while (delay == 0); - return delay; -} - -/* - * Function irda_config_fsm - * - * State machine to handle the configuration of the device (and attached dongle, if any). - * This handler is scheduled for execution in kIrDAd context, so we can sleep. - * however, kIrDAd is shared by all sir_dev devices so we better don't sleep there too - * long. Instead, for longer delays we start a timer to reschedule us later. - * On entry, fsm->sem is always locked and the netdev xmit queue stopped. - * Both must be unlocked/restarted on completion - but only on final exit. 
- */ - -static void irda_config_fsm(void *data) -{ - struct sir_dev *dev = data; - struct sir_fsm *fsm = &dev->fsm; - int next_state; - int ret = -1; - unsigned delay; - - IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies); - - do { - IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n", - __FUNCTION__, fsm->state, fsm->substate); - - next_state = fsm->state; - delay = 0; - - switch(fsm->state) { - - case SIRDEV_STATE_DONGLE_OPEN: - if (dev->dongle_drv != NULL) { - ret = sirdev_put_dongle(dev); - if (ret) { - fsm->result = -EINVAL; - next_state = SIRDEV_STATE_ERROR; - break; - } - } - - /* Initialize dongle */ - ret = sirdev_get_dongle(dev, fsm->param); - if (ret) { - fsm->result = ret; - next_state = SIRDEV_STATE_ERROR; - break; - } - - /* Dongles are powered through the modem control lines which - * were just set during open. Before resetting, let's wait for - * the power to stabilize. This is what some dongle drivers did - * in open before, while others didn't - should be safe anyway. - */ - - delay = 50; - fsm->substate = SIRDEV_STATE_DONGLE_RESET; - next_state = SIRDEV_STATE_DONGLE_RESET; - - fsm->param = 9600; - - break; - - case SIRDEV_STATE_DONGLE_CLOSE: - /* shouldn't we just treat this as success=? */ - if (dev->dongle_drv == NULL) { - fsm->result = -EINVAL; - next_state = SIRDEV_STATE_ERROR; - break; - } - - ret = sirdev_put_dongle(dev); - if (ret) { - fsm->result = ret; - next_state = SIRDEV_STATE_ERROR; - break; - } - next_state = SIRDEV_STATE_DONE; - break; - - case SIRDEV_STATE_SET_DTR_RTS: - ret = sirdev_set_dtr_rts(dev, - (fsm->param&0x02) ? TRUE : FALSE, - (fsm->param&0x01) ? TRUE : FALSE); - next_state = SIRDEV_STATE_DONE; - break; - - case SIRDEV_STATE_SET_SPEED: - fsm->substate = SIRDEV_STATE_WAIT_XMIT; - next_state = SIRDEV_STATE_DONGLE_CHECK; - break; - - case SIRDEV_STATE_DONGLE_CHECK: - ret = irda_tx_complete_fsm(dev); - if (ret < 0) { - fsm->result = ret; - next_state = SIRDEV_STATE_ERROR; - break; - } - if ((delay=ret) != 0) - break; - - if (dev->dongle_drv) { - fsm->substate = SIRDEV_STATE_DONGLE_RESET; - next_state = SIRDEV_STATE_DONGLE_RESET; - } - else { - dev->speed = fsm->param; - next_state = SIRDEV_STATE_PORT_SPEED; - } - break; - - case SIRDEV_STATE_DONGLE_RESET: - if (dev->dongle_drv->reset) { - ret = dev->dongle_drv->reset(dev); - if (ret < 0) { - fsm->result = ret; - next_state = SIRDEV_STATE_ERROR; - break; - } - } - else - ret = 0; - if ((delay=ret) == 0) { - /* set serial port according to dongle default speed */ - if (dev->drv->set_speed) - dev->drv->set_speed(dev, dev->speed); - fsm->substate = SIRDEV_STATE_DONGLE_SPEED; - next_state = SIRDEV_STATE_DONGLE_SPEED; - } - break; - - case SIRDEV_STATE_DONGLE_SPEED: - if (dev->dongle_drv->reset) { - ret = dev->dongle_drv->set_speed(dev, fsm->param); - if (ret < 0) { - fsm->result = ret; - next_state = SIRDEV_STATE_ERROR; - break; - } - } - else - ret = 0; - if ((delay=ret) == 0) - next_state = SIRDEV_STATE_PORT_SPEED; - break; - - case SIRDEV_STATE_PORT_SPEED: - /* Finally we are ready to change the serial port speed */ - if (dev->drv->set_speed) - dev->drv->set_speed(dev, dev->speed); - dev->new_speed = 0; - next_state = SIRDEV_STATE_DONE; - break; - - case SIRDEV_STATE_DONE: - /* Signal network layer so it can send more frames */ - netif_wake_queue(dev->netdev); - next_state = SIRDEV_STATE_COMPLETE; - break; - - default: - IRDA_ERROR("%s - undefined state\n", __FUNCTION__); - fsm->result = -EINVAL; - /* fall thru */ - - case SIRDEV_STATE_ERROR: - IRDA_ERROR("%s - error: %d\n", __FUNCTION__, 
fsm->result); - -#if 0 /* don't enable this before we have netdev->tx_timeout to recover */ - netif_stop_queue(dev->netdev); -#else - netif_wake_queue(dev->netdev); -#endif - /* fall thru */ - - case SIRDEV_STATE_COMPLETE: - /* config change finished, so we are not busy any longer */ - sirdev_enable_rx(dev); - up(&fsm->sem); - return; - } - fsm->state = next_state; - } while(!delay); - - irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay)); -} - -/* schedule some device configuration task for execution by kIrDAd - * on behalf of the above state machine. - * can be called from process or interrupt/tasklet context. - */ - -int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param) -{ - struct sir_fsm *fsm = &dev->fsm; - int xmit_was_down; - - IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param); - - if (down_trylock(&fsm->sem)) { - if (in_interrupt() || in_atomic() || irqs_disabled()) { - IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__); - return -EWOULDBLOCK; - } else - down(&fsm->sem); - } - - if (fsm->state == SIRDEV_STATE_DEAD) { - /* race with sirdev_close should never happen */ - IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__); - up(&fsm->sem); - return -ESTALE; /* or better EPIPE? */ - } - - xmit_was_down = netif_queue_stopped(dev->netdev); - netif_stop_queue(dev->netdev); - atomic_set(&dev->enable_rx, 0); - - fsm->state = initial_state; - fsm->param = param; - fsm->result = 0; - - INIT_LIST_HEAD(&fsm->rq.lh_request); - fsm->rq.pending = 0; - fsm->rq.func = irda_config_fsm; - fsm->rq.data = dev; - - if (!irda_queue_request(&fsm->rq)) { /* returns 0 on error! */ - atomic_set(&dev->enable_rx, 1); - if (!xmit_was_down) - netif_wake_queue(dev->netdev); - up(&fsm->sem); - return -EAGAIN; - } - return 0; -} - -static int __init irda_thread_create(void) -{ - struct completion startup; - int pid; - - spin_lock_init(&irda_rq_queue.lock); - irda_rq_queue.thread = NULL; - INIT_LIST_HEAD(&irda_rq_queue.request_list); - init_waitqueue_head(&irda_rq_queue.kick); - init_waitqueue_head(&irda_rq_queue.done); - atomic_set(&irda_rq_queue.num_pending, 0); - - init_completion(&startup); - pid = kernel_thread(irda_thread, &startup, CLONE_FS|CLONE_FILES); - if (pid <= 0) - return -EAGAIN; - else - wait_for_completion(&startup); - - return 0; -} - -static void __exit irda_thread_join(void) -{ - if (irda_rq_queue.thread) { - flush_irda_queue(); - init_completion(&irda_rq_queue.exit); - irda_rq_queue.thread = NULL; - wake_up(&irda_rq_queue.kick); - wait_for_completion(&irda_rq_queue.exit); - } -} - -module_init(irda_thread_create); -module_exit(irda_thread_join); - -MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>"); -MODULE_DESCRIPTION("IrDA SIR core"); -MODULE_LICENSE("GPL"); - diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index 58f76cefbc8..a4674044bd6 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c @@ -54,6 +54,7 @@ #include <linux/rtnetlink.h> #include <linux/serial_reg.h> #include <linux/dma-mapping.h> +#include <linux/pnp.h> #include <linux/platform_device.h> #include <asm/io.h> @@ -358,6 +359,16 @@ static inline void register_bank(int iobase, int bank) iobase + IRCC_MASTER); } +#ifdef CONFIG_PNP +/* PNP hotplug support */ +static const struct pnp_device_id smsc_ircc_pnp_table[] = { + { .id = "SMCf010", .driver_data = 0 }, + /* and presumably others */ + { } +}; +MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table); +#endif + 
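The new pnp_device_id table only advertises the SMCf010 ID so hotplug tooling can auto-load the module; as the REVISIT note further down concedes, the driver itself still probes the hardware directly. Purely as a hedged sketch (none of this is in the patch), binding a pnp_driver to that table might look roughly like the following; smsc_ircc_pnp_probe() is invented, smsc_ircc_open() is assumed to be the driver's existing setup entry point, and the resource indices are guesses:

#ifdef CONFIG_PNP
static int smsc_ircc_pnp_probe(struct pnp_dev *dev,
			       const struct pnp_device_id *dev_id)
{
	unsigned int firbase, sirbase;
	u8 dma, irq;

	/* assume the SMCf010 node describes two I/O windows,
	 * one IRQ and one DMA channel (not verified here) */
	if (!pnp_port_valid(dev, 0) || !pnp_port_valid(dev, 1))
		return -ENODEV;

	sirbase = pnp_port_start(dev, 0);
	firbase = pnp_port_start(dev, 1);
	dma = pnp_dma_valid(dev, 0) ? pnp_dma(dev, 0) : 0;
	irq = pnp_irq_valid(dev, 0) ? pnp_irq(dev, 0) : 0;

	/* hand the firmware-described resources to the existing
	 * open path instead of poking legacy addresses */
	return smsc_ircc_open(firbase, sirbase, dma, irq) ? -ENODEV : 0;
}

static struct pnp_driver smsc_ircc_pnp_driver = {
	.name		= "smsc-ircc2",
	.id_table	= smsc_ircc_pnp_table,
	.probe		= smsc_ircc_pnp_probe,
};
#endif

Registering this with pnp_register_driver(&smsc_ircc_pnp_driver) from the module init path is what would eventually let PNP-reported information replace the hardware probing and manual platform_device creation that the probing comment below complains about.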
/******************************************************************************* * @@ -2072,7 +2083,8 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self) /* PROBING * - * + * REVISIT we can be told about the device by PNP, and should use that info + * instead of probing hardware and creating a platform_device ... */ static int __init smsc_ircc_look_for_chips(void) diff --git a/drivers/net/ne.c b/drivers/net/ne.c index 93c494bcd18..b32765215f7 100644 --- a/drivers/net/ne.c +++ b/drivers/net/ne.c @@ -139,8 +139,9 @@ bad_clone_list[] __initdata = { #if defined(CONFIG_PLAT_MAPPI) # define DCR_VAL 0x4b -#elif defined(CONFIG_PLAT_OAKS32R) -# define DCR_VAL 0x48 +#elif defined(CONFIG_PLAT_OAKS32R) || \ + defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938) +# define DCR_VAL 0x48 /* 8-bit mode */ #else # define DCR_VAL 0x49 #endif @@ -396,10 +397,22 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) /* We must set the 8390 for word mode. */ outb_p(DCR_VAL, ioaddr + EN0_DCFG); start_page = NESM_START_PG; - stop_page = NESM_STOP_PG; + + /* + * Realtek RTL8019AS datasheet says that the PSTOP register + * shouldn't exceed 0x60 in 8-bit mode. + * This chip can be identified by reading the signature from + * the remote byte count registers (otherwise write-only)... + */ + if ((DCR_VAL & 0x01) == 0 && /* 8-bit mode */ + inb(ioaddr + EN0_RCNTLO) == 0x50 && + inb(ioaddr + EN0_RCNTHI) == 0x70) + stop_page = 0x60; + else + stop_page = NESM_STOP_PG; } else { start_page = NE1SM_START_PG; - stop_page = NE1SM_STOP_PG; + stop_page = NE1SM_STOP_PG; } #if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R) @@ -509,15 +522,9 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) ei_status.name = name; ei_status.tx_start_page = start_page; ei_status.stop_page = stop_page; -#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938) - wordlength = 1; -#endif -#ifdef CONFIG_PLAT_OAKS32R - ei_status.word16 = 0; -#else - ei_status.word16 = (wordlength == 2); -#endif + /* Use 16-bit mode only if this wasn't overridden by DCR_VAL */ + ei_status.word16 = (wordlength == 2 && (DCR_VAL & 0x01)); ei_status.rx_start_page = start_page + TX_PAGES; #ifdef PACKETBUF_MEMSIZE diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 459443b572c..1b236bdf6b9 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -60,8 +60,10 @@ int mdiobus_register(struct mii_bus *bus) for (i = 0; i < PHY_MAX_ADDR; i++) { struct phy_device *phydev; - if (bus->phy_mask & (1 << i)) + if (bus->phy_mask & (1 << i)) { + bus->phy_map[i] = NULL; continue; + } phydev = get_phy_device(bus, i); diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index b82191d2bee..f5a3bf4d959 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c @@ -127,6 +127,7 @@ static const struct mii_chip_info { } mii_chip_table[] = { { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN }, { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN }, + { "SiS 900 on Foxconn 661 7MI", 0x0143, 0xBC70, LAN }, { "Altimata AC101LF PHY", 0x0022, 0x5520, LAN }, { "ADM 7001 LAN PHY", 0x002e, 0xcc60, LAN }, { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN }, diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 227df9876a2..ffd267fab21 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -51,7 +51,7 @@ #include "sky2.h" #define DRV_NAME "sky2" -#define DRV_VERSION "1.2" +#define DRV_VERSION "1.3" #define PFX DRV_NAME " " /* @@ -79,6 +79,8 @@ #define 
NAPI_WEIGHT 64 #define PHY_RETRIES 1000 +#define RING_NEXT(x,s) (((x)+1) & ((s)-1)) + static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR @@ -96,6 +98,10 @@ static int disable_msi = 0; module_param(disable_msi, int, 0); MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); +static int idle_timeout = 100; +module_param(idle_timeout, int, 0); +MODULE_PARM_DESC(idle_timeout, "Idle timeout workaround for lost interrupts (ms)"); + static const struct pci_device_id sky2_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, @@ -122,6 +128,7 @@ MODULE_DEVICE_TABLE(pci, sky2_id_table); /* Avoid conditionals by using array */ static const unsigned txqaddr[] = { Q_XA1, Q_XA2 }; static const unsigned rxqaddr[] = { Q_R1, Q_R2 }; +static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 }; /* This driver supports yukon2 chipset only */ static const char *yukon2_name[] = { @@ -298,7 +305,8 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) struct sky2_port *sky2 = netdev_priv(hw->dev[port]); u16 ctrl, ct1000, adv, pg, ledctrl, ledover; - if (sky2->autoneg == AUTONEG_ENABLE && hw->chip_id != CHIP_ID_YUKON_XL) { + if (sky2->autoneg == AUTONEG_ENABLE && + (hw->chip_id != CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) { u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | @@ -326,7 +334,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO); if (sky2->autoneg == AUTONEG_ENABLE && - hw->chip_id == CHIP_ID_YUKON_XL) { + (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) { ctrl &= ~PHY_M_PC_DSC_MSK; ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; } @@ -442,10 +450,11 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); /* set LED Function Control register */ - gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ - PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */ - PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ - PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */ + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ + PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */ + PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ + PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */ /* set Polarity Control register */ gm_phy_write(hw, port, PHY_MARV_PHY_STAT, @@ -459,6 +468,25 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) /* restore page register */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); break; + case CHIP_ID_YUKON_EC_U: + pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); + + /* select page 3 to access LED control register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); + + /* set LED Function Control register */ + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ + PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */ + PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ + PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */ + + /* set Blink Rate in LED Timer Control Register */ + gm_phy_write(hw, port, PHY_MARV_INT_MASK, + ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS)); + /* restore page register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); + break; default: /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */ @@ -467,19 +495,21 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) ledover |= 
PHY_M_LED_MO_RX(MO_LED_OFF); } - if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) { + if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_A1) { /* apply fixes in PHY AFE */ - gm_phy_write(hw, port, 22, 255); + pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255); + /* increase differential signal amplitude in 10BASE-T */ - gm_phy_write(hw, port, 24, 0xaa99); - gm_phy_write(hw, port, 23, 0x2011); + gm_phy_write(hw, port, 0x18, 0xaa99); + gm_phy_write(hw, port, 0x17, 0x2011); /* fix for IEEE A/B Symmetry failure in 1000BASE-T */ - gm_phy_write(hw, port, 24, 0xa204); - gm_phy_write(hw, port, 23, 0x2002); + gm_phy_write(hw, port, 0x18, 0xa204); + gm_phy_write(hw, port, 0x17, 0x2002); /* set page register to 0 */ - gm_phy_write(hw, port, 22, 0); + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); } else { gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); @@ -553,6 +583,11 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) if (sky2->duplex == DUPLEX_FULL) reg |= GM_GPCR_DUP_FULL; + + /* turn off pause in 10/100mbps half duplex */ + else if (sky2->speed != SPEED_1000 && + hw->chip_id != CHIP_ID_YUKON_EC_U) + sky2->tx_pause = sky2->rx_pause = 0; } else reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; @@ -719,7 +754,7 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2) { struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod; - sky2->tx_prod = (sky2->tx_prod + 1) % TX_RING_SIZE; + sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE); return le; } @@ -735,7 +770,7 @@ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx) static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) { struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put; - sky2->rx_put = (sky2->rx_put + 1) % RX_LE_SIZE; + sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE); return le; } @@ -1050,7 +1085,7 @@ static int sky2_up(struct net_device *dev) /* Enable interrupts from phy/mac for port */ imask = sky2_read32(hw, B0_IMSK); - imask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; + imask |= portirq_msk[port]; sky2_write32(hw, B0_IMSK, imask); return 0; @@ -1078,7 +1113,7 @@ err_out: /* Modular subtraction in ring */ static inline int tx_dist(unsigned tail, unsigned head) { - return (head - tail) % TX_RING_SIZE; + return (head - tail) & (TX_RING_SIZE - 1); } /* Number of list elements available for next tx */ @@ -1255,7 +1290,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) le->opcode = OP_BUFFER | HW_OWNER; fre = sky2->tx_ring - + ((re - sky2->tx_ring) + i + 1) % TX_RING_SIZE; + + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE); pci_unmap_addr_set(fre, mapaddr, mapping); } @@ -1315,7 +1350,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { struct tx_ring_info *fre; - fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE; + fre = sky2->tx_ring + RING_NEXT(put + i, TX_RING_SIZE); pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr), skb_shinfo(skb)->frags[i].size, PCI_DMA_TODEVICE); @@ -1401,7 +1436,7 @@ static int sky2_down(struct net_device *dev) /* Disable port IRQ */ imask = sky2_read32(hw, B0_IMSK); - imask &= ~(sky2->port == 0) ? 
Y2_IS_PORT_1 : Y2_IS_PORT_2; + imask &= ~portirq_msk[port]; sky2_write32(hw, B0_IMSK, imask); /* turn off LED's */ @@ -1498,17 +1533,26 @@ static void sky2_link_up(struct sky2_port *sky2) sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); - if (hw->chip_id == CHIP_ID_YUKON_XL) { + if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) { u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); + u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */ + + switch(sky2->speed) { + case SPEED_10: + led |= PHY_M_LEDC_INIT_CTRL(7); + break; + + case SPEED_100: + led |= PHY_M_LEDC_STA1_CTRL(7); + break; + + case SPEED_1000: + led |= PHY_M_LEDC_STA0_CTRL(7); + break; + } gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); - gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ - PHY_M_LEDC_INIT_CTRL(sky2->speed == - SPEED_10 ? 7 : 0) | - PHY_M_LEDC_STA1_CTRL(sky2->speed == - SPEED_100 ? 7 : 0) | - PHY_M_LEDC_STA0_CTRL(sky2->speed == - SPEED_1000 ? 7 : 0)); + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, led); gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); } @@ -1583,7 +1627,7 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux) sky2->speed = sky2_phy_speed(hw, aux); /* Pause bits are offset (9..8) */ - if (hw->chip_id == CHIP_ID_YUKON_XL) + if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) aux >>= 6; sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0; @@ -1859,35 +1903,28 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last) static int sky2_status_intr(struct sky2_hw *hw, int to_do) { int work_done = 0; + u16 hwidx = sky2_read16(hw, STAT_PUT_IDX); rmb(); - for(;;) { + while (hw->st_idx != hwidx) { struct sky2_status_le *le = hw->st_le + hw->st_idx; struct net_device *dev; struct sky2_port *sky2; struct sk_buff *skb; u32 status; u16 length; - u8 link, opcode; - opcode = le->opcode; - if (!opcode) - break; - opcode &= ~HW_OWNER; - - hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE; - le->opcode = 0; + hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE); - link = le->link; - BUG_ON(link >= 2); - dev = hw->dev[link]; + BUG_ON(le->link >= 2); + dev = hw->dev[le->link]; sky2 = netdev_priv(dev); length = le->length; status = le->status; - switch (opcode) { + switch (le->opcode & ~HW_OWNER) { case OP_RXSTAT: skb = sky2_receive(sky2, length, status); if (!skb) @@ -1927,7 +1964,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) case OP_TXINDEXLE: /* TX index reports status for both ports */ - sky2_tx_done(hw->dev[0], status & 0xffff); + BUILD_BUG_ON(TX_RING_SIZE > 0x1000); + sky2_tx_done(hw->dev[0], status & 0xfff); if (hw->dev[1]) sky2_tx_done(hw->dev[1], ((status >> 24) & 0xff) @@ -1937,8 +1975,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) default: if (net_ratelimit()) printk(KERN_WARNING PFX - "unknown status opcode 0x%x\n", opcode); - break; + "unknown status opcode 0x%x\n", le->opcode); + goto exit_loop; } } @@ -2089,12 +2127,13 @@ static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port, */ static void sky2_idle(unsigned long arg) { - struct net_device *dev = (struct net_device *) arg; + struct sky2_hw *hw = (struct sky2_hw *) arg; + struct net_device *dev = hw->dev[0]; - local_irq_disable(); if (__netif_rx_schedule_prep(dev)) __netif_rx_schedule(dev); - local_irq_enable(); + + mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout)); } @@ -2105,65 +2144,46 @@ static int sky2_poll(struct net_device *dev0, int *budget) int work_done 
= 0; u32 status = sky2_read32(hw, B0_Y2_SP_EISR); - restart_poll: - if (unlikely(status & ~Y2_IS_STAT_BMU)) { - if (status & Y2_IS_HW_ERR) - sky2_hw_intr(hw); - - if (status & Y2_IS_IRQ_PHY1) - sky2_phy_intr(hw, 0); - - if (status & Y2_IS_IRQ_PHY2) - sky2_phy_intr(hw, 1); + if (status & Y2_IS_HW_ERR) + sky2_hw_intr(hw); - if (status & Y2_IS_IRQ_MAC1) - sky2_mac_intr(hw, 0); + if (status & Y2_IS_IRQ_PHY1) + sky2_phy_intr(hw, 0); - if (status & Y2_IS_IRQ_MAC2) - sky2_mac_intr(hw, 1); + if (status & Y2_IS_IRQ_PHY2) + sky2_phy_intr(hw, 1); - if (status & Y2_IS_CHK_RX1) - sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1); + if (status & Y2_IS_IRQ_MAC1) + sky2_mac_intr(hw, 0); - if (status & Y2_IS_CHK_RX2) - sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2); + if (status & Y2_IS_IRQ_MAC2) + sky2_mac_intr(hw, 1); - if (status & Y2_IS_CHK_TXA1) - sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1); + if (status & Y2_IS_CHK_RX1) + sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1); - if (status & Y2_IS_CHK_TXA2) - sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); - } + if (status & Y2_IS_CHK_RX2) + sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2); - if (status & Y2_IS_STAT_BMU) { - work_done += sky2_status_intr(hw, work_limit - work_done); - *budget -= work_done; - dev0->quota -= work_done; + if (status & Y2_IS_CHK_TXA1) + sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1); - if (work_done >= work_limit) - return 1; + if (status & Y2_IS_CHK_TXA2) + sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); + if (status & Y2_IS_STAT_BMU) sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); - } - - mod_timer(&hw->idle_timer, jiffies + HZ); - local_irq_disable(); - __netif_rx_complete(dev0); + work_done = sky2_status_intr(hw, work_limit); + *budget -= work_done; + dev0->quota -= work_done; - status = sky2_read32(hw, B0_Y2_SP_LISR); + if (work_done >= work_limit) + return 1; - if (unlikely(status)) { - /* More work pending, try and keep going */ - if (__netif_rx_schedule_prep(dev0)) { - __netif_rx_reschedule(dev0, work_done); - status = sky2_read32(hw, B0_Y2_SP_EISR); - local_irq_enable(); - goto restart_poll; - } - } + netif_rx_complete(dev0); - local_irq_enable(); + status = sky2_read32(hw, B0_Y2_SP_LISR); return 0; } @@ -2244,13 +2264,6 @@ static int __devinit sky2_reset(struct sky2_hw *hw) return -EOPNOTSUPP; } - /* This chip is new and not tested yet */ - if (hw->chip_id == CHIP_ID_YUKON_EC_U) { - pr_info(PFX "%s: is a version of Yukon 2 chipset that has not been tested yet.\n", - pci_name(hw->pdev)); - pr_info("Please report success/failure to maintainer <shemminger@osdl.org>\n"); - } - /* disable ASF */ if (hw->chip_id <= CHIP_ID_YUKON_EC) { sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); @@ -3302,7 +3315,10 @@ static int __devinit sky2_probe(struct pci_dev *pdev, sky2_write32(hw, B0_IMSK, Y2_IS_BASE); - setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) dev); + setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw); + if (idle_timeout > 0) + mod_timer(&hw->idle_timer, + jiffies + msecs_to_jiffies(idle_timeout)); pci_set_drvdata(pdev, hw); @@ -3342,6 +3358,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev) del_timer_sync(&hw->idle_timer); sky2_write32(hw, B0_IMSK, 0); + synchronize_irq(hw->pdev->irq); + dev0 = hw->dev[0]; dev1 = hw->dev[1]; if (dev1) diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index b026f5653f0..8012994c9b9 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -378,6 +378,9 @@ enum { CHIP_REV_YU_EC_A1 = 0, /* 
Chip Rev. for Yukon-EC A1/A0 */ CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */ + + CHIP_REV_YU_EC_U_A0 = 0, + CHIP_REV_YU_EC_U_A1 = 1, }; /* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 43f5e86fc55..394339d5e87 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c @@ -1652,6 +1652,8 @@ spider_net_enable_card(struct spider_net_card *card) { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE }, { SPIDER_NET_GMRWOLCTRL, 0 }, + { SPIDER_NET_GTESTMD, 0x10000000 }, + { SPIDER_NET_GTTQMSK, 0x00400040 }, { SPIDER_NET_GTESTMD, 0 }, { SPIDER_NET_GMACINTEN, 0 }, @@ -1792,15 +1794,7 @@ spider_net_setup_phy(struct spider_net_card *card) if (phy->def->ops->setup_forced) phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL); - /* the following two writes could be moved to sungem_phy.c */ - /* enable fiber mode */ - spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x9020); - /* LEDs active in both modes, autosense prio = fiber */ - spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x945f); - - /* switch off fibre autoneg */ - spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0xfc01); - spider_net_write_phy(card->netdev, 1, 0x0b, 0x0004); + phy->def->ops->enable_fiber(phy); phy->def->ops->read_link(phy); pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name, diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h index 5922b529a04..3b8d951cf73 100644 --- a/drivers/net/spider_net.h +++ b/drivers/net/spider_net.h @@ -120,6 +120,8 @@ extern char spider_net_driver_name[]; #define SPIDER_NET_GMRUAFILnR 0x00000500 #define SPIDER_NET_GMRUA0FIL15R 0x00000578 +#define SPIDER_NET_GTTQMSK 0x00000934 + /* RX DMA controller registers, all 0x00000a.. are for DMA controller A, * 0x00000b.. for DMA controller B, etc. 
*/ #define SPIDER_NET_GDADCHA 0x00000a00 diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c index 046371ee5bb..b2ddd5e7930 100644 --- a/drivers/net/sungem_phy.c +++ b/drivers/net/sungem_phy.c @@ -329,6 +329,30 @@ static int bcm5421_init(struct mii_phy* phy) return 0; } +static int bcm5421_enable_fiber(struct mii_phy* phy) +{ + /* enable fiber mode */ + phy_write(phy, MII_NCONFIG, 0x9020); + /* LEDs active in both modes, autosense prio = fiber */ + phy_write(phy, MII_NCONFIG, 0x945f); + + /* switch off fibre autoneg */ + phy_write(phy, MII_NCONFIG, 0xfc01); + phy_write(phy, 0x0b, 0x0004); + + return 0; +} + +static int bcm5461_enable_fiber(struct mii_phy* phy) +{ + phy_write(phy, MII_NCONFIG, 0xfc0c); + phy_write(phy, MII_BMCR, 0x4140); + phy_write(phy, MII_NCONFIG, 0xfc0b); + phy_write(phy, MII_BMCR, 0x0140); + + return 0; +} + static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise) { u16 ctl, adv; @@ -762,6 +786,7 @@ static struct mii_phy_ops bcm5421_phy_ops = { .setup_forced = bcm54xx_setup_forced, .poll_link = genmii_poll_link, .read_link = bcm54xx_read_link, + .enable_fiber = bcm5421_enable_fiber, }; static struct mii_phy_def bcm5421_phy_def = { @@ -792,6 +817,25 @@ static struct mii_phy_def bcm5421k2_phy_def = { .ops = &bcm5421k2_phy_ops }; +static struct mii_phy_ops bcm5461_phy_ops = { + .init = bcm5421_init, + .suspend = generic_suspend, + .setup_aneg = bcm54xx_setup_aneg, + .setup_forced = bcm54xx_setup_forced, + .poll_link = genmii_poll_link, + .read_link = bcm54xx_read_link, + .enable_fiber = bcm5461_enable_fiber, +}; + +static struct mii_phy_def bcm5461_phy_def = { + .phy_id = 0x002060c0, + .phy_id_mask = 0xfffffff0, + .name = "BCM5461", + .features = MII_GBIT_FEATURES, + .magic_aneg = 1, + .ops = &bcm5461_phy_ops +}; + /* Broadcom BCM 5462 built-in Vesta */ static struct mii_phy_ops bcm5462V_phy_ops = { .init = bcm5421_init, @@ -857,6 +901,7 @@ static struct mii_phy_def* mii_phy_table[] = { &bcm5411_phy_def, &bcm5421_phy_def, &bcm5421k2_phy_def, + &bcm5461_phy_def, &bcm5462V_phy_def, &marvell_phy_def, &genmii_phy_def, diff --git a/drivers/net/sungem_phy.h b/drivers/net/sungem_phy.h index 430544496c5..69e125197fc 100644 --- a/drivers/net/sungem_phy.h +++ b/drivers/net/sungem_phy.h @@ -12,6 +12,7 @@ struct mii_phy_ops int (*setup_forced)(struct mii_phy *phy, int speed, int fd); int (*poll_link)(struct mii_phy *phy); int (*read_link)(struct mii_phy *phy); + int (*enable_fiber)(struct mii_phy *phy); }; /* Structure used to statically define an mii/gii based PHY */ diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index beeb612be98..2bd9592b75c 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -8454,6 +8454,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) tx_len = 1514; skb = dev_alloc_skb(tx_len); + if (!skb) + return -ENOMEM; + tx_data = skb_put(skb, tx_len); memcpy(tx_data, tp->dev->dev_addr, 6); memset(tx_data + 6, 0x0, 8); diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index 9a06e61df0a..e2982a83ae4 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c @@ -939,9 +939,9 @@ static int bcm43xx_sprom_extract(struct bcm43xx_private *bcm) return 0; } -static void bcm43xx_geo_init(struct bcm43xx_private *bcm) +static int bcm43xx_geo_init(struct bcm43xx_private *bcm) { - struct ieee80211_geo geo; + struct ieee80211_geo *geo; struct ieee80211_channel *chan; int have_a = 0, have_bg = 0; int i; @@ -949,7 +949,10 @@ static void 
bcm43xx_geo_init(struct bcm43xx_private *bcm) struct bcm43xx_phyinfo *phy; const char *iso_country; - memset(&geo, 0, sizeof(geo)); + geo = kzalloc(sizeof(*geo), GFP_KERNEL); + if (!geo) + return -ENOMEM; + for (i = 0; i < bcm->nr_80211_available; i++) { phy = &(bcm->core_80211_ext[i].phy); switch (phy->type) { @@ -967,31 +970,36 @@ static void bcm43xx_geo_init(struct bcm43xx_private *bcm) iso_country = bcm43xx_locale_iso(bcm->sprom.locale); if (have_a) { - for (i = 0, channel = 0; channel < 201; channel++) { - chan = &geo.a[i++]; + for (i = 0, channel = IEEE80211_52GHZ_MIN_CHANNEL; + channel <= IEEE80211_52GHZ_MAX_CHANNEL; channel++) { + chan = &geo->a[i++]; chan->freq = bcm43xx_channel_to_freq_a(channel); chan->channel = channel; } - geo.a_channels = i; + geo->a_channels = i; } if (have_bg) { - for (i = 0, channel = 1; channel < 15; channel++) { - chan = &geo.bg[i++]; + for (i = 0, channel = IEEE80211_24GHZ_MIN_CHANNEL; + channel <= IEEE80211_24GHZ_MAX_CHANNEL; channel++) { + chan = &geo->bg[i++]; chan->freq = bcm43xx_channel_to_freq_bg(channel); chan->channel = channel; } - geo.bg_channels = i; + geo->bg_channels = i; } - memcpy(geo.name, iso_country, 2); + memcpy(geo->name, iso_country, 2); if (0 /*TODO: Outdoor use only */) - geo.name[2] = 'O'; + geo->name[2] = 'O'; else if (0 /*TODO: Indoor use only */) - geo.name[2] = 'I'; + geo->name[2] = 'I'; else - geo.name[2] = ' '; - geo.name[3] = '\0'; + geo->name[2] = ' '; + geo->name[3] = '\0'; + + ieee80211_set_geo(bcm->ieee, geo); + kfree(geo); - ieee80211_set_geo(bcm->ieee, &geo); + return 0; } /* DummyTransmission function, as documented on @@ -3479,16 +3487,17 @@ static int bcm43xx_attach_board(struct bcm43xx_private *bcm) goto err_80211_unwind; bcm43xx_wireless_core_disable(bcm); } + err = bcm43xx_geo_init(bcm); + if (err) + goto err_80211_unwind; bcm43xx_pctl_set_crystal(bcm, 0); /* Set the MAC address in the networking subsystem */ - if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_A) + if (is_valid_ether_addr(bcm->sprom.et1macaddr)) memcpy(bcm->net_dev->dev_addr, bcm->sprom.et1macaddr, 6); else memcpy(bcm->net_dev->dev_addr, bcm->sprom.il0macaddr, 6); - bcm43xx_geo_init(bcm); - snprintf(bcm->nick, IW_ESSID_MAX_SIZE, "Broadcom %04X", bcm->chip_id); diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h index eca79a38594..30a202b258b 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h @@ -118,12 +118,14 @@ int bcm43xx_channel_to_freq(struct bcm43xx_private *bcm, static inline int bcm43xx_is_valid_channel_a(u8 channel) { - return (channel <= 200); + return (channel >= IEEE80211_52GHZ_MIN_CHANNEL + && channel <= IEEE80211_52GHZ_MAX_CHANNEL); } static inline int bcm43xx_is_valid_channel_bg(u8 channel) { - return (channel >= 1 && channel <= 14); + return (channel >= IEEE80211_24GHZ_MIN_CHANNEL + && channel <= IEEE80211_24GHZ_MAX_CHANNEL); } static inline int bcm43xx_is_valid_channel(struct bcm43xx_private *bcm, diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c index 33137165727..b0abac51553 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c @@ -1287,7 +1287,7 @@ static void bcm43xx_phy_initg(struct bcm43xx_private *bcm) if (radio->revision == 8) bcm43xx_phy_write(bcm, 0x0805, 0x3230); bcm43xx_phy_init_pctl(bcm); - if (bcm->chip_id == 0x4306 && bcm->chip_package != 2) { + if (bcm->chip_id == 0x4306 && bcm->chip_package == 2) { 
bcm43xx_phy_write(bcm, 0x0429, bcm43xx_phy_read(bcm, 0x0429) & 0xBFFF); bcm43xx_phy_write(bcm, 0x04C3, diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c index 3edbb481a0a..b45063974ae 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c @@ -182,8 +182,11 @@ static int bcm43xx_wx_set_mode(struct net_device *net_dev, mode = BCM43xx_INITIAL_IWMODE; bcm43xx_lock_mmio(bcm, flags); - if (bcm->ieee->iw_mode != mode) - bcm43xx_set_iwmode(bcm, mode); + if (bcm->initialized) { + if (bcm->ieee->iw_mode != mode) + bcm43xx_set_iwmode(bcm, mode); + } else + bcm->ieee->iw_mode = mode; bcm43xx_unlock_mmio(bcm, flags); return 0; diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c index cb30d9c1153..0c9c2f400bf 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c @@ -219,6 +219,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ahc->flags |= AHC_39BIT_ADDRESSING; } else { if (dma_set_mask(dev, DMA_32BIT_MASK)) { + ahc_free(ahc); printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n"); return (-ENODEV); } diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c index 5f586140e05..3adecef2178 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c @@ -2036,12 +2036,12 @@ ahc_pci_resume(struct ahc_softc *ahc) * that the OS doesn't know about and rely on our chip * reset handler to handle the rest. */ - ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4, - ahc->bus_softc.pci_softc.devconfig); - ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1, - ahc->bus_softc.pci_softc.command); - ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1, - ahc->bus_softc.pci_softc.csize_lattime); + ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, + ahc->bus_softc.pci_softc.devconfig, /*bytes*/4); + ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, + ahc->bus_softc.pci_softc.command, /*bytes*/1); + ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, + ahc->bus_softc.pci_softc.csize_lattime, /*bytes*/1); if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) { struct seeprom_descriptor sd; u_int sxfrctl1; diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 0a8ad37ae89..2e9be83a697 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -739,7 +739,8 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) { struct viosrp_adapter_info *req; struct srp_event_struct *evt_struct; - + dma_addr_t addr; + evt_struct = get_event_struct(&hostdata->pool); if (!evt_struct) { printk(KERN_ERR "ibmvscsi: couldn't allocate an event " @@ -757,10 +758,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) req->common.type = VIOSRP_ADAPTER_INFO_TYPE; req->common.length = sizeof(hostdata->madapter_info); - req->buffer = dma_map_single(hostdata->dev, - &hostdata->madapter_info, - sizeof(hostdata->madapter_info), - DMA_BIDIRECTIONAL); + req->buffer = addr = dma_map_single(hostdata->dev, + &hostdata->madapter_info, + sizeof(hostdata->madapter_info), + DMA_BIDIRECTIONAL); if (dma_mapping_error(req->buffer)) { printk(KERN_ERR @@ -770,8 +771,13 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) return; } - if (ibmvscsi_send_srp_event(evt_struct, hostdata)) + if (ibmvscsi_send_srp_event(evt_struct, hostdata)) { printk(KERN_ERR 
"ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n"); + dma_unmap_single(hostdata->dev, + addr, + sizeof(hostdata->madapter_info), + DMA_BIDIRECTIONAL); + } }; /** @@ -1259,6 +1265,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, { struct viosrp_host_config *host_config; struct srp_event_struct *evt_struct; + dma_addr_t addr; int rc; evt_struct = get_event_struct(&hostdata->pool); @@ -1279,8 +1286,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, memset(host_config, 0x00, sizeof(*host_config)); host_config->common.type = VIOSRP_HOST_CONFIG_TYPE; host_config->common.length = length; - host_config->buffer = dma_map_single(hostdata->dev, buffer, length, - DMA_BIDIRECTIONAL); + host_config->buffer = addr = dma_map_single(hostdata->dev, buffer, + length, + DMA_BIDIRECTIONAL); if (dma_mapping_error(host_config->buffer)) { printk(KERN_ERR @@ -1291,11 +1299,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, init_completion(&evt_struct->comp); rc = ibmvscsi_send_srp_event(evt_struct, hostdata); - if (rc == 0) { + if (rc == 0) wait_for_completion(&evt_struct->comp); - dma_unmap_single(hostdata->dev, host_config->buffer, - length, DMA_BIDIRECTIONAL); - } + dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL); return rc; } diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index fad607b2e6f..ee22173fce4 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -27,7 +27,6 @@ void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); -void lpfc_set_slim(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *, uint32_t); void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *); diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 8932b1be2b6..41cf5d3ea6c 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -113,6 +113,7 @@ struct lpfc_nodelist { #define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from NPR list */ #define NLP_DELAY_REMOVE 0x4000000 /* Defer removal till end of DSM */ +#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */ /* Defines for list searchs */ #define NLP_SEARCH_MAPPED 0x1 /* search mapped */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 4813beaaca8..283b7d824c3 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -302,10 +302,6 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0)) goto fail_free_mbox; - /* - * set_slim mailbox command needs to execute first, - * queue this command to be processed later. 
- */ mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; mbox->context2 = ndlp; @@ -781,25 +777,26 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, if (disc && phba->num_disc_nodes) { /* Check to see if there are more PLOGIs to be sent */ lpfc_more_plogi(phba); - } - if (phba->num_disc_nodes == 0) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_NDISC_ACTIVE; - spin_unlock_irq(phba->host->host_lock); + if (phba->num_disc_nodes == 0) { + spin_lock_irq(phba->host->host_lock); + phba->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(phba->host->host_lock); - lpfc_can_disctmo(phba); - if (phba->fc_flag & FC_RSCN_MODE) { - /* Check to see if more RSCNs came in while we were - * processing this one. - */ - if ((phba->fc_rscn_id_cnt == 0) && - (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_RSCN_MODE; - spin_unlock_irq(phba->host->host_lock); - } else { - lpfc_els_handle_rscn(phba); + lpfc_can_disctmo(phba); + if (phba->fc_flag & FC_RSCN_MODE) { + /* + * Check to see if more RSCNs came in while + * we were processing this one. + */ + if ((phba->fc_rscn_id_cnt == 0) && + (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { + spin_lock_irq(phba->host->host_lock); + phba->fc_flag &= ~FC_RSCN_MODE; + spin_unlock_irq(phba->host->host_lock); + } else { + lpfc_els_handle_rscn(phba); + } } } } @@ -1263,7 +1260,7 @@ lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; - cmdsize = 2 * (sizeof (uint32_t) + sizeof (struct lpfc_name)); + cmdsize = (2 * sizeof (uint32_t)) + sizeof (struct lpfc_name); elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_LOGO); if (!elsiocb) @@ -1451,22 +1448,23 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp) * PLOGIs to be sent */ lpfc_more_plogi(phba); - } - if (phba->num_disc_nodes == 0) { - phba->fc_flag &= ~FC_NDISC_ACTIVE; - lpfc_can_disctmo(phba); - if (phba->fc_flag & FC_RSCN_MODE) { - /* Check to see if more RSCNs - * came in while we were - * processing this one. - */ - if((phba->fc_rscn_id_cnt==0) && - (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { - phba->fc_flag &= ~FC_RSCN_MODE; - } - else { - lpfc_els_handle_rscn(phba); + if (phba->num_disc_nodes == 0) { + phba->fc_flag &= ~FC_NDISC_ACTIVE; + lpfc_can_disctmo(phba); + if (phba->fc_flag & FC_RSCN_MODE) { + /* + * Check to see if more RSCNs + * came in while we were + * processing this one. + */ + if((phba->fc_rscn_id_cnt==0) && + !(phba->fc_flag & FC_RSCN_DISCOVERY)) { + phba->fc_flag &= ~FC_RSCN_MODE; + } + else { + lpfc_els_handle_rscn(phba); + } } } } @@ -1872,9 +1870,6 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, if (mbox) { if ((rspiocb->iocb.ulpStatus == 0) && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { - /* set_slim mailbox command needs to execute first, - * queue this command to be processed later. 
- */ lpfc_unreg_rpi(phba, ndlp); mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; mbox->context2 = ndlp; @@ -1920,6 +1915,7 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag, uint8_t *pcmd; uint16_t cmdsize; int rc; + ELS_PKT *els_pkt_ptr; psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ @@ -1958,6 +1954,23 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag, pcmd += sizeof (uint32_t); memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm)); break; + case ELS_CMD_PRLO: + cmdsize = sizeof (uint32_t) + sizeof (PRLO); + elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, + ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); + if (!elsiocb) + return 1; + + icmd = &elsiocb->iocb; + icmd->ulpContext = oldcmd->ulpContext; /* Xri */ + pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); + + memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, + sizeof (uint32_t) + sizeof (PRLO)); + *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; + els_pkt_ptr = (ELS_PKT *) pcmd; + els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; + break; default: return 1; } @@ -2498,7 +2511,7 @@ lpfc_els_rcv_rscn(struct lpfc_hba * phba, /* If we are about to begin discovery, just ACC the RSCN. * Discovery processing will satisfy it. */ - if (phba->hba_state < LPFC_NS_QRY) { + if (phba->hba_state <= LPFC_NS_QRY) { lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode); return 0; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 6721e679df6..adb086009ae 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -311,8 +311,8 @@ lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2, evtp->evt_arg2 = arg2; evtp->evt = evt; - list_add_tail(&evtp->evt_listp, &phba->work_list); spin_lock_irq(phba->host->host_lock); + list_add_tail(&evtp->evt_listp, &phba->work_list); if (phba->work_wait) wake_up(phba->work_wait); spin_unlock_irq(phba->host->host_lock); @@ -1071,10 +1071,6 @@ lpfc_register_remote_port(struct lpfc_hba * phba, /* initialize static port data */ rport->maxframe_size = ndlp->nlp_maxframe; rport->supported_classes = ndlp->nlp_class_sup; - if ((rport->scsi_target_id != -1) && - (rport->scsi_target_id < MAX_FCP_TARGET)) { - ndlp->nlp_sid = rport->scsi_target_id; - } rdata = rport->dd_data; rdata->pnode = ndlp; @@ -1087,6 +1083,10 @@ lpfc_register_remote_port(struct lpfc_hba * phba, if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) fc_remote_port_rolechg(rport, rport_ids.roles); + if ((rport->scsi_target_id != -1) && + (rport->scsi_target_id < MAX_FCP_TARGET)) { + ndlp->nlp_sid = rport->scsi_target_id; + } return; } @@ -1238,6 +1238,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list) evt_listp); } + nlp->nlp_flag &= ~NLP_NODEV_REMOVE; nlp->nlp_type |= NLP_FC_NODE; break; case NLP_MAPPED_LIST: @@ -1258,6 +1259,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list) evt_listp); } + nlp->nlp_flag &= ~NLP_NODEV_REMOVE; break; case NLP_NPR_LIST: nlp->nlp_flag |= list; @@ -1402,6 +1404,8 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba, if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) return 1; case CMD_ELS_REQUEST64_CR: + if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID) + return 1; case CMD_XMIT_ELS_RSP64_CX: if (iocb->context1 == (uint8_t *) ndlp) return 1; @@ -1901,10 +1905,8 @@ lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did) */ if (ndlp->nlp_flag & NLP_DELAY_TMO) lpfc_cancel_retry_delay_tmo(phba, ndlp); - } else { - ndlp->nlp_flag &= 
~NLP_NPR_2B_DISC; + } else ndlp = NULL; - } } else { flg = ndlp->nlp_flag & NLP_LIST_MASK; if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST)) diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 54d04188f7c..eedf9880136 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -449,6 +449,7 @@ struct serv_parm { /* Structure is in Big Endian format */ #define ELS_CMD_RRQ 0x12000000 #define ELS_CMD_PRLI 0x20100014 #define ELS_CMD_PRLO 0x21100014 +#define ELS_CMD_PRLO_ACC 0x02100014 #define ELS_CMD_PDISC 0x50000000 #define ELS_CMD_FDISC 0x51000000 #define ELS_CMD_ADISC 0x52000000 @@ -484,6 +485,7 @@ struct serv_parm { /* Structure is in Big Endian format */ #define ELS_CMD_RRQ 0x12 #define ELS_CMD_PRLI 0x14001020 #define ELS_CMD_PRLO 0x14001021 +#define ELS_CMD_PRLO_ACC 0x14001002 #define ELS_CMD_PDISC 0x50 #define ELS_CMD_FDISC 0x51 #define ELS_CMD_ADISC 0x52 @@ -1539,6 +1541,7 @@ typedef struct { #define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */ #define FLAGS_LINK_SPEED 0x0800 /* Bit 11 */ +#define FLAGS_IMED_ABORT 0x04000 /* Bit 14 */ uint32_t link_speed; #define LINK_SPEED_AUTO 0 /* Auto selection */ diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 66d5d003555..908d0f27706 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -294,15 +294,6 @@ lpfc_config_port_post(struct lpfc_hba * phba) } } - /* This should turn on DELAYED ABTS for ELS timeouts */ - lpfc_set_slim(phba, pmb, 0x052198, 0x1); - if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { - phba->hba_state = LPFC_HBA_ERROR; - mempool_free( pmb, phba->mbox_mem_pool); - return -EIO; - } - - lpfc_read_config(phba, pmb); if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { lpfc_printf_log(phba, @@ -804,7 +795,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp) int max_speed; char * ports; char * bus; - } m; + } m = {"<Unknown>", 0, "", ""}; pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); ports = (hdrtype == 0x80) ? 
"2-port " : ""; @@ -1627,7 +1618,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) error = lpfc_alloc_sysfs_attr(phba); if (error) - goto out_kthread_stop; + goto out_remove_host; error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ, LPFC_DRIVER_NAME, phba); @@ -1644,8 +1635,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; error = lpfc_sli_hba_setup(phba); - if (error) + if (error) { + error = -ENODEV; goto out_free_irq; + } if (phba->cfg_poll & DISABLE_FCP_RING_INT) { spin_lock_irq(phba->host->host_lock); @@ -1700,6 +1693,9 @@ out_free_irq: free_irq(phba->pcidev->irq, phba); out_free_sysfs_attr: lpfc_free_sysfs_attr(phba); +out_remove_host: + fc_remove_host(phba->host); + scsi_remove_host(phba->host); out_kthread_stop: kthread_stop(phba->worker_thread); out_free_iocbq: @@ -1721,12 +1717,14 @@ out_iounmap_slim: out_idr_remove: idr_remove(&lpfc_hba_index, phba->brd_no); out_put_host: + phba->host = NULL; scsi_host_put(host); out_release_regions: pci_release_regions(pdev); out_disable_device: pci_disable_device(pdev); out: + pci_set_drvdata(pdev, NULL); return error; } diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index c585e2b2e58..e42f22aaf71 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -200,6 +200,9 @@ lpfc_init_link(struct lpfc_hba * phba, break; } + /* Enable asynchronous ABTS responses from firmware */ + mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT; + /* NEW_FEATURE * Setting up the link speed */ @@ -292,36 +295,6 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb) return; } -/***********************************************/ - -/* command to write slim */ -/***********************************************/ -void -lpfc_set_slim(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t addr, - uint32_t value) -{ - MAILBOX_t *mb; - - mb = &pmb->mb; - memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); - - /* addr = 0x090597 is AUTO ABTS disable for ELS commands */ - /* addr = 0x052198 is DELAYED ABTS enable for ELS commands */ - - /* - * Always turn on DELAYED ABTS for ELS timeouts - */ - if ((addr == 0x052198) && (value == 0)) - value = 1; - - mb->un.varWords[0] = addr; - mb->un.varWords[1] = value; - - mb->mbxCommand = MBX_SET_SLIM; - mb->mbxOwner = OWN_HOST; - return; -} - /**********************************************/ /* lpfc_read_nv Issue a READ CONFIG */ /* mailbox command */ diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 3d77bd999b7..27d60ad897c 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -465,14 +465,18 @@ lpfc_rcv_padisc(struct lpfc_hba * phba, static int lpfc_rcv_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, - struct lpfc_iocbq *cmdiocb) + struct lpfc_iocbq *cmdiocb, + uint32_t els_cmd) { /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */ /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary * PLOGIs during LOGO storms from a device. 
*/ ndlp->nlp_flag |= NLP_LOGO_ACC; - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); + if (els_cmd == ELS_CMD_PRLO) + lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); + else + lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); if (!(ndlp->nlp_type & NLP_FABRIC) || (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { @@ -681,7 +685,7 @@ lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba, /* software abort outstanding PLOGI */ lpfc_els_abort(phba, ndlp, 1); - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } @@ -788,10 +792,6 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba, if (lpfc_reg_login (phba, irsp->un.elsreq64.remoteID, (uint8_t *) sp, mbox, 0) == 0) { - /* set_slim mailbox command needs to - * execute first, queue this command to - * be processed later. - */ switch (ndlp->nlp_DID) { case NameServer_DID: mbox->mbox_cmpl = @@ -832,11 +832,17 @@ static uint32_t lpfc_device_rm_plogi_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { - /* software abort outstanding PLOGI */ - lpfc_els_abort(phba, ndlp, 1); + if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { + ndlp->nlp_flag |= NLP_NODEV_REMOVE; + return ndlp->nlp_state; + } + else { + /* software abort outstanding PLOGI */ + lpfc_els_abort(phba, ndlp, 1); - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - return NLP_STE_FREED_NODE; + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; + } } static uint32_t @@ -851,7 +857,7 @@ lpfc_device_recov_plogi_issue(struct lpfc_hba * phba, ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(phba->host->host_lock); return ndlp->nlp_state; @@ -905,7 +911,7 @@ lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba, /* software abort outstanding ADISC */ lpfc_els_abort(phba, ndlp, 0); - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } @@ -932,7 +938,7 @@ lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; /* Treat like rcv logo */ - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO); return ndlp->nlp_state; } @@ -987,11 +993,17 @@ lpfc_device_rm_adisc_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { - /* software abort outstanding ADISC */ - lpfc_els_abort(phba, ndlp, 1); + if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { + ndlp->nlp_flag |= NLP_NODEV_REMOVE; + return ndlp->nlp_state; + } + else { + /* software abort outstanding ADISC */ + lpfc_els_abort(phba, ndlp, 1); - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - return NLP_STE_FREED_NODE; + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; + } } static uint32_t @@ -1006,7 +1018,7 @@ lpfc_device_recov_adisc_issue(struct lpfc_hba * phba, ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_flag |= NLP_NPR_ADISC; spin_unlock_irq(phba->host->host_lock); @@ -1048,7 +1060,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } @@ -1073,7 +1085,7 @@ 
lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba, struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); return ndlp->nlp_state; } @@ -1133,8 +1145,14 @@ lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - return NLP_STE_FREED_NODE; + if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { + ndlp->nlp_flag |= NLP_NODEV_REMOVE; + return ndlp->nlp_state; + } + else { + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; + } } static uint32_t @@ -1146,7 +1164,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba, ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(phba->host->host_lock); return ndlp->nlp_state; } @@ -1186,7 +1204,7 @@ lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba, /* Software abort outstanding PRLI before sending acc */ lpfc_els_abort(phba, ndlp, 1); - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } @@ -1214,7 +1232,7 @@ lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba, struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); return ndlp->nlp_state; } @@ -1278,11 +1296,17 @@ static uint32_t lpfc_device_rm_prli_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { - /* software abort outstanding PRLI */ - lpfc_els_abort(phba, ndlp, 1); + if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { + ndlp->nlp_flag |= NLP_NODEV_REMOVE; + return ndlp->nlp_state; + } + else { + /* software abort outstanding PLOGI */ + lpfc_els_abort(phba, ndlp, 1); - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - return NLP_STE_FREED_NODE; + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; + } } @@ -1313,7 +1337,7 @@ lpfc_device_recov_prli_issue(struct lpfc_hba * phba, ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(phba->host->host_lock); return ndlp->nlp_state; } @@ -1351,7 +1375,7 @@ lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } @@ -1375,7 +1399,7 @@ lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); return ndlp->nlp_state; } @@ -1386,7 +1410,7 @@ lpfc_device_recov_unmap_node(struct lpfc_hba * phba, ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); lpfc_disc_set_adisc(phba, ndlp); return ndlp->nlp_state; @@ -1424,7 +1448,7 @@ lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); 
return ndlp->nlp_state; } @@ -1456,7 +1480,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba, spin_unlock_irq(phba->host->host_lock); /* Treat like rcv logo */ - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO); return ndlp->nlp_state; } @@ -1469,7 +1493,7 @@ lpfc_device_recov_mapped_node(struct lpfc_hba * phba, ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(phba->host->host_lock); lpfc_disc_set_adisc(phba, ndlp); return ndlp->nlp_state; @@ -1551,7 +1575,7 @@ lpfc_rcv_logo_npr_node(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_logo(phba, ndlp, cmdiocb); + lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } @@ -1617,9 +1641,16 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb, *rspiocb; + IOCB_t *irsp; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; + + irsp = &rspiocb->iocb; + if (irsp->ulpStatus) { + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; + } return ndlp->nlp_state; } @@ -1628,9 +1659,16 @@ lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb, *rspiocb; + IOCB_t *irsp; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; + + irsp = &rspiocb->iocb; + if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; + } return ndlp->nlp_state; } @@ -1649,9 +1687,16 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba, uint32_t evt) { struct lpfc_iocbq *cmdiocb, *rspiocb; + IOCB_t *irsp; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; + + irsp = &rspiocb->iocb; + if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; + } return ndlp->nlp_state; } @@ -1668,7 +1713,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba, if (!mb->mbxStatus) ndlp->nlp_rpi = mb->un.varWords[0]; - + else { + if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; + } + } return ndlp->nlp_state; } @@ -1677,6 +1727,10 @@ lpfc_device_rm_npr_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { + if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { + ndlp->nlp_flag |= NLP_NODEV_REMOVE; + return ndlp->nlp_state; + } lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); return NLP_STE_FREED_NODE; } @@ -1687,7 +1741,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba, uint32_t evt) { spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(phba->host->host_lock); if (ndlp->nlp_flag & NLP_DELAY_TMO) { lpfc_cancel_retry_delay_tmo(phba, ndlp); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index f9379987372..7dc4c2e6bed 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -629,8 +629,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq; IOCB_t *piocb; struct fcp_cmnd *fcp_cmnd; - struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device; - struct lpfc_rport_data *rdata = scsi_dev->hostdata; + struct 
lpfc_rport_data *rdata = lpfc_cmd->rdata; struct lpfc_nodelist *ndlp = rdata->pnode; if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { @@ -665,56 +664,18 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, piocb->ulpTimeout = lpfc_cmd->timeout; } - lpfc_cmd->rdata = rdata; - - switch (task_mgmt_cmd) { - case FCP_LUN_RESET: - /* Issue LUN Reset to TGT <num> LUN <num> */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_FCP, - "%d:0703 Issue LUN Reset to TGT %d LUN %d " - "Data: x%x x%x\n", - phba->brd_no, - scsi_dev->id, scsi_dev->lun, - ndlp->nlp_rpi, ndlp->nlp_flag); - - break; - case FCP_ABORT_TASK_SET: - /* Issue Abort Task Set to TGT <num> LUN <num> */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_FCP, - "%d:0701 Issue Abort Task Set to TGT %d LUN %d " - "Data: x%x x%x\n", - phba->brd_no, - scsi_dev->id, scsi_dev->lun, - ndlp->nlp_rpi, ndlp->nlp_flag); - - break; - case FCP_TARGET_RESET: - /* Issue Target Reset to TGT <num> */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_FCP, - "%d:0702 Issue Target Reset to TGT %d " - "Data: x%x x%x\n", - phba->brd_no, - scsi_dev->id, ndlp->nlp_rpi, - ndlp->nlp_flag); - break; - } - return (1); } static int -lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba) +lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, + unsigned tgt_id, struct lpfc_rport_data *rdata) { struct lpfc_iocbq *iocbq; struct lpfc_iocbq *iocbqrsp; int ret; + lpfc_cmd->rdata = rdata; ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET); if (!ret) return FAILED; @@ -726,6 +687,13 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba) if (!iocbqrsp) return FAILED; + /* Issue Target Reset to TGT <num> */ + lpfc_printf_log(phba, KERN_INFO, LOG_FCP, + "%d:0702 Issue Target Reset to TGT %d " + "Data: x%x x%x\n", + phba->brd_no, tgt_id, rdata->pnode->nlp_rpi, + rdata->pnode->nlp_flag); + ret = lpfc_sli_issue_iocb_wait(phba, &phba->sli.ring[phba->sli.fcp_ring], iocbq, iocbqrsp, lpfc_cmd->timeout); @@ -1021,6 +989,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) lpfc_cmd->pCmd = cmnd; lpfc_cmd->timeout = 60; lpfc_cmd->scsi_hba = phba; + lpfc_cmd->rdata = rdata; ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET); if (!ret) @@ -1033,6 +1002,11 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) if (iocbqrsp == NULL) goto out_free_scsi_buf; + lpfc_printf_log(phba, KERN_INFO, LOG_FCP, + "%d:0703 Issue LUN Reset to TGT %d LUN %d " + "Data: x%x x%x\n", phba->brd_no, cmnd->device->id, + cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); + ret = lpfc_sli_issue_iocb_wait(phba, &phba->sli.ring[phba->sli.fcp_ring], iocbq, iocbqrsp, lpfc_cmd->timeout); @@ -1104,7 +1078,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) int match; int ret = FAILED, i, err_count = 0; int cnt, loopcnt; - unsigned int midlayer_id = 0; struct lpfc_scsi_buf * lpfc_cmd; lpfc_block_requests(phba); @@ -1124,7 +1097,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) * targets known to the driver. Should any target reset * fail, this routine returns failure to the midlayer. 
*/ - midlayer_id = cmnd->device->id; for (i = 0; i < MAX_FCP_TARGET; i++) { /* Search the mapped list for this target ID */ match = 0; @@ -1137,9 +1109,8 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) if (!match) continue; - lpfc_cmd->pCmd->device->id = i; - lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data; - ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba); + ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, + i, ndlp->rport->dd_data); if (ret != SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "%d:0713 Bus Reset on target %d failed\n", @@ -1158,7 +1129,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) * the targets. Unfortunately, some targets do not abide by * this forcing the driver to double check. */ - cmnd->device->id = midlayer_id; cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 0, 0, LPFC_CTX_HOST); if (cnt) diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 4cf1366108b..6b737568b83 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.1.4" +#define LPFC_DRIVER_VERSION "8.1.6" #define LPFC_DRIVER_NAME "lpfc" diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 80b68a2481b..de35ffe2f79 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -4471,7 +4471,6 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru) { Scsi_Cmnd *scmd; struct scsi_device *sdev; - unsigned long flags = 0; scb_t *scb; int rval; diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index c11e5ce6865..bec1424eda8 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. 
* * FILE : megaraid_mbox.c - * Version : v2.20.4.7 (Nov 14 2005) + * Version : v2.20.4.8 (Apr 11 2006) * * Authors: * Atul Mukker <Atul.Mukker@lsil.com> @@ -2278,6 +2278,7 @@ megaraid_mbox_dpc(unsigned long devp) unsigned long flags; uint8_t c; int status; + uioc_t *kioc; if (!adapter) return; @@ -2320,6 +2321,9 @@ megaraid_mbox_dpc(unsigned long devp) // remove from local clist list_del_init(&scb->list); + kioc = (uioc_t *)scb->gp; + kioc->status = 0; + megaraid_mbox_mm_done(adapter, scb); continue; @@ -2636,6 +2640,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp) int recovery_window; int recovering; int i; + uioc_t *kioc; adapter = SCP2ADAPTER(scp); raid_dev = ADAP2RAIDDEV(adapter); @@ -2655,32 +2660,51 @@ megaraid_reset_handler(struct scsi_cmnd *scp) // Also, reset all the commands currently owned by the driver spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) { - list_del_init(&scb->list); // from pending list - con_log(CL_ANN, (KERN_WARNING - "megaraid: %ld:%d[%d:%d], reset from pending list\n", - scp->serial_number, scb->sno, - scb->dev_channel, scb->dev_target)); + if (scb->sno >= MBOX_MAX_SCSI_CMDS) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: IOCTL packet with %d[%d:%d] being reset\n", + scb->sno, scb->dev_channel, scb->dev_target)); - scp->result = (DID_RESET << 16); - scp->scsi_done(scp); + scb->status = -1; - megaraid_dealloc_scb(adapter, scb); + kioc = (uioc_t *)scb->gp; + kioc->status = -EFAULT; + + megaraid_mbox_mm_done(adapter, scb); + } else { + if (scb->scp == scp) { // Found command + con_log(CL_ANN, (KERN_WARNING + "megaraid: %ld:%d[%d:%d], reset from pending list\n", + scp->serial_number, scb->sno, + scb->dev_channel, scb->dev_target)); + } else { + con_log(CL_ANN, (KERN_WARNING + "megaraid: IO packet with %d[%d:%d] being reset\n", + scb->sno, scb->dev_channel, scb->dev_target)); + } + + scb->scp->result = (DID_RESET << 16); + scb->scp->scsi_done(scb->scp); + + megaraid_dealloc_scb(adapter, scb); + } } spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); if (adapter->outstanding_cmds) { con_log(CL_ANN, (KERN_NOTICE "megaraid: %d outstanding commands. 
Max wait %d sec\n", - adapter->outstanding_cmds, MBOX_RESET_WAIT)); + adapter->outstanding_cmds, + (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT))); } recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; recovering = adapter->outstanding_cmds; - for (i = 0; i < recovery_window && adapter->outstanding_cmds; i++) { + for (i = 0; i < recovery_window; i++) { megaraid_ack_sequence(adapter); @@ -2689,12 +2713,11 @@ megaraid_reset_handler(struct scsi_cmnd *scp) con_log(CL_ANN, ( "megaraid mbox: Wait for %d commands to complete:%d\n", adapter->outstanding_cmds, - MBOX_RESET_WAIT - i)); + (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i)); } // bailout if no recovery happended in reset time - if ((i == MBOX_RESET_WAIT) && - (recovering == adapter->outstanding_cmds)) { + if (adapter->outstanding_cmds == 0) { break; } @@ -2918,12 +2941,13 @@ mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[]) wmb(); WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); - for (i = 0; i < 0xFFFFF; i++) { + for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) { if (mbox->numstatus != 0xFF) break; rmb(); + udelay(MBOX_SYNC_DELAY_200); } - if (i == 0xFFFFF) { + if (i == MBOX_SYNC_WAIT_CNT) { // We may need to re-calibrate the counter con_log(CL_ANN, (KERN_CRIT "megaraid: fast sync command timed out\n")); @@ -3475,7 +3499,7 @@ megaraid_cmm_register(adapter_t *adapter) adp.drvr_data = (unsigned long)adapter; adp.pdev = adapter->pdev; adp.issue_uioc = megaraid_mbox_mm_handler; - adp.timeout = 300; + adp.timeout = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; adp.max_kioc = MBOX_MAX_USER_CMDS; if ((rval = mraid_mm_register_adp(&adp)) != 0) { @@ -3702,7 +3726,6 @@ megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb) unsigned long flags; kioc = (uioc_t *)scb->gp; - kioc->status = 0; mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; mbox64->mbox32.status = scb->status; raw_mbox = (uint8_t *)&mbox64->mbox32; diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h index 882fb1a0b57..868fb0ec93e 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.h +++ b/drivers/scsi/megaraid/megaraid_mbox.h @@ -21,8 +21,8 @@ #include "megaraid_ioctl.h" -#define MEGARAID_VERSION "2.20.4.7" -#define MEGARAID_EXT_VERSION "(Release Date: Mon Nov 14 12:27:22 EST 2005)" +#define MEGARAID_VERSION "2.20.4.8" +#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)" /* @@ -100,6 +100,9 @@ #define MBOX_BUSY_WAIT 10 // max usec to wait for busy mailbox #define MBOX_RESET_WAIT 180 // wait these many seconds in reset #define MBOX_RESET_EXT_WAIT 120 // extended wait reset +#define MBOX_SYNC_WAIT_CNT 0xFFFF // wait loop index for synchronous mode + +#define MBOX_SYNC_DELAY_200 200 // 200 micro-seconds /* * maximum transfer that can happen through the firmware commands issued diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index 8f3ce043229..e8f534fb336 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -898,10 +898,8 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp) adapter = kmalloc(sizeof(mraid_mmadp_t), GFP_KERNEL); - if (!adapter) { - rval = -ENOMEM; - goto memalloc_error; - } + if (!adapter) + return -ENOMEM; memset(adapter, 0, sizeof(mraid_mmadp_t)); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 017729c59a4..584fe5d8e50 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -599,6 +599,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha) * Either SUCCESS or FAILED. 
* * Note: +* Only return FAILED if command not returned by firmware. **************************************************************************/ int qla2xxx_eh_abort(struct scsi_cmnd *cmd) @@ -609,11 +610,12 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) unsigned int id, lun; unsigned long serial; unsigned long flags; + int wait = 0; if (!CMD_SP(cmd)) - return FAILED; + return SUCCESS; - ret = FAILED; + ret = SUCCESS; id = cmd->device->id; lun = cmd->device->lun; @@ -642,7 +644,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) } else { DEBUG3(printk("%s(%ld): abort_command " "mbx success.\n", __func__, ha->host_no)); - ret = SUCCESS; + wait = 1; } spin_lock_irqsave(&ha->hardware_lock, flags); @@ -651,17 +653,18 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Wait for the command to be returned. */ - if (ret == SUCCESS) { + if (wait) { if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) { qla_printk(KERN_ERR, ha, "scsi(%ld:%d:%d): Abort handler timed out -- %lx " "%x.\n", ha->host_no, id, lun, serial, ret); + ret = FAILED; } } qla_printk(KERN_INFO, ha, - "scsi(%ld:%d:%d): Abort command issued -- %lx %x.\n", ha->host_no, - id, lun, serial, ret); + "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n", + ha->host_no, id, lun, wait, serial, ret); return ret; } @@ -1700,8 +1703,8 @@ qla2x00_free_device(scsi_qla_host_t *ha) ha->flags.online = 0; /* Detach interrupts */ - if (ha->pdev->irq) - free_irq(ha->pdev->irq, ha); + if (ha->host->irq) + free_irq(ha->host->irq, ha); /* release io space registers */ if (ha->iobase) diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index c750d3399a9..941c1e15c89 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -56,6 +56,8 @@ static struct { {"DENON", "DRD-25X", "V", BLIST_NOLUN}, /* locks up */ {"HITACHI", "DK312C", "CM81", BLIST_NOLUN}, /* responds to all lun */ {"HITACHI", "DK314C", "CR21", BLIST_NOLUN}, /* responds to all lun */ + {"IBM", "2104-DU3", NULL, BLIST_NOLUN}, /* locks up */ + {"IBM", "2104-TU3", NULL, BLIST_NOLUN}, /* locks up */ {"IMS", "CDD521/10", "2.06", BLIST_NOLUN}, /* locks up */ {"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN}, /* locks up */ {"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN}, /* locks up */ diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 7b0f9a3810d..764a8b375ea 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1067,16 +1067,29 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes, break; case NOT_READY: /* - * If the device is in the process of becoming ready, - * retry. + * If the device is in the process of becoming + * ready, or has a temporary blockage, retry. 
*/ - if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) { - scsi_requeue_command(q, cmd); - return; + if (sshdr.asc == 0x04) { + switch (sshdr.ascq) { + case 0x01: /* becoming ready */ + case 0x04: /* format in progress */ + case 0x05: /* rebuild in progress */ + case 0x06: /* recalculation in progress */ + case 0x07: /* operation in progress */ + case 0x08: /* Long write in progress */ + case 0x09: /* self test in progress */ + scsi_requeue_command(q, cmd); + return; + default: + break; + } } - if (!(req->flags & REQ_QUIET)) + if (!(req->flags & REQ_QUIET)) { scmd_printk(KERN_INFO, cmd, - "Device not ready.\n"); + "Device not ready: "); + scsi_print_sense_hdr("", &sshdr); + } scsi_end_request(cmd, 0, this_count, 1); return; case VOLUME_OVERFLOW: diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c index 3274ab76c8d..255886a9ac5 100644 --- a/drivers/scsi/sim710.c +++ b/drivers/scsi/sim710.c @@ -75,7 +75,7 @@ param_setup(char *str) else if(!strncmp(pos, "id:", 3)) { if(slot == -1) { printk(KERN_WARNING "sim710: Must specify slot for id parameter\n"); - } else if(slot > MAX_SLOTS) { + } else if(slot >= MAX_SLOTS) { printk(KERN_WARNING "sim710: Illegal slot %d for id %d\n", slot, val); } else { id_array[slot] = val; diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 674b15c78f6..bbf78aaf9e0 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -362,6 +362,40 @@ serial_out(struct uart_8250_port *up, int offset, int value) #define serial_inp(up, offset) serial_in(up, offset) #define serial_outp(up, offset, value) serial_out(up, offset, value) +/* Uart divisor latch read */ +static inline int _serial_dl_read(struct uart_8250_port *up) +{ + return serial_inp(up, UART_DLL) | serial_inp(up, UART_DLM) << 8; +} + +/* Uart divisor latch write */ +static inline void _serial_dl_write(struct uart_8250_port *up, int value) +{ + serial_outp(up, UART_DLL, value & 0xff); + serial_outp(up, UART_DLM, value >> 8 & 0xff); +} + +#ifdef CONFIG_SERIAL_8250_AU1X00 +/* Au1x00 haven't got a standard divisor latch */ +static int serial_dl_read(struct uart_8250_port *up) +{ + if (up->port.iotype == UPIO_AU) + return __raw_readl(up->port.membase + 0x28); + else + return _serial_dl_read(up); +} + +static void serial_dl_write(struct uart_8250_port *up, int value) +{ + if (up->port.iotype == UPIO_AU) + __raw_writel(value, up->port.membase + 0x28); + else + _serial_dl_write(up, value); +} +#else +#define serial_dl_read(up) _serial_dl_read(up) +#define serial_dl_write(up, value) _serial_dl_write(up, value) +#endif /* * For the 16C950 @@ -494,7 +528,8 @@ static void disable_rsa(struct uart_8250_port *up) */ static int size_fifo(struct uart_8250_port *up) { - unsigned char old_fcr, old_mcr, old_dll, old_dlm, old_lcr; + unsigned char old_fcr, old_mcr, old_lcr; + unsigned short old_dl; int count; old_lcr = serial_inp(up, UART_LCR); @@ -505,10 +540,8 @@ static int size_fifo(struct uart_8250_port *up) UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); serial_outp(up, UART_MCR, UART_MCR_LOOP); serial_outp(up, UART_LCR, UART_LCR_DLAB); - old_dll = serial_inp(up, UART_DLL); - old_dlm = serial_inp(up, UART_DLM); - serial_outp(up, UART_DLL, 0x01); - serial_outp(up, UART_DLM, 0x00); + old_dl = serial_dl_read(up); + serial_dl_write(up, 0x0001); serial_outp(up, UART_LCR, 0x03); for (count = 0; count < 256; count++) serial_outp(up, UART_TX, count); @@ -519,8 +552,7 @@ static int size_fifo(struct uart_8250_port *up) serial_outp(up, UART_FCR, old_fcr); serial_outp(up, UART_MCR, old_mcr); serial_outp(up, UART_LCR, 
UART_LCR_DLAB); - serial_outp(up, UART_DLL, old_dll); - serial_outp(up, UART_DLM, old_dlm); + serial_dl_write(up, old_dl); serial_outp(up, UART_LCR, old_lcr); return count; @@ -750,8 +782,7 @@ static void autoconfig_16550a(struct uart_8250_port *up) serial_outp(up, UART_LCR, 0xE0); - quot = serial_inp(up, UART_DLM) << 8; - quot += serial_inp(up, UART_DLL); + quot = serial_dl_read(up); quot <<= 3; status1 = serial_in(up, 0x04); /* EXCR1 */ @@ -759,8 +790,7 @@ static void autoconfig_16550a(struct uart_8250_port *up) status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ serial_outp(up, 0x04, status1); - serial_outp(up, UART_DLL, quot & 0xff); - serial_outp(up, UART_DLM, quot >> 8); + serial_dl_write(up, quot); serial_outp(up, UART_LCR, 0); @@ -1862,8 +1892,7 @@ serial8250_set_termios(struct uart_port *port, struct termios *termios, serial_outp(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */ } - serial_outp(up, UART_DLL, quot & 0xff); /* LS of divisor */ - serial_outp(up, UART_DLM, quot >> 8); /* MS of divisor */ + serial_dl_write(up, quot); /* * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR @@ -1906,6 +1935,9 @@ static int serial8250_request_std_resource(struct uart_8250_port *up) int ret = 0; switch (up->port.iotype) { + case UPIO_AU: + size = 0x100000; + /* fall thru */ case UPIO_MEM: if (!up->port.mapbase) break; @@ -1938,6 +1970,9 @@ static void serial8250_release_std_resource(struct uart_8250_port *up) unsigned int size = 8 << up->port.regshift; switch (up->port.iotype) { + case UPIO_AU: + size = 0x100000; + /* fall thru */ case UPIO_MEM: if (!up->port.mapbase) break; @@ -2200,10 +2235,17 @@ static void serial8250_console_write(struct console *co, const char *s, unsigned int count) { struct uart_8250_port *up = &serial8250_ports[co->index]; + unsigned long flags; unsigned int ier; + int locked = 1; touch_nmi_watchdog(); + if (oops_in_progress) { + locked = spin_trylock_irqsave(&up->port.lock, flags); + } else + spin_lock_irqsave(&up->port.lock, flags); + /* * First save the IER then disable the interrupts */ @@ -2221,8 +2263,10 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count) * and restore the IER */ wait_for_xmitr(up, BOTH_EMPTY); - up->ier |= UART_IER_THRI; - serial_out(up, UART_IER, ier | UART_IER_THRI); + serial_out(up, UART_IER, ier); + + if (locked) + spin_unlock_irqrestore(&up->port.lock, flags); } static int serial8250_console_setup(struct console *co, char *options) diff --git a/drivers/serial/8250_au1x00.c b/drivers/serial/8250_au1x00.c index 3d1bfd07208..58015fd14be 100644 --- a/drivers/serial/8250_au1x00.c +++ b/drivers/serial/8250_au1x00.c @@ -30,13 +30,12 @@ { \ .iobase = _base, \ .membase = (void __iomem *)_base,\ - .mapbase = _base, \ + .mapbase = CPHYSADDR(_base), \ .irq = _irq, \ .uartclk = 0, /* filled */ \ .regshift = 2, \ .iotype = UPIO_AU, \ - .flags = UPF_SKIP_TEST | \ - UPF_IOREMAP, \ + .flags = UPF_SKIP_TEST \ } static struct plat_serial8250_port au1x00_data[] = { diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index fcd7744c425..aeb8153ccf2 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c @@ -1500,20 +1500,18 @@ uart_block_til_ready(struct file *filp, struct uart_state *state) static struct uart_state *uart_get(struct uart_driver *drv, int line) { struct uart_state *state; + int ret = 0; - mutex_lock(&port_mutex); state = drv->state + line; if (mutex_lock_interruptible(&state->mutex)) { - state = ERR_PTR(-ERESTARTSYS); - goto out; + ret = 
-ERESTARTSYS; + goto err; } state->count++; - if (!state->port) { - state->count--; - mutex_unlock(&state->mutex); - state = ERR_PTR(-ENXIO); - goto out; + if (!state->port || state->port->flags & UPF_DEAD) { + ret = -ENXIO; + goto err_unlock; } if (!state->info) { @@ -1531,15 +1529,17 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line) tasklet_init(&state->info->tlet, uart_tasklet_action, (unsigned long)state); } else { - state->count--; - mutex_unlock(&state->mutex); - state = ERR_PTR(-ENOMEM); + ret = -ENOMEM; + goto err_unlock; } } - - out: - mutex_unlock(&port_mutex); return state; + + err_unlock: + state->count--; + mutex_unlock(&state->mutex); + err: + return ERR_PTR(ret); } /* @@ -2085,45 +2085,6 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, } } -/* - * This reverses the effects of uart_configure_port, hanging up the - * port before removal. - */ -static void -uart_unconfigure_port(struct uart_driver *drv, struct uart_state *state) -{ - struct uart_port *port = state->port; - struct uart_info *info = state->info; - - if (info && info->tty) - tty_vhangup(info->tty); - - mutex_lock(&state->mutex); - - state->info = NULL; - - /* - * Free the port IO and memory resources, if any. - */ - if (port->type != PORT_UNKNOWN) - port->ops->release_port(port); - - /* - * Indicate that there isn't a port here anymore. - */ - port->type = PORT_UNKNOWN; - - /* - * Kill the tasklet, and free resources. - */ - if (info) { - tasklet_kill(&info->tlet); - kfree(info); - } - - mutex_unlock(&state->mutex); -} - static struct tty_operations uart_ops = { .open = uart_open, .close = uart_close, @@ -2270,6 +2231,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port) state = drv->state + port->line; mutex_lock(&port_mutex); + mutex_lock(&state->mutex); if (state->port) { ret = -EINVAL; goto out; @@ -2304,7 +2266,13 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port) port->cons && !(port->cons->flags & CON_ENABLED)) register_console(port->cons); + /* + * Ensure UPF_DEAD is not set. + */ + port->flags &= ~UPF_DEAD; + out: + mutex_unlock(&state->mutex); mutex_unlock(&port_mutex); return ret; @@ -2322,6 +2290,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port) int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port) { struct uart_state *state = drv->state + port->line; + struct uart_info *info; BUG_ON(in_interrupt()); @@ -2332,11 +2301,48 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port) mutex_lock(&port_mutex); /* + * Mark the port "dead" - this prevents any opens from + * succeeding while we shut down the port. + */ + mutex_lock(&state->mutex); + port->flags |= UPF_DEAD; + mutex_unlock(&state->mutex); + + /* * Remove the devices from devfs */ tty_unregister_device(drv->tty_driver, port->line); - uart_unconfigure_port(drv, state); + info = state->info; + if (info && info->tty) + tty_vhangup(info->tty); + + /* + * All users of this port should now be disconnected from + * this driver, and the port shut down. We should be the + * only thread fiddling with this port from now on. + */ + state->info = NULL; + + /* + * Free the port IO and memory resources, if any. + */ + if (port->type != PORT_UNKNOWN) + port->ops->release_port(port); + + /* + * Indicate that there isn't a port here anymore. + */ + port->type = PORT_UNKNOWN; + + /* + * Kill the tasklet, and free resources. 
+ */ + if (info) { + tasklet_kill(&info->tlet); + kfree(info); + } + state->port = NULL; mutex_unlock(&port_mutex); diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile index 4ef5cd19609..b985dfad6c6 100644 --- a/drivers/video/logo/Makefile +++ b/drivers/video/logo/Makefile @@ -34,7 +34,7 @@ extra-y += $(call logo-cfiles,_clut224,ppm) extra-y += $(call logo-cfiles,_gray256,pgm) # Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..." -quiet_cmd_logo = LOGO $@ +quiet_cmd_logo = LOGO $@ cmd_logo = scripts/pnmtologo \ -t $(patsubst $*_%,%,$(notdir $(basename $<))) \ -n $(notdir $(basename $<)) -o $@ $< |
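
Two of the hunks above encode small, reusable patterns that are easy to miss in the raw diff.

First, serial8250_console_write() in drivers/serial/8250.c now takes the port lock with a trylock when oops_in_progress is set, and only drops the lock if it actually acquired it; a console write issued from the crashing context therefore cannot self-deadlock on a lock the same CPU already holds. A minimal sketch of that locking pattern in kernel-style C — the demo_console type and demo_console_write() name are stand-ins, only spin_lock_irqsave()/spin_trylock_irqsave()/spin_unlock_irqrestore() and oops_in_progress are the real kernel interfaces:

	#include <linux/spinlock.h>
	#include <linux/kernel.h>	/* oops_in_progress */

	/* Illustrative console state; only the spinlock matters here. */
	struct demo_console {
		spinlock_t lock;
	};

	static void demo_console_write(struct demo_console *con,
				       const char *s, unsigned int count)
	{
		unsigned long flags;
		int locked = 1;

		if (oops_in_progress)
			/* This CPU may already hold the lock while dumping
			 * an oops: never spin, just record whether the lock
			 * was taken. */
			locked = spin_trylock_irqsave(&con->lock, flags);
		else
			spin_lock_irqsave(&con->lock, flags);

		/* ... emit 'count' bytes of 's' to the hardware ... */

		if (locked)
			spin_unlock_irqrestore(&con->lock, flags);
	}

Second, the new _serial_dl_read()/_serial_dl_write() helpers in 8250.c hide the fact that the 16-bit baud divisor is split across the DLL (low byte) and DLM (high byte) latch registers. A sketch of that byte packing, assuming simple stand-in accessors (reg_read()/reg_write() below are placeholders, not the driver's serial_inp()/serial_outp()):

	#define UART_DLL	0	/* divisor latch low, DLAB set */
	#define UART_DLM	1	/* divisor latch high, DLAB set */

	static unsigned char regs[8];	/* fake register file for the demo */

	static unsigned int reg_read(int offset)
	{
		return regs[offset];
	}

	static void reg_write(int offset, unsigned int v)
	{
		regs[offset] = (unsigned char)v;
	}

	/* Combine DLL/DLM into one divisor, as _serial_dl_read() does. */
	static unsigned int dl_read(void)
	{
		return reg_read(UART_DLL) | (reg_read(UART_DLM) << 8);
	}

	/* Split a divisor back into its two bytes, as _serial_dl_write() does. */
	static void dl_write(unsigned int dl)
	{
		reg_write(UART_DLL, dl & 0xff);
		reg_write(UART_DLM, (dl >> 8) & 0xff);
	}

Keeping the read and write halves symmetric is what lets the Au1x00 special case in the driver swap in its non-standard divisor register without touching any caller.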