Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r-- | drivers/net/tg3.c | 1383 |
1 file changed, 1084 insertions, 299 deletions
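This update (v3.27, May 5, 2005 to v3.32, June 24, 2005) converts tg3 to lockless interrupt handling built on tagged status blocks, replaces the irqsave locking in the ethtool/ioctl/timer paths with BH-disabling locks behind the new tg3_full_lock()/tg3_full_unlock() helpers, and adds ethtool self-tests (nvram, link, registers, memory, loopback, interrupt) plus interrupt-coalescing setup through tp->coal. Before the diff, here is a small stand-alone model of the tagged-status handshake the patch relies on; the struct and function names below are invented for illustration and do not appear in the driver.

/* Illustrative model of tagged interrupt status. The device stamps each
 * status block with a monotonically increasing tag; the driver acks by
 * writing the last tag it processed back to the interrupt mailbox
 * (tp->last_tag << 24 in the real driver). The device re-interrupts only
 * if it has produced work newer than the acknowledged tag, so no racy
 * "has work?" re-check is needed. All names here are hypothetical.
 */
#include <stdio.h>

struct model_dev {
	unsigned int status_tag;	/* advanced by "hardware" */
	unsigned int acked_tag;		/* written by driver via mailbox */
};

static int irq_pending(const struct model_dev *d)
{
	/* Interrupt stays asserted until the newest tag is acked. */
	return d->status_tag != d->acked_tag;
}

static void device_produce_work(struct model_dev *d)
{
	d->status_tag++;		/* new events get a new tag */
}

static void driver_poll(struct model_dev *d)
{
	unsigned int tag = d->status_tag;	/* snapshot before processing */
	/* ... process rx/tx completions here ... */
	d->acked_tag = tag;			/* mailbox write: tag << 24 */
}

int main(void)
{
	struct model_dev d = { 0, 0 };

	device_produce_work(&d);
	printf("irq pending after work:  %d\n", irq_pending(&d));	/* 1 */
	driver_poll(&d);
	printf("irq pending after ack:   %d\n", irq_pending(&d));	/* 0 */
	device_produce_work(&d);	/* work arriving during/after poll */
	printf("irq pending, newer work: %d\n", irq_pending(&d));	/* 1 */
	return 0;
}

Compiled and run, the model shows the interrupt condition clearing only once the driver acknowledges the newest tag, which is why tg3_restart_ints() in the diff can skip the tg3_has_work() re-check when TG3_FLAG_TAGGED_STATUS is set.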
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index f79b02e80e7..7e371b1209a 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -7,7 +7,12 @@ * Copyright (C) 2005 Broadcom Corporation. * * Firmware is: - * Copyright (C) 2000-2003 Broadcom Corporation. + * Derived from proprietary unpublished source code, + * Copyright (C) 2000-2003 Broadcom Corporation. + * + * Permission is hereby granted for the distribution of this firmware + * data in hexadecimal or equivalent format, provided this copyright + * notice is accompanying it. */ #include <linux/config.h> @@ -61,8 +66,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.27" -#define DRV_MODULE_RELDATE "May 5, 2005" +#define DRV_MODULE_VERSION "3.32" +#define DRV_MODULE_RELDATE "June 24, 2005" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -133,6 +138,8 @@ /* number of ETHTOOL_GSTATS u64's */ #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64)) +#define TG3_NUM_TEST 6 + static char version[] __devinitdata = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; @@ -206,6 +213,8 @@ static struct pci_device_id tg3_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M, @@ -314,15 +323,24 @@ static struct { { "nic_tx_threshold_hit" } }; +static struct { + const char string[ETH_GSTRING_LEN]; +} ethtool_test_keys[TG3_NUM_TEST] = { + { "nvram test (online) " }, + { "link test (online) " }, + { "register test (offline)" }, + { "memory test (offline)" }, + { "loopback test (offline)" }, + { "interrupt test (offline)" }, +}; + static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) { if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) { - unsigned long flags; - - spin_lock_irqsave(&tp->indirect_lock, flags); + spin_lock_bh(&tp->indirect_lock); pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); - spin_unlock_irqrestore(&tp->indirect_lock, flags); + spin_unlock_bh(&tp->indirect_lock); } else { writel(val, tp->regs + off); if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0) @@ -333,12 +351,10 @@ static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) static void _tw32_flush(struct tg3 *tp, u32 off, u32 val) { if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) { - unsigned long flags; - - spin_lock_irqsave(&tp->indirect_lock, flags); + spin_lock_bh(&tp->indirect_lock); pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); - spin_unlock_irqrestore(&tp->indirect_lock, flags); + spin_unlock_bh(&tp->indirect_lock); } else { void __iomem *dest = tp->regs + off; writel(val, dest); @@ -378,28 +394,24 @@ static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val) static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) { - unsigned long flags; - - spin_lock_irqsave(&tp->indirect_lock, flags); + spin_lock_bh(&tp->indirect_lock); pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); /* Always leave this as zero. 
*/ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); - spin_unlock_irqrestore(&tp->indirect_lock, flags); + spin_unlock_bh(&tp->indirect_lock); } static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) { - unsigned long flags; - - spin_lock_irqsave(&tp->indirect_lock, flags); + spin_lock_bh(&tp->indirect_lock); pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); /* Always leave this as zero. */ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); - spin_unlock_irqrestore(&tp->indirect_lock, flags); + spin_unlock_bh(&tp->indirect_lock); } static void tg3_disable_ints(struct tg3 *tp) @@ -418,11 +430,14 @@ static inline void tg3_cond_int(struct tg3 *tp) static void tg3_enable_ints(struct tg3 *tp) { + tp->irq_sync = 0; + wmb(); + tw32(TG3PCI_MISC_HOST_CTRL, (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); - tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000); + tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, + (tp->last_tag << 24)); tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); - tg3_cond_int(tp); } @@ -455,16 +470,23 @@ static void tg3_restart_ints(struct tg3 *tp) { tw32(TG3PCI_MISC_HOST_CTRL, (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); - tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000); + tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, + tp->last_tag << 24); mmiowb(); - if (tg3_has_work(tp)) + /* When doing tagged status, this work check is unnecessary. + * The last_tag we write above tells the chip which piece of + * work we've completed. + */ + if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && + tg3_has_work(tp)) tw32(HOSTCC_MODE, tp->coalesce_mode | (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); } static inline void tg3_netif_stop(struct tg3 *tp) { + tp->dev->trans_start = jiffies; /* prevent tx timeout */ netif_poll_disable(tp->dev); netif_tx_disable(tp->dev); } @@ -477,7 +499,8 @@ static inline void tg3_netif_start(struct tg3 *tp) * (such as after tg3_init_hw) */ netif_poll_enable(tp->dev); - tg3_cond_int(tp); + tp->hw_status->status |= SD_STATUS_UPDATED; + tg3_enable_ints(tp); } static void tg3_switch_clocks(struct tg3 *tp) @@ -2500,7 +2523,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset) if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { if (netif_carrier_ok(tp->dev)) { tw32(HOSTCC_STAT_COAL_TICKS, - DEFAULT_STAT_COAL_TICKS); + tp->coal.stats_block_coalesce_usecs); } else { tw32(HOSTCC_STAT_COAL_TICKS, 0); } @@ -2551,7 +2574,7 @@ static void tg3_tx(struct tg3 *tp) sw_idx = NEXT_TX(sw_idx); } - dev_kfree_skb_irq(skb); + dev_kfree_skb(skb); } tp->tx_cons = sw_idx; @@ -2857,11 +2880,8 @@ static int tg3_poll(struct net_device *netdev, int *budget) { struct tg3 *tp = netdev_priv(netdev); struct tg3_hw_status *sblk = tp->hw_status; - unsigned long flags; int done; - spin_lock_irqsave(&tp->lock, flags); - /* handle link change and other phy events */ if (!(tp->tg3_flags & (TG3_FLAG_USE_LINKCHG_REG | @@ -2869,7 +2889,9 @@ static int tg3_poll(struct net_device *netdev, int *budget) if (sblk->status & SD_STATUS_LINK_CHG) { sblk->status = SD_STATUS_UPDATED | (sblk->status & ~SD_STATUS_LINK_CHG); + spin_lock(&tp->lock); tg3_setup_phy(tp, 0); + spin_unlock(&tp->lock); } } @@ -2880,13 +2902,10 @@ static int tg3_poll(struct net_device *netdev, int *budget) spin_unlock(&tp->tx_lock); } - spin_unlock_irqrestore(&tp->lock, flags); - /* run RX thread, within the bounds set by NAPI. 
* All RX "locking" is done by ensuring outside * code synchronizes with dev->poll() */ - done = 1; if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) { int orig_budget = *budget; int work_done; @@ -2898,22 +2917,59 @@ static int tg3_poll(struct net_device *netdev, int *budget) *budget -= work_done; netdev->quota -= work_done; - - if (work_done >= orig_budget) - done = 0; } + if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) + tp->last_tag = sblk->status_tag; + rmb(); + sblk->status &= ~SD_STATUS_UPDATED; + /* if no more work, tell net stack and NIC we're done */ + done = !tg3_has_work(tp); if (done) { - spin_lock_irqsave(&tp->lock, flags); - __netif_rx_complete(netdev); + spin_lock(&tp->lock); + netif_rx_complete(netdev); tg3_restart_ints(tp); - spin_unlock_irqrestore(&tp->lock, flags); + spin_unlock(&tp->lock); } return (done ? 0 : 1); } +static void tg3_irq_quiesce(struct tg3 *tp) +{ + BUG_ON(tp->irq_sync); + + tp->irq_sync = 1; + smp_mb(); + + synchronize_irq(tp->pdev->irq); +} + +static inline int tg3_irq_sync(struct tg3 *tp) +{ + return tp->irq_sync; +} + +/* Fully shutdown all tg3 driver activity elsewhere in the system. + * If irq_sync is non-zero, then the IRQ handler must be synchronized + * with as well. Most of the time, this is not necessary except when + * shutting down the device. + */ +static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) +{ + if (irq_sync) + tg3_irq_quiesce(tp); + spin_lock_bh(&tp->lock); + spin_lock(&tp->tx_lock); +} + +static inline void tg3_full_unlock(struct tg3 *tp) +{ + spin_unlock(&tp->tx_lock); + spin_unlock_bh(&tp->lock); +} + /* MSI ISR - No need to check for interrupt sharing and no need to * flush status block and interrupt mailbox. PCI ordering rules * guarantee that MSI will arrive after the status block. @@ -2923,31 +2979,28 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs) struct net_device *dev = dev_id; struct tg3 *tp = netdev_priv(dev); struct tg3_hw_status *sblk = tp->hw_status; - unsigned long flags; - - spin_lock_irqsave(&tp->lock, flags); /* - * writing any value to intr-mbox-0 clears PCI INTA# and + * Writing any value to intr-mbox-0 clears PCI INTA# and * chip-internal interrupt pending events. - * writing non-zero to intr-mbox-0 additional tells the + * Writing non-zero to intr-mbox-0 additional tells the * NIC to stop sending us irqs, engaging "in-intr-handler" * event coalescing. */ tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + tp->last_tag = sblk->status_tag; + rmb(); + if (tg3_irq_sync(tp)) + goto out; sblk->status &= ~SD_STATUS_UPDATED; - if (likely(tg3_has_work(tp))) netif_rx_schedule(dev); /* schedule NAPI poll */ else { - /* no work, re-enable interrupts - */ + /* No work, re-enable interrupts. */ tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, - 0x00000000); + tp->last_tag << 24); } - - spin_unlock_irqrestore(&tp->lock, flags); - +out: return IRQ_RETVAL(1); } @@ -2956,10 +3009,50 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs) struct net_device *dev = dev_id; struct tg3 *tp = netdev_priv(dev); struct tg3_hw_status *sblk = tp->hw_status; - unsigned long flags; unsigned int handled = 1; - spin_lock_irqsave(&tp->lock, flags); + /* In INTx mode, it is possible for the interrupt to arrive at + * the CPU before the status block posted prior to the interrupt. + * Reading the PCI State register will confirm whether the + * interrupt is ours and will flush the status block. 
+ */ + if ((sblk->status & SD_STATUS_UPDATED) || + !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + /* + * Writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * Writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. + */ + tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, + 0x00000001); + if (tg3_irq_sync(tp)) + goto out; + sblk->status &= ~SD_STATUS_UPDATED; + if (likely(tg3_has_work(tp))) + netif_rx_schedule(dev); /* schedule NAPI poll */ + else { + /* No work, shared interrupt perhaps? re-enable + * interrupts, and flush that PCI write + */ + tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, + 0x00000000); + tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); + } + } else { /* shared interrupt */ + handled = 0; + } +out: + return IRQ_RETVAL(handled); +} + +static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + struct tg3 *tp = netdev_priv(dev); + struct tg3_hw_status *sblk = tp->hw_status; + unsigned int handled = 1; /* In INTx mode, it is possible for the interrupt to arrive at * the CPU before the status block posted prior to the interrupt. @@ -2977,13 +3070,11 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs) */ tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); - /* - * Flush PCI write. This also guarantees that our - * status block has been flushed to host memory. - */ - tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); + tp->last_tag = sblk->status_tag; + rmb(); + if (tg3_irq_sync(tp)) + goto out; sblk->status &= ~SD_STATUS_UPDATED; - if (likely(tg3_has_work(tp))) netif_rx_schedule(dev); /* schedule NAPI poll */ else { @@ -2991,15 +3082,13 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs) * interrupts, and flush that PCI write */ tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, - 0x00000000); + tp->last_tag << 24); tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); } } else { /* shared interrupt */ handled = 0; } - - spin_unlock_irqrestore(&tp->lock, flags); - +out: return IRQ_RETVAL(handled); } @@ -3020,7 +3109,7 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id, } static int tg3_init_hw(struct tg3 *); -static int tg3_halt(struct tg3 *, int); +static int tg3_halt(struct tg3 *, int, int); #ifdef CONFIG_NET_POLL_CONTROLLER static void tg3_poll_controller(struct net_device *dev) @@ -3038,19 +3127,17 @@ static void tg3_reset_task(void *_data) tg3_netif_stop(tp); - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + tg3_full_lock(tp, 1); restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER; tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; - tg3_halt(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); tg3_init_hw(tp); tg3_netif_start(tp); - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); if (restart_timer) mod_timer(&tp->timer, jiffies + 1); @@ -3156,39 +3243,21 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int i; u32 len, entry, base_flags, mss; int would_hit_hwbug; - unsigned long flags; len = skb_headlen(skb); /* No BH disabling for tx_lock here. We are running in BH disabled * context and TX reclaim runs via tp->poll inside of a software - * interrupt. Rejoice! - * - * Actually, things are not so simple. 
If we are to take a hw - * IRQ here, we can deadlock, consider: - * - * CPU1 CPU2 - * tg3_start_xmit - * take tp->tx_lock - * tg3_timer - * take tp->lock - * tg3_interrupt - * spin on tp->lock - * spin on tp->tx_lock - * - * So we really do need to disable interrupts when taking - * tx_lock here. + * interrupt. Furthermore, IRQ processing runs lockless so we have + * no IRQ context deadlocks to worry about either. Rejoice! */ - local_irq_save(flags); - if (!spin_trylock(&tp->tx_lock)) { - local_irq_restore(flags); + if (!spin_trylock(&tp->tx_lock)) return NETDEV_TX_LOCKED; - } /* This is a hard error, log it. */ if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { netif_stop_queue(dev); - spin_unlock_irqrestore(&tp->tx_lock, flags); + spin_unlock(&tp->tx_lock); printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n", dev->name); return NETDEV_TX_BUSY; @@ -3353,7 +3422,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) out_unlock: mmiowb(); - spin_unlock_irqrestore(&tp->tx_lock, flags); + spin_unlock(&tp->tx_lock); dev->trans_start = jiffies; @@ -3387,10 +3456,10 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) } tg3_netif_stop(tp); - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); - tg3_halt(tp, 1); + tg3_full_lock(tp, 1); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_set_mtu(dev, tp, new_mtu); @@ -3398,8 +3467,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) tg3_netif_start(tp); - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); return 0; } @@ -4081,19 +4149,19 @@ static void tg3_stop_fw(struct tg3 *tp) } /* tp->lock is held. */ -static int tg3_halt(struct tg3 *tp, int silent) +static int tg3_halt(struct tg3 *tp, int kind, int silent) { int err; tg3_stop_fw(tp); - tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN); + tg3_write_sig_pre_reset(tp, kind); tg3_abort_hw(tp, silent); err = tg3_chip_reset(tp); - tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN); - tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); + tg3_write_sig_legacy(tp, kind); + tg3_write_sig_post_reset(tp, kind); if (err) return err; @@ -4307,7 +4375,12 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b */ tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; + /* It is possible that bootcode is still loading at this point. + * Get the nvram lock first before halting the cpu. 
+ */ + tg3_nvram_lock(tp); err = tg3_halt_cpu(tp, cpu_base); + tg3_nvram_unlock(tp); if (err) goto out; @@ -5015,9 +5088,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - spin_lock_irq(&tp->lock); + spin_lock_bh(&tp->lock); __tg3_set_mac_addr(tp); - spin_unlock_irq(&tp->lock); + spin_unlock_bh(&tp->lock); return 0; } @@ -5044,6 +5117,27 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, } static void __tg3_set_rx_mode(struct net_device *); +static void tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) +{ + tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); + tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); + tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); + tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); + tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); + } + tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); + tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { + u32 val = ec->stats_block_coalesce_usecs; + + if (!netif_carrier_ok(tp->dev)) + val = 0; + + tw32(HOSTCC_STAT_COAL_TICKS, val); + } +} /* tp->lock is held. */ static int tg3_reset_hw(struct tg3 *tp) @@ -5366,16 +5460,7 @@ static int tg3_reset_hw(struct tg3 *tp) udelay(10); } - tw32(HOSTCC_RXCOL_TICKS, 0); - tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS); - tw32(HOSTCC_RXMAX_FRAMES, 1); - tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { - tw32(HOSTCC_RXCOAL_TICK_INT, 0); - tw32(HOSTCC_TXCOAL_TICK_INT, 0); - } - tw32(HOSTCC_RXCOAL_MAXF_INT, 1); - tw32(HOSTCC_TXCOAL_MAXF_INT, 0); + tg3_set_coalesce(tp, &tp->coal); /* set status block DMA address */ tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, @@ -5388,8 +5473,6 @@ static int tg3_reset_hw(struct tg3 *tp) * the tg3_periodic_fetch_stats call there, and * tg3_get_stats to see how this works for 5705/5750 chips. */ - tw32(HOSTCC_STAT_COAL_TICKS, - DEFAULT_STAT_COAL_TICKS); tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, ((u64) tp->stats_mapping >> 32)); tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, @@ -5445,7 +5528,8 @@ static int tg3_reset_hw(struct tg3 *tp) udelay(100); tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0); - tr32(MAILBOX_INTERRUPT_0); + tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); + tp->last_tag = 0; if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); @@ -5643,9 +5727,6 @@ static int tg3_reset_hw(struct tg3 *tp) tg3_write_sig_post_reset(tp, RESET_KIND_INIT); - if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) - tg3_enable_ints(tp); - return 0; } @@ -5718,36 +5799,35 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp) static void tg3_timer(unsigned long __opaque) { struct tg3 *tp = (struct tg3 *) __opaque; - unsigned long flags; - spin_lock_irqsave(&tp->lock, flags); - spin_lock(&tp->tx_lock); + spin_lock(&tp->lock); - /* All of this garbage is because when using non-tagged - * IRQ status the mailbox/status_block protocol the chip - * uses with the cpu is race prone. 
- */ - if (tp->hw_status->status & SD_STATUS_UPDATED) { - tw32(GRC_LOCAL_CTRL, - tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); - } else { - tw32(HOSTCC_MODE, tp->coalesce_mode | - (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); - } + if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { + /* All of this garbage is because when using non-tagged + * IRQ status the mailbox/status_block protocol the chip + * uses with the cpu is race prone. + */ + if (tp->hw_status->status & SD_STATUS_UPDATED) { + tw32(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); + } else { + tw32(HOSTCC_MODE, tp->coalesce_mode | + (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); + } - if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { - tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER; - spin_unlock(&tp->tx_lock); - spin_unlock_irqrestore(&tp->lock, flags); - schedule_work(&tp->reset_task); - return; + if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { + tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER; + spin_unlock(&tp->lock); + schedule_work(&tp->reset_task); + return; + } } - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) - tg3_periodic_fetch_stats(tp); - /* This part only runs once per second. */ if (!--tp->timer_counter) { + if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) + tg3_periodic_fetch_stats(tp); + if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { u32 mac_stat; int phy_event; @@ -5805,8 +5885,7 @@ static void tg3_timer(unsigned long __opaque) tp->asf_counter = tp->asf_multiplier; } - spin_unlock(&tp->tx_lock); - spin_unlock_irqrestore(&tp->lock, flags); + spin_unlock(&tp->lock); tp->timer.expires = jiffies + tp->timer_offset; add_timer(&tp->timer); @@ -5818,6 +5897,9 @@ static int tg3_test_interrupt(struct tg3 *tp) int err, i; u32 int_mbox = 0; + if (!netif_running(dev)) + return -ENODEV; + tg3_disable_ints(tp); free_irq(tp->pdev->irq, dev); @@ -5846,9 +5928,13 @@ static int tg3_test_interrupt(struct tg3 *tp) if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) err = request_irq(tp->pdev->irq, tg3_msi, SA_SAMPLE_RANDOM, dev->name, dev); - else - err = request_irq(tp->pdev->irq, tg3_interrupt, + else { + irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt; + if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) + fn = tg3_interrupt_tagged; + err = request_irq(tp->pdev->irq, fn, SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); + } if (err) return err; @@ -5900,23 +5986,26 @@ static int tg3_test_msi(struct tg3 *tp) tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; - err = request_irq(tp->pdev->irq, tg3_interrupt, - SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); + { + irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt; + if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) + fn = tg3_interrupt_tagged; + err = request_irq(tp->pdev->irq, fn, + SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); + } if (err) return err; /* Need to reset the chip because the MSI cycle may have terminated * with Master Abort. 
*/ - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + tg3_full_lock(tp, 1); - tg3_halt(tp, 1); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); err = tg3_init_hw(tp); - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); if (err) free_irq(tp->pdev->irq, dev); @@ -5929,14 +6018,12 @@ static int tg3_open(struct net_device *dev) struct tg3 *tp = netdev_priv(dev); int err; - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + tg3_full_lock(tp, 0); tg3_disable_ints(tp); tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); /* The placement of this call is tied * to the setup and use of Host TX descriptors. @@ -5948,7 +6035,13 @@ static int tg3_open(struct net_device *dev) if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) && (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) { - if (pci_enable_msi(tp->pdev) == 0) { + /* All MSI supporting chips should support tagged + * status. Assert that this is the case. + */ + if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { + printk(KERN_WARNING PFX "%s: MSI without TAGGED? " + "Not using MSI.\n", tp->dev->name); + } else if (pci_enable_msi(tp->pdev) == 0) { u32 msi_mode; msi_mode = tr32(MSGINT_MODE); @@ -5959,9 +6052,14 @@ static int tg3_open(struct net_device *dev) if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) err = request_irq(tp->pdev->irq, tg3_msi, SA_SAMPLE_RANDOM, dev->name, dev); - else - err = request_irq(tp->pdev->irq, tg3_interrupt, + else { + irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt; + if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) + fn = tg3_interrupt_tagged; + + err = request_irq(tp->pdev->irq, fn, SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); + } if (err) { if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { @@ -5972,17 +6070,23 @@ static int tg3_open(struct net_device *dev) return err; } - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + tg3_full_lock(tp, 0); err = tg3_init_hw(tp); if (err) { - tg3_halt(tp, 1); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_free_rings(tp); } else { - tp->timer_offset = HZ / 10; - tp->timer_counter = tp->timer_multiplier = 10; - tp->asf_counter = tp->asf_multiplier = (10 * 120); + if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) + tp->timer_offset = HZ; + else + tp->timer_offset = HZ / 10; + + BUG_ON(tp->timer_offset > HZ); + tp->timer_counter = tp->timer_multiplier = + (HZ / tp->timer_offset); + tp->asf_counter = tp->asf_multiplier = + ((HZ / tp->timer_offset) * 120); init_timer(&tp->timer); tp->timer.expires = jiffies + tp->timer_offset; @@ -5990,8 +6094,7 @@ static int tg3_open(struct net_device *dev) tp->timer.function = tg3_timer; } - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); if (err) { free_irq(tp->pdev->irq, dev); @@ -6005,34 +6108,31 @@ static int tg3_open(struct net_device *dev) if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { err = tg3_test_msi(tp); + if (err) { - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + tg3_full_lock(tp, 0); if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { pci_disable_msi(tp->pdev); tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; } - tg3_halt(tp, 1); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_free_rings(tp); tg3_free_consistent(tp); - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); return err; } } - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + tg3_full_lock(tp, 0); add_timer(&tp->timer); tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; tg3_enable_ints(tp); - 
spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); netif_start_queue(dev); @@ -6278,23 +6378,21 @@ static int tg3_close(struct net_device *dev) del_timer_sync(&tp->timer); - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + tg3_full_lock(tp, 1); #if 0 tg3_dump_state(tp); #endif tg3_disable_ints(tp); - tg3_halt(tp, 1); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_free_rings(tp); tp->tg3_flags &= ~(TG3_FLAG_INIT_COMPLETE | TG3_FLAG_GOT_SERDES_FLOWCTL); netif_carrier_off(tp->dev); - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); free_irq(tp->pdev->irq, dev); if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { @@ -6331,16 +6429,15 @@ static unsigned long calc_crc_errors(struct tg3 *tp) if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { - unsigned long flags; u32 val; - spin_lock_irqsave(&tp->lock, flags); + spin_lock_bh(&tp->lock); if (!tg3_readphy(tp, 0x1e, &val)) { tg3_writephy(tp, 0x1e, val | 0x8000); tg3_readphy(tp, 0x14, &val); } else val = 0; - spin_unlock_irqrestore(&tp->lock, flags); + spin_unlock_bh(&tp->lock); tp->phy_crc_errors += val; @@ -6602,11 +6699,9 @@ static void tg3_set_rx_mode(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + tg3_full_lock(tp, 0); __tg3_set_rx_mode(dev); - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); } #define TG3_REGDUMP_LEN (32 * 1024) @@ -6628,8 +6723,7 @@ static void tg3_get_regs(struct net_device *dev, memset(p, 0, TG3_REGDUMP_LEN); - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + tg3_full_lock(tp, 0); #define __GET_REG32(reg) (*(p)++ = tr32(reg)) #define GET_REG32_LOOP(base,len) \ @@ -6679,8 +6773,7 @@ do { p = (u32 *)(orig_p + (reg)); \ #undef GET_REG32_LOOP #undef GET_REG32_1 - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); } static int tg3_get_eeprom_len(struct net_device *dev) @@ -6856,8 +6949,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) return -EINVAL; } - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + tg3_full_lock(tp, 0); tp->link_config.autoneg = cmd->autoneg; if (cmd->autoneg == AUTONEG_ENABLE) { @@ -6873,8 +6965,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (netif_running(dev)) tg3_setup_phy(tp, 1); - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); return 0; } @@ -6910,12 +7001,12 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP)) return -EINVAL; - spin_lock_irq(&tp->lock); + spin_lock_bh(&tp->lock); if (wol->wolopts & WAKE_MAGIC) tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; else tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; - spin_unlock_irq(&tp->lock); + spin_unlock_bh(&tp->lock); return 0; } @@ -6955,7 +7046,7 @@ static int tg3_nway_reset(struct net_device *dev) if (!netif_running(dev)) return -EAGAIN; - spin_lock_irq(&tp->lock); + spin_lock_bh(&tp->lock); r = -EINVAL; tg3_readphy(tp, MII_BMCR, &bmcr); if (!tg3_readphy(tp, MII_BMCR, &bmcr) && @@ -6963,7 +7054,7 @@ static int tg3_nway_reset(struct net_device *dev) tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART); r = 0; } - spin_unlock_irq(&tp->lock); + spin_unlock_bh(&tp->lock); return r; } @@ -6985,17 +7076,19 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam * static int 
tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct tg3 *tp = netdev_priv(dev); + int irq_sync = 0; if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || (ering->tx_pending > TG3_TX_RING_SIZE - 1)) return -EINVAL; - if (netif_running(dev)) + if (netif_running(dev)) { tg3_netif_stop(tp); + irq_sync = 1; + } - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + tg3_full_lock(tp, irq_sync); tp->rx_pending = ering->rx_pending; @@ -7006,13 +7099,12 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e tp->tx_pending = ering->tx_pending; if (netif_running(dev)) { - tg3_halt(tp, 1); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_init_hw(tp); tg3_netif_start(tp); } - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); return 0; } @@ -7029,12 +7121,15 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) { struct tg3 *tp = netdev_priv(dev); + int irq_sync = 0; - if (netif_running(dev)) + if (netif_running(dev)) { tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); if (epause->autoneg) tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; else tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; @@ -7049,12 +7144,12 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE; if (netif_running(dev)) { - tg3_halt(tp, 1); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_init_hw(tp); tg3_netif_start(tp); } - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + + tg3_full_unlock(tp); return 0; } @@ -7075,12 +7170,12 @@ static int tg3_set_rx_csum(struct net_device *dev, u32 data) return 0; } - spin_lock_irq(&tp->lock); + spin_lock_bh(&tp->lock); if (data) tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; else tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; - spin_unlock_irq(&tp->lock); + spin_unlock_bh(&tp->lock); return 0; } @@ -7108,12 +7203,20 @@ static int tg3_get_stats_count (struct net_device *dev) return TG3_NUM_STATS; } +static int tg3_get_test_count (struct net_device *dev) +{ + return TG3_NUM_TEST; +} + static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf) { switch (stringset) { case ETH_SS_STATS: memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); break; + case ETH_SS_TEST: + memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); + break; default: WARN_ON(1); /* we need a WARN() */ break; } } @@ -7127,6 +7230,517 @@ static void tg3_get_ethtool_stats (struct net_device *dev, memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); } +#define NVRAM_TEST_SIZE 0x100 + +static int tg3_test_nvram(struct tg3 *tp) +{ + u32 *buf, csum; + int i, j, err = 0; + + buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) { + u32 val; + + if ((err = tg3_nvram_read(tp, i, &val)) != 0) + break; + buf[j] = cpu_to_le32(val); + } + if (i < NVRAM_TEST_SIZE) + goto out; + + err = -EIO; + if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) + goto out; + + /* Bootstrap checksum at offset 0x10 */ + csum = calc_crc((unsigned char *) buf, 0x10); + if(csum != cpu_to_le32(buf[0x10/4])) + goto out; + + /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ + csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); + if (csum != cpu_to_le32(buf[0xfc/4])) + goto out; + + err = 0; + +out: + kfree(buf); +
return err; +} + +#define TG3_SERDES_TIMEOUT_SEC 2 +#define TG3_COPPER_TIMEOUT_SEC 6 + +static int tg3_test_link(struct tg3 *tp) +{ + int i, max; + + if (!netif_running(tp->dev)) + return -ENODEV; + + if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) + max = TG3_SERDES_TIMEOUT_SEC; + else + max = TG3_COPPER_TIMEOUT_SEC; + + for (i = 0; i < max; i++) { + if (netif_carrier_ok(tp->dev)) + return 0; + + if (msleep_interruptible(1000)) + break; + } + + return -EIO; +} + +/* Only test the commonly used registers */ +static int tg3_test_registers(struct tg3 *tp) +{ + int i, is_5705; + u32 offset, read_mask, write_mask, val, save_val, read_val; + static struct { + u16 offset; + u16 flags; +#define TG3_FL_5705 0x1 +#define TG3_FL_NOT_5705 0x2 +#define TG3_FL_NOT_5788 0x4 + u32 read_mask; + u32 write_mask; + } reg_tbl[] = { + /* MAC Control Registers */ + { MAC_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x00ef6f8c }, + { MAC_MODE, TG3_FL_5705, + 0x00000000, 0x01ef6b8c }, + { MAC_STATUS, TG3_FL_NOT_5705, + 0x03800107, 0x00000000 }, + { MAC_STATUS, TG3_FL_5705, + 0x03800100, 0x00000000 }, + { MAC_ADDR_0_HIGH, 0x0000, + 0x00000000, 0x0000ffff }, + { MAC_ADDR_0_LOW, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_RX_MTU_SIZE, 0x0000, + 0x00000000, 0x0000ffff }, + { MAC_TX_MODE, 0x0000, + 0x00000000, 0x00000070 }, + { MAC_TX_LENGTHS, 0x0000, + 0x00000000, 0x00003fff }, + { MAC_RX_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x000007fc }, + { MAC_RX_MODE, TG3_FL_5705, + 0x00000000, 0x000007dc }, + { MAC_HASH_REG_0, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_1, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_2, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_3, 0x0000, + 0x00000000, 0xffffffff }, + + /* Receive Data and Receive BD Initiator Control Registers. */ + { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, + 0x00000000, 0x00000003 }, + { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+0, 0x0000, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+4, 0x0000, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+8, 0x0000, + 0x00000000, 0xffff0002 }, + { RCVDBDI_STD_BD+0xc, 0x0000, + 0x00000000, 0xffffffff }, + + /* Receive BD Initiator Control Registers. */ + { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVBDI_STD_THRESH, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + + /* Host Coalescing Control Registers. 
*/ + { HOSTCC_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x00000004 }, + { HOSTCC_MODE, TG3_FL_5705, + 0x00000000, 0x000000f6 }, + { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOL_TICKS, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOL_TICKS, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, + 0x00000000, 0xffffffff }, + { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, + 0xffffffff, 0x00000000 }, + { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, + 0xffffffff, 0x00000000 }, + + /* Buffer Manager Control Registers. */ + { BUFMGR_MB_POOL_ADDR, 0x0000, + 0x00000000, 0x007fff80 }, + { BUFMGR_MB_POOL_SIZE, 0x0000, + 0x00000000, 0x007fffff }, + { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, + 0x00000000, 0x0000003f }, + { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, + 0x00000000, 0x000001ff }, + { BUFMGR_MB_HIGH_WATER, 0x0000, + 0x00000000, 0x000001ff }, + { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, + 0xffffffff, 0x00000000 }, + { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, + 0xffffffff, 0x00000000 }, + + /* Mailbox Registers */ + { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, + 0x00000000, 0x000001ff }, + { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, + 0x00000000, 0x000001ff }, + { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, + 0x00000000, 0x000007ff }, + { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, + 0x00000000, 0x000001ff }, + + { 0xffff, 0x0000, 0x00000000, 0x00000000 }, + }; + + if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) + is_5705 = 1; + else + is_5705 = 0; + + for (i = 0; reg_tbl[i].offset != 0xffff; i++) { + if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) + continue; + + if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) + continue; + + if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) && + (reg_tbl[i].flags & TG3_FL_NOT_5788)) + continue; + + offset = (u32) reg_tbl[i].offset; + read_mask = reg_tbl[i].read_mask; + write_mask = reg_tbl[i].write_mask; + + /* Save the original register content */ + save_val = tr32(offset); + + /* Determine the read-only value. */ + read_val = save_val & read_mask; + + /* Write zero to the register, then make sure the read-only bits + * are not changed and the read/write bits are all zeros. + */ + tw32(offset, 0); + + val = tr32(offset); + + /* Test the read-only and read/write bits. 
*/ + if (((val & read_mask) != read_val) || (val & write_mask)) + goto out; + + /* Write ones to all the bits defined by RdMask and WrMask, then + * make sure the read-only bits are not changed and the + * read/write bits are all ones. + */ + tw32(offset, read_mask | write_mask); + + val = tr32(offset); + + /* Test the read-only bits. */ + if ((val & read_mask) != read_val) + goto out; + + /* Test the read/write bits. */ + if ((val & write_mask) != write_mask) + goto out; + + tw32(offset, save_val); + } + + return 0; + +out: + printk(KERN_ERR PFX "Register test failed at offset %x\n", offset); + tw32(offset, save_val); + return -EIO; +} + +static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) +{ + static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; + int i; + u32 j; + + for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) { + for (j = 0; j < len; j += 4) { + u32 val; + + tg3_write_mem(tp, offset + j, test_pattern[i]); + tg3_read_mem(tp, offset + j, &val); + if (val != test_pattern[i]) + return -EIO; + } + } + return 0; +} + +static int tg3_test_memory(struct tg3 *tp) +{ + static struct mem_entry { + u32 offset; + u32 len; + } mem_tbl_570x[] = { + { 0x00000000, 0x01000}, + { 0x00002000, 0x1c000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5705[] = { + { 0x00000100, 0x0000c}, + { 0x00000200, 0x00008}, + { 0x00000b50, 0x00400}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x01000}, + { 0x00008000, 0x02000}, + { 0x00010000, 0x0e000}, + { 0xffffffff, 0x00000} + }; + struct mem_entry *mem_tbl; + int err = 0; + int i; + + if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) + mem_tbl = mem_tbl_5705; + else + mem_tbl = mem_tbl_570x; + + for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { + if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset, + mem_tbl[i].len)) != 0) + break; + } + + return err; +} + +static int tg3_test_loopback(struct tg3 *tp) +{ + u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key; + u32 desc_idx; + struct sk_buff *skb, *rx_skb; + u8 *tx_data; + dma_addr_t map; + int num_pkts, tx_len, rx_len, i, err; + struct tg3_rx_buffer_desc *desc; + + if (!netif_running(tp->dev)) + return -ENODEV; + + err = -EIO; + + tg3_abort_hw(tp, 1); + + tg3_reset_hw(tp); + + mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | + MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY | + MAC_MODE_PORT_MODE_GMII; + tw32(MAC_MODE, mac_mode); + + tx_len = 1514; + skb = dev_alloc_skb(tx_len); + tx_data = skb_put(skb, tx_len); + memcpy(tx_data, tp->dev->dev_addr, 6); + memset(tx_data + 6, 0x0, 8); + + tw32(MAC_RX_MTU_SIZE, tx_len + 4); + + for (i = 14; i < tx_len; i++) + tx_data[i] = (u8) (i & 0xff); + + map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); + + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + HOSTCC_MODE_NOW); + + udelay(10); + + rx_start_idx = tp->hw_status->idx[0].rx_producer; + + send_idx = 0; + num_pkts = 0; + + tg3_set_txd(tp, send_idx, map, tx_len, 0, 1); + + send_idx++; + num_pkts++; + + tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx); + tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW); + + udelay(10); + + for (i = 0; i < 10; i++) { + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + HOSTCC_MODE_NOW); + + udelay(10); + + tx_idx = tp->hw_status->idx[0].tx_consumer; + rx_idx = tp->hw_status->idx[0].rx_producer; + if ((tx_idx == send_idx) && + (rx_idx == (rx_start_idx + num_pkts))) + break; + } + + pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE); + dev_kfree_skb(skb); + + if (tx_idx != send_idx) 
+ goto out; + + if (rx_idx != rx_start_idx + num_pkts) + goto out; + + desc = &tp->rx_rcb[rx_start_idx]; + desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; + opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; + if (opaque_key != RXD_OPAQUE_RING_STD) + goto out; + + if ((desc->err_vlan & RXD_ERR_MASK) != 0 && + (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) + goto out; + + rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; + if (rx_len != tx_len) + goto out; + + rx_skb = tp->rx_std_buffers[desc_idx].skb; + + map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping); + pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); + + for (i = 14; i < tx_len; i++) { + if (*(rx_skb->data + i) != (u8) (i & 0xff)) + goto out; + } + err = 0; + + /* tg3_free_rings will unmap and free the rx_skb */ +out: + return err; +} + +static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, + u64 *data) +{ + struct tg3 *tp = netdev_priv(dev); + + memset(data, 0, sizeof(u64) * TG3_NUM_TEST); + + if (tg3_test_nvram(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[0] = 1; + } + if (tg3_test_link(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[1] = 1; + } + if (etest->flags & ETH_TEST_FL_OFFLINE) { + int irq_sync = 0; + + if (netif_running(dev)) { + tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); + + tg3_halt(tp, RESET_KIND_SUSPEND, 1); + tg3_nvram_lock(tp); + tg3_halt_cpu(tp, RX_CPU_BASE); + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + tg3_halt_cpu(tp, TX_CPU_BASE); + tg3_nvram_unlock(tp); + + if (tg3_test_registers(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[2] = 1; + } + if (tg3_test_memory(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[3] = 1; + } + if (tg3_test_loopback(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[4] = 1; + } + + tg3_full_unlock(tp); + + if (tg3_test_interrupt(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[5] = 1; + } + + tg3_full_lock(tp, 0); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + if (netif_running(dev)) { + tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; + tg3_init_hw(tp); + tg3_netif_start(tp); + } + + tg3_full_unlock(tp); + } +} + static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mii_ioctl_data *data = if_mii(ifr); @@ -7144,9 +7758,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) break; /* We have no PHY */ - spin_lock_irq(&tp->lock); + spin_lock_bh(&tp->lock); err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval); - spin_unlock_irq(&tp->lock); + spin_unlock_bh(&tp->lock); data->val_out = mii_regval; @@ -7160,9 +7774,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (!capable(CAP_NET_ADMIN)) return -EPERM; - spin_lock_irq(&tp->lock); + spin_lock_bh(&tp->lock); err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in); - spin_unlock_irq(&tp->lock); + spin_unlock_bh(&tp->lock); return err; @@ -7178,31 +7792,35 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) { struct tg3 *tp = netdev_priv(dev); - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + tg3_full_lock(tp, 0); tp->vlgrp = grp; /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. 
*/ __tg3_set_rx_mode(dev); - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); } static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct tg3 *tp = netdev_priv(dev); - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + tg3_full_lock(tp, 0); if (tp->vlgrp) tp->vlgrp->vlan_devices[vid] = NULL; - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); } #endif +static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct tg3 *tp = netdev_priv(dev); + + memcpy(ec, &tp->coal, sizeof(*ec)); + return 0; +} + static struct ethtool_ops tg3_ethtool_ops = { .get_settings = tg3_get_settings, .set_settings = tg3_set_settings, @@ -7232,9 +7850,12 @@ static struct ethtool_ops tg3_ethtool_ops = { .get_tso = ethtool_op_get_tso, .set_tso = tg3_set_tso, #endif + .self_test_count = tg3_get_test_count, + .self_test = tg3_self_test, .get_strings = tg3_get_strings, .get_stats_count = tg3_get_stats_count, .get_ethtool_stats = tg3_get_ethtool_stats, + .get_coalesce = tg3_get_coalesce, }; static void __devinit tg3_get_eeprom_size(struct tg3 *tp) @@ -7914,6 +8535,16 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) case NIC_SRAM_DATA_CFG_LED_MODE_MAC: tp->led_ctrl = LED_CTRL_MODE_MAC; + + /* Default to PHY_1_MODE if 0 (MAC_MODE) is + * read on some older 5700/5701 bootcode. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5701) + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + break; case SHASTA_EXT_LED_SHARED: @@ -8422,15 +9053,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; - /* Only 5701 and later support tagged irq status mode. - * Also, 5788 chips cannot use tagged irq status. - * - * However, since we are using NAPI avoid tagged irq status - * because the interrupt condition is more difficult to - * fully clear in that mode. - */ tp->coalesce_mode = 0; - if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) tp->coalesce_mode |= HOSTCC_MODE_32BYTE; @@ -8494,6 +9117,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) tp->tg3_flags2 |= TG3_FLG2_IS_5788; + if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) && + (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)) + tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS; + if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { + tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | + HOSTCC_MODE_CLRTICK_TXBD); + + tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + } + /* these are limited to 10/100 only */ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || @@ -8671,6 +9306,146 @@ static int __devinit tg3_get_device_address(struct tg3 *tp) return 0; } +#define BOUNDARY_SINGLE_CACHELINE 1 +#define BOUNDARY_MULTI_CACHELINE 2 + +static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) +{ + int cacheline_size; + u8 byte; + int goal; + + pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); + if (byte == 0) + cacheline_size = 1024; + else + cacheline_size = (int) byte * 4; + + /* On 5703 and later chips, the boundary bits have no + * effect. 
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && + !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) + goto out; + +#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) + goal = BOUNDARY_MULTI_CACHELINE; +#else +#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) + goal = BOUNDARY_SINGLE_CACHELINE; +#else + goal = 0; +#endif +#endif + + if (!goal) + goto out; + + /* PCI controllers on most RISC systems tend to disconnect + * when a device tries to burst across a cache-line boundary. + * Therefore, letting tg3 do so just wastes PCI bandwidth. + * + * Unfortunately, for PCI-E there are only limited + * write-side controls for this, and thus for reads + * we will still get the disconnects. We'll also waste + * these PCI cycles for both read and write for chips + * other than 5700 and 5701 which do not implement the + * boundary bits. + */ + if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && + !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { + switch (cacheline_size) { + case 16: + case 32: + case 64: + case 128: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | + DMA_RWCTRL_WRITE_BNDRY_128_PCIX); + } else { + val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | + DMA_RWCTRL_WRITE_BNDRY_384_PCIX); + } + break; + + case 256: + val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | + DMA_RWCTRL_WRITE_BNDRY_256_PCIX); + break; + + default: + val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | + DMA_RWCTRL_WRITE_BNDRY_384_PCIX); + break; + }; + } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { + switch (cacheline_size) { + case 16: + case 32: + case 64: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; + val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; + break; + } + /* fallthrough */ + case 128: + default: + val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; + val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; + break; + }; + } else { + switch (cacheline_size) { + case 16: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_16 | + DMA_RWCTRL_WRITE_BNDRY_16); + break; + } + /* fallthrough */ + case 32: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_32 | + DMA_RWCTRL_WRITE_BNDRY_32); + break; + } + /* fallthrough */ + case 64: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_64 | + DMA_RWCTRL_WRITE_BNDRY_64); + break; + } + /* fallthrough */ + case 128: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_128 | + DMA_RWCTRL_WRITE_BNDRY_128); + break; + } + /* fallthrough */ + case 256: + val |= (DMA_RWCTRL_READ_BNDRY_256 | + DMA_RWCTRL_WRITE_BNDRY_256); + break; + case 512: + val |= (DMA_RWCTRL_READ_BNDRY_512 | + DMA_RWCTRL_WRITE_BNDRY_512); + break; + case 1024: + default: + val |= (DMA_RWCTRL_READ_BNDRY_1024 | + DMA_RWCTRL_WRITE_BNDRY_1024); + break; + }; + } + +out: + return val; +} + static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device) { struct tg3_internal_buffer_desc test_desc; @@ -8752,12 +9527,12 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm return ret; } -#define TEST_BUFFER_SIZE 0x400 +#define TEST_BUFFER_SIZE 0x2000 static int __devinit tg3_test_dma(struct tg3 *tp) { dma_addr_t buf_dma; - u32 *buf; + u32 *buf, saved_dma_rwctrl; int ret; buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); @@ -8769,46 +9544,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp) tp->dma_rwctrl = ((0x7 << 
DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); -#ifndef CONFIG_X86 - { - u8 byte; - int cacheline_size; - pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); - - if (byte == 0) - cacheline_size = 1024; - else - cacheline_size = (int) byte * 4; - - switch (cacheline_size) { - case 16: - case 32: - case 64: - case 128: - if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && - !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { - tp->dma_rwctrl |= - DMA_RWCTRL_WRITE_BNDRY_384_PCIX; - break; - } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { - tp->dma_rwctrl &= - ~(DMA_RWCTRL_PCI_WRITE_CMD); - tp->dma_rwctrl |= - DMA_RWCTRL_WRITE_BNDRY_128_PCIE; - break; - } - /* fallthrough */ - case 256: - if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) && - !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) - tp->dma_rwctrl |= - DMA_RWCTRL_WRITE_BNDRY_256; - else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) - tp->dma_rwctrl |= - DMA_RWCTRL_WRITE_BNDRY_256_PCIX; - }; - } -#endif + tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { /* DMA read watermark not used on PCIE */ @@ -8827,7 +9563,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp) if (ccval == 0x6 || ccval == 0x7) tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; - /* Set bit 23 to renable PCIX hw bug fix */ + /* Set bit 23 to enable PCIX hw bug fix */ tp->dma_rwctrl |= 0x009f0000; } else { tp->dma_rwctrl |= 0x001b000f; @@ -8868,6 +9604,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp) GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) goto out; + /* It is best to perform DMA test with maximum write burst size + * to expose the 5700/5701 write DMA bug. + */ + saved_dma_rwctrl = tp->dma_rwctrl; + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + while (1) { u32 *p = buf, i; @@ -8906,8 +9649,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp) if (p[i] == i) continue; - if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) == - DMA_RWCTRL_WRITE_BNDRY_DISAB) { + if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != + DMA_RWCTRL_WRITE_BNDRY_16) { + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); break; @@ -8924,6 +9668,28 @@ static int __devinit tg3_test_dma(struct tg3 *tp) break; } } + if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != + DMA_RWCTRL_WRITE_BNDRY_16) { + static struct pci_device_id dma_wait_state_chipsets[] = { + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, + PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, + { }, + }; + + /* DMA test passed without adjusting DMA boundary, + * now look for chipsets that are known to expose the + * DMA bug without failing the test. + */ + if (pci_dev_present(dma_wait_state_chipsets)) { + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; + } + else + /* Safe to use the calculated DMA boundary. 
*/ + tp->dma_rwctrl = saved_dma_rwctrl; + + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + } out: pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma); @@ -9011,6 +9777,31 @@ static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp) return peer; } +static void __devinit tg3_init_coal(struct tg3 *tp) +{ + struct ethtool_coalesce *ec = &tp->coal; + + memset(ec, 0, sizeof(*ec)); + ec->cmd = ETHTOOL_GCOALESCE; + ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; + ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; + ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; + ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; + ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; + ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; + ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; + ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; + ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; + + if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | + HOSTCC_MODE_CLRTICK_TXBD)) { + ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; + ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; + ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; + ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; + } +} + static int __devinit tg3_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -9232,7 +10023,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { pci_save_state(tp->pdev); tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); - tg3_halt(tp, 1); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); } err = tg3_test_dma(tp); @@ -9256,6 +10047,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, /* flow control autonegotiation is default behavior */ tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; + tg3_init_coal(tp); + err = register_netdev(dev); if (err) { printk(KERN_ERR PFX "Cannot register net device, " @@ -9298,6 +10091,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0, (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0, (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); + printk(KERN_INFO "%s: dma_rwctrl[%08x]\n", + dev->name, tp->dma_rwctrl); return 0; @@ -9345,24 +10140,19 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) del_timer_sync(&tp->timer); - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + tg3_full_lock(tp, 1); tg3_disable_ints(tp); - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); netif_device_detach(dev); - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); - tg3_halt(tp, 1); - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_full_unlock(tp); err = tg3_set_power_state(tp, pci_choose_state(pdev, state)); if (err) { - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + tg3_full_lock(tp, 0); tg3_init_hw(tp); @@ -9372,8 +10162,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) netif_device_attach(dev); tg3_netif_start(tp); - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); } return err; @@ -9396,20 +10185,16 @@ static int tg3_resume(struct pci_dev *pdev) netif_device_attach(dev); - spin_lock_irq(&tp->lock); - spin_lock(&tp->tx_lock); + tg3_full_lock(tp, 0); tg3_init_hw(tp); tp->timer.expires = jiffies + tp->timer_offset; add_timer(&tp->timer); - tg3_enable_ints(tp); - tg3_netif_start(tp); - spin_unlock(&tp->tx_lock); - spin_unlock_irq(&tp->lock); + tg3_full_unlock(tp); return 0; } |
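The locking conversion above hinges on the tg3_irq_quiesce()/tg3_full_lock() pair: the ISR no longer takes tp->lock, so any path that must fully stop driver activity first publishes an irq_sync flag and then waits out a possibly-running handler with synchronize_irq(). A condensed kernel-style sketch of that pattern follows; it is a simplified illustration under those assumptions, and the struct nic/nic_* names are invented here rather than taken from the driver.

/* Minimal sketch of the quiesce-then-lock pattern this patch introduces
 * (compare tg3_irq_quiesce()/tg3_full_lock()). Hypothetical names. */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct nic {
	spinlock_t lock;
	spinlock_t tx_lock;
	int irq_sync;
	unsigned int irq;
};

static void nic_irq_quiesce(struct nic *np)
{
	np->irq_sync = 1;		/* tell the ISR to bail out early */
	smp_mb();			/* publish the flag before waiting */
	synchronize_irq(np->irq);	/* wait out any ISR already running */
}

/* Take every lock the non-IRQ paths use; optionally fence the ISR too.
 * BH-disabling locks suffice because the ISR itself runs lockless and
 * only NAPI, timer, and ethtool paths (softirq/process context) contend.
 */
static void nic_full_lock(struct nic *np, int irq_sync)
{
	if (irq_sync)
		nic_irq_quiesce(np);
	spin_lock_bh(&np->lock);
	spin_lock(&np->tx_lock);
}

static void nic_full_unlock(struct nic *np)
{
	spin_unlock(&np->tx_lock);
	spin_unlock_bh(&np->lock);
}

On the ethtool side, once .self_test and .self_test_count are wired up as in the diff, running ethtool -t eth0 offline executes all six tests named in ethtool_test_keys and reports a per-test pass/fail result.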