Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig                        |  4
-rw-r--r--  drivers/net/arm/w90p910_ether.c            |  4
-rw-r--r--  drivers/net/e100.c                         |  2
-rw-r--r--  drivers/net/fec_mpc52xx.c                  |  5
-rw-r--r--  drivers/net/ibm_newemac/core.c             |  2
-rw-r--r--  drivers/net/irda/au1k_ir.c                 |  4
-rw-r--r--  drivers/net/irda/pxaficp_ir.c              |  4
-rw-r--r--  drivers/net/irda/sa1100_ir.c               |  4
-rw-r--r--  drivers/net/ixp2000/ixpdev.c               |  5
-rw-r--r--  drivers/net/macb.c                         |  7
-rw-r--r--  drivers/net/mlx4/en_tx.c                   |  5
-rw-r--r--  drivers/net/smc91x.c                       | 40
-rw-r--r--  drivers/net/virtio_net.c                   | 61
-rw-r--r--  drivers/net/wireless/orinoco/hw.c          |  2
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187_dev.c | 14
-rw-r--r--  drivers/net/yellowfin.c                    | 28
16 files changed, 119 insertions, 72 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 5f6509a5f64..5ce7cbabd7a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1727,12 +1727,14 @@ config KS8842
 	tristate "Micrel KSZ8842"
 	depends on HAS_IOMEM
 	help
-	  This platform driver is for Micrel KSZ8842 chip.
+	  This platform driver is for Micrel KSZ8842 / KS8842
+	  2-port ethernet switch chip (managed, VLAN, QoS).
 
 config KS8851
 	tristate "Micrel KS8851 SPI"
 	depends on SPI
 	select MII
+	select CRC32
 	help
 	  SPI driver for Micrel KS8851 SPI attached network chip.
 
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 616fb7985a3..ddd231cb54b 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -1080,7 +1080,7 @@ static struct platform_driver w90p910_ether_driver = {
 	.probe		= w90p910_ether_probe,
 	.remove		= __devexit_p(w90p910_ether_remove),
 	.driver		= {
-		.name	= "w90p910-emc",
+		.name	= "nuc900-emc",
 		.owner	= THIS_MODULE,
 	},
 };
@@ -1101,5 +1101,5 @@ module_exit(w90p910_ether_exit);
 
 MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
 MODULE_DESCRIPTION("w90p910 MAC driver!");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:w90p910-emc");
+MODULE_ALIAS("platform:nuc900-emc");
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 41b648a67fe..3a6735dc9f6 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1899,7 +1899,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
 		nic->ru_running = RU_SUSPENDED;
 		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
 			sizeof(struct rfd),
-			PCI_DMA_BIDIRECTIONAL);
+			PCI_DMA_FROMDEVICE);
 		return -ENODATA;
 	}
 
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index cc786333d95..c40113f5896 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -309,6 +309,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
 	struct bcom_fec_bd *bd;
+	unsigned long flags;
 
 	if (bcom_queue_full(priv->tx_dmatsk)) {
 		if (net_ratelimit())
@@ -316,7 +317,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock_irq(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 	dev->trans_start = jiffies;
 
 	bd = (struct bcom_fec_bd *)
@@ -332,7 +333,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 	}
 
-	spin_unlock_irq(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index beb84213b67..f0f89080371 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -1305,6 +1305,8 @@ static int emac_close(struct net_device *ndev)
 
 	free_irq(dev->emac_irq, dev);
 
+	netif_carrier_off(ndev);
+
 	return 0;
 }
 
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index c4361d46659..ee1cff5c9b2 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -23,7 +23,6 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
-#include <linux/etherdevice.h>
 #include <linux/slab.h>
 #include <linux/rtnetlink.h>
 #include <linux/interrupt.h>
@@ -205,9 +204,6 @@ static const struct net_device_ops au1k_irda_netdev_ops = {
 	.ndo_start_xmit		= au1k_irda_hard_xmit,
 	.ndo_tx_timeout		= au1k_tx_timeout,
 	.ndo_do_ioctl		= au1k_irda_ioctl,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
 };
 
 static int au1k_irda_net_init(struct net_device *dev)
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 3376a4f39e0..77d10edefd2 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -803,9 +803,6 @@ static const struct net_device_ops pxa_irda_netdev_ops = {
 	.ndo_stop		= pxa_irda_stop,
 	.ndo_start_xmit		= pxa_irda_hard_xmit,
 	.ndo_do_ioctl		= pxa_irda_ioctl,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
 };
 
 static int pxa_irda_probe(struct platform_device *pdev)
@@ -830,6 +827,7 @@ static int pxa_irda_probe(struct platform_device *pdev)
 	if (!dev)
 		goto err_mem_3;
 
+	SET_NETDEV_DEV(dev, &pdev->dev);
 	si = netdev_priv(dev);
 	si->dev = &pdev->dev;
 	si->pdata = pdev->dev.platform_data;
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 2aeb2e6aec1..b039cb081e9 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -24,7 +24,6 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
-#include <linux/etherdevice.h>
 #include <linux/slab.h>
 #include <linux/rtnetlink.h>
 #include <linux/interrupt.h>
@@ -881,9 +880,6 @@ static const struct net_device_ops sa1100_irda_netdev_ops = {
 	.ndo_stop		= sa1100_irda_stop,
 	.ndo_start_xmit		= sa1100_irda_hard_xmit,
 	.ndo_do_ioctl		= sa1100_irda_ioctl,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
 };
 
 static int sa1100_irda_probe(struct platform_device *pdev)
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 2a0174b62e9..92fb8235c76 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -41,6 +41,7 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ixpdev_priv *ip = netdev_priv(dev);
 	struct ixpdev_tx_desc *desc;
 	int entry;
+	unsigned long flags;
 
 	if (unlikely(skb->len > PAGE_SIZE)) {
 		/* @@@ Count drops. */
@@ -63,11 +64,11 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	dev->trans_start = jiffies;
 
-	local_irq_disable();
+	local_irq_save(flags);
 	ip->tx_queue_entries++;
 	if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
 		netif_stop_queue(dev);
-	local_irq_enable();
+	local_irq_restore(flags);
 
 	return 0;
 }
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 5b5c25368d1..e3601cf3f93 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -620,6 +620,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dma_addr_t mapping;
 	unsigned int len, entry;
 	u32 ctrl;
+	unsigned long flags;
 
 #ifdef DEBUG
 	int i;
@@ -635,12 +636,12 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif
 
 	len = skb->len;
-	spin_lock_irq(&bp->lock);
+	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
 	if (TX_BUFFS_AVAIL(bp) < 1) {
 		netif_stop_queue(dev);
-		spin_unlock_irq(&bp->lock);
+		spin_unlock_irqrestore(&bp->lock, flags);
 		dev_err(&bp->pdev->dev,
 			"BUG! Tx Ring full when queue awake!\n");
 		dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
@@ -674,7 +675,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (TX_BUFFS_AVAIL(bp) < 1)
 		netif_stop_queue(dev);
 
-	spin_unlock_irq(&bp->lock);
+	spin_unlock_irqrestore(&bp->lock, flags);
 
 	dev->trans_start = jiffies;
 
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 5a88b3f5769..62208401c4d 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -437,6 +437,7 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 {
 	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
 	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
+	unsigned long flags;
 
 	/* If we don't have a pending timer, set one up to catch our recent
 	   post in case the interface becomes idle */
@@ -445,9 +446,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 
 	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
 	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-		if (spin_trylock_irq(&ring->comp_lock)) {
+		if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
 			mlx4_en_process_tx_cq(priv->dev, cq);
-			spin_unlock_irq(&ring->comp_lock);
+			spin_unlock_irqrestore(&ring->comp_lock, flags);
 		}
 }
 
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 1c70e999cc5..7567f510eff 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -196,21 +196,23 @@ static void PRINT_PKT(u_char *buf, int length)
 /* this enables an interrupt in the interrupt mask register */
 #define SMC_ENABLE_INT(lp, x) do {				\
 	unsigned char mask;					\
-	spin_lock_irq(&lp->lock);				\
+	unsigned long smc_enable_flags;				\
+	spin_lock_irqsave(&lp->lock, smc_enable_flags);		\
 	mask = SMC_GET_INT_MASK(lp);				\
 	mask |= (x);						\
 	SMC_SET_INT_MASK(lp, mask);				\
-	spin_unlock_irq(&lp->lock);				\
+	spin_unlock_irqrestore(&lp->lock, smc_enable_flags);	\
 } while (0)
 
 /* this disables an interrupt from the interrupt mask register */
 #define SMC_DISABLE_INT(lp, x) do {				\
 	unsigned char mask;					\
-	spin_lock_irq(&lp->lock);				\
+	unsigned long smc_disable_flags;			\
+	spin_lock_irqsave(&lp->lock, smc_disable_flags);	\
 	mask = SMC_GET_INT_MASK(lp);				\
 	mask &= ~(x);						\
 	SMC_SET_INT_MASK(lp, mask);				\
-	spin_unlock_irq(&lp->lock);				\
+	spin_unlock_irqrestore(&lp->lock, smc_disable_flags);	\
 } while (0)
 
 /*
@@ -520,21 +522,21 @@ static inline void smc_rcv(struct net_device *dev)
  * any other concurrent access and C would always interrupt B. But life
  * isn't that easy in a SMP world...
  */
-#define smc_special_trylock(lock)				\
+#define smc_special_trylock(lock, flags)			\
 ({								\
 	int __ret;						\
-	local_irq_disable();					\
+	local_irq_save(flags);					\
 	__ret = spin_trylock(lock);				\
 	if (!__ret)						\
-		local_irq_enable();				\
+		local_irq_restore(flags);			\
 	__ret;							\
 })
-#define smc_special_lock(lock)		spin_lock_irq(lock)
-#define smc_special_unlock(lock)	spin_unlock_irq(lock)
+#define smc_special_lock(lock, flags)	spin_lock_irqsave(lock, flags)
+#define smc_special_unlock(lock, flags)	spin_unlock_irqrestore(lock, flags)
 #else
-#define smc_special_trylock(lock)	(1)
-#define smc_special_lock(lock)		do { } while (0)
-#define smc_special_unlock(lock)	do { } while (0)
+#define smc_special_trylock(lock, flags)	(1)
+#define smc_special_lock(lock, flags)	do { } while (0)
+#define smc_special_unlock(lock, flags)	do { } while (0)
 #endif
 
 /*
@@ -548,10 +550,11 @@ static void smc_hardware_send_pkt(unsigned long data)
 	struct sk_buff *skb;
 	unsigned int packet_no, len;
 	unsigned char *buf;
+	unsigned long flags;
 
 	DBG(3, "%s: %s\n", dev->name, __func__);
 
-	if (!smc_special_trylock(&lp->lock)) {
+	if (!smc_special_trylock(&lp->lock, flags)) {
 		netif_stop_queue(dev);
 		tasklet_schedule(&lp->tx_task);
 		return;
@@ -559,7 +562,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 
 	skb = lp->pending_tx_skb;
 	if (unlikely(!skb)) {
-		smc_special_unlock(&lp->lock);
+		smc_special_unlock(&lp->lock, flags);
 		return;
 	}
 	lp->pending_tx_skb = NULL;
@@ -569,7 +572,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 		printk("%s: Memory allocation failed.\n", dev->name);
 		dev->stats.tx_errors++;
 		dev->stats.tx_fifo_errors++;
-		smc_special_unlock(&lp->lock);
+		smc_special_unlock(&lp->lock, flags);
 		goto done;
 	}
 
@@ -608,7 +611,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 
 	/* queue the packet for TX */
 	SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
-	smc_special_unlock(&lp->lock);
+	smc_special_unlock(&lp->lock, flags);
 
 	dev->trans_start = jiffies;
 	dev->stats.tx_packets++;
@@ -633,6 +636,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct smc_local *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
 	unsigned int numPages, poll_count, status;
+	unsigned long flags;
 
 	DBG(3, "%s: %s\n", dev->name, __func__);
 
@@ -658,7 +662,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return 0;
 	}
 
-	smc_special_lock(&lp->lock);
+	smc_special_lock(&lp->lock, flags);
 
 	/* now, try to allocate the memory */
 	SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);
@@ -676,7 +680,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	} while (--poll_count);
 
-	smc_special_unlock(&lp->lock);
+	smc_special_unlock(&lp->lock, flags);
 
 	lp->pending_tx_skb = skb;
 	if (!poll_count) {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2a6e81d5b57..bbedf03a212 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -70,6 +70,9 @@ struct virtnet_info
 	struct sk_buff_head recv;
 	struct sk_buff_head send;
 
+	/* Work struct for refilling if we run low on memory. */
+	struct delayed_work refill;
+
 	/* Chain pages by the private ptr. */
 	struct page *pages;
 };
@@ -273,19 +276,22 @@ drop:
 	dev_kfree_skb(skb);
 }
 
-static void try_fill_recv_maxbufs(struct virtnet_info *vi)
+static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct scatterlist sg[2+MAX_SKB_FRAGS];
 	int num, err, i;
+	bool oom = false;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
 	for (;;) {
 		struct virtio_net_hdr *hdr;
 
 		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
-		if (unlikely(!skb))
+		if (unlikely(!skb)) {
+			oom = true;
 			break;
+		}
 
 		skb_reserve(skb, NET_IP_ALIGN);
 		skb_put(skb, MAX_PACKET_LEN);
@@ -296,7 +302,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
 		if (vi->big_packets) {
 			for (i = 0; i < MAX_SKB_FRAGS; i++) {
 				skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-				f->page = get_a_page(vi, GFP_ATOMIC);
+				f->page = get_a_page(vi, gfp);
 				if (!f->page)
 					break;
 
@@ -325,31 +331,35 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
+	return !oom;
 }
 
-static void try_fill_recv(struct virtnet_info *vi)
+/* Returns false if we couldn't fill entirely (OOM). */
+static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct scatterlist sg[1];
 	int err;
+	bool oom = false;
 
-	if (!vi->mergeable_rx_bufs) {
-		try_fill_recv_maxbufs(vi);
-		return;
-	}
+	if (!vi->mergeable_rx_bufs)
+		return try_fill_recv_maxbufs(vi, gfp);
 
 	for (;;) {
 		skb_frag_t *f;
 
 		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
-		if (unlikely(!skb))
+		if (unlikely(!skb)) {
+			oom = true;
 			break;
+		}
 
 		skb_reserve(skb, NET_IP_ALIGN);
 
 		f = &skb_shinfo(skb)->frags[0];
-		f->page = get_a_page(vi, GFP_ATOMIC);
+		f->page = get_a_page(vi, gfp);
 		if (!f->page) {
+			oom = true;
 			kfree_skb(skb);
 			break;
 		}
@@ -373,6 +383,7 @@ static void try_fill_recv(struct virtnet_info *vi)
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
+	return !oom;
 }
 
 static void skb_recv_done(struct virtqueue *rvq)
@@ -385,6 +396,23 @@ static void skb_recv_done(struct virtqueue *rvq)
 	}
 }
 
+static void refill_work(struct work_struct *work)
+{
+	struct virtnet_info *vi;
+	bool still_empty;
+
+	vi = container_of(work, struct virtnet_info, refill.work);
+	napi_disable(&vi->napi);
+	try_fill_recv(vi, GFP_KERNEL);
+	still_empty = (vi->num == 0);
+	napi_enable(&vi->napi);
+
+	/* In theory, this can happen: if we don't get any buffers in
+	 * we will *never* try to fill again. */
+	if (still_empty)
+		schedule_delayed_work(&vi->refill, HZ/2);
+}
+
 static int virtnet_poll(struct napi_struct *napi, int budget)
 {
 	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
@@ -400,10 +428,10 @@ again:
 		received++;
 	}
 
-	/* FIXME: If we oom and completely run out of inbufs, we need
-	 * to start a timer trying to fill more. */
-	if (vi->num < vi->max / 2)
-		try_fill_recv(vi);
+	if (vi->num < vi->max / 2) {
+		if (!try_fill_recv(vi, GFP_ATOMIC))
+			schedule_delayed_work(&vi->refill, 0);
+	}
 
 	/* Out of packets? */
 	if (received < budget) {
@@ -893,6 +921,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	vi->vdev = vdev;
 	vdev->priv = vi;
 	vi->pages = NULL;
+	INIT_DELAYED_WORK(&vi->refill, refill_work);
 
 	/* If they give us a callback when all buffers are done, we don't need
 	 * the timer. */
@@ -941,7 +970,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	}
 
 	/* Last of all, set up some receive buffers. */
-	try_fill_recv(vi);
+	try_fill_recv(vi, GFP_KERNEL);
 
 	/* If we didn't even get one input buffer, we're useless. */
 	if (vi->num == 0) {
@@ -958,6 +987,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 unregister:
 	unregister_netdev(dev);
+	cancel_delayed_work_sync(&vi->refill);
 free_vqs:
 	vdev->config->del_vqs(vdev);
 free:
@@ -986,6 +1016,7 @@ static void virtnet_remove(struct virtio_device *vdev)
 	BUG_ON(vi->num != 0);
 
 	unregister_netdev(vi->dev);
+	cancel_delayed_work_sync(&vi->refill);
 
 	vdev->config->del_vqs(vi->vdev);
 
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 632fac86a30..b3946272c72 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -70,7 +70,7 @@ int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc)
 	int err = 0;
 	u8 tsc_arr[4][IW_ENCODE_SEQ_MAX_SIZE];
 
-	if ((key < 0) || (key > 4))
+	if ((key < 0) || (key >= 4))
 		return -EINVAL;
 
 	err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 294250e294d..87a95588a8e 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -869,6 +869,9 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
 	priv->aifsn[3] = 3; /* AIFSN[AC_BE] */
 	rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0);
 
+	/* ENEDCA flag must always be set, transmit issues? */
+	rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA);
+
 	return 0;
 }
 
@@ -1173,13 +1176,16 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
 			rtl818x_iowrite8(priv, &priv->map->BSSID[i],
 					 info->bssid[i]);
 
+		if (priv->is_rtl8187b)
+			reg = RTL818X_MSR_ENEDCA;
+		else
+			reg = 0;
+
 		if (is_valid_ether_addr(info->bssid)) {
-			reg = RTL818X_MSR_INFRA;
-			if (priv->is_rtl8187b)
-				reg |= RTL818X_MSR_ENEDCA;
+			reg |= RTL818X_MSR_INFRA;
 			rtl818x_iowrite8(priv, &priv->map->MSR, reg);
 		} else {
-			reg = RTL818X_MSR_NO_LINK;
+			reg |= RTL818X_MSR_NO_LINK;
 			rtl818x_iowrite8(priv, &priv->map->MSR, reg);
 		}
 
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index a07580138e8..c2fd6187773 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -346,7 +346,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static int yellowfin_open(struct net_device *dev);
 static void yellowfin_timer(unsigned long data);
 static void yellowfin_tx_timeout(struct net_device *dev);
-static void yellowfin_init_ring(struct net_device *dev);
+static int yellowfin_init_ring(struct net_device *dev);
 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
 static int yellowfin_rx(struct net_device *dev);
@@ -573,19 +573,24 @@ static int yellowfin_open(struct net_device *dev)
 {
 	struct yellowfin_private *yp = netdev_priv(dev);
 	void __iomem *ioaddr = yp->base;
-	int i;
+	int i, ret;
 
 	/* Reset the chip. */
 	iowrite32(0x80000000, ioaddr + DMACtrl);
 
-	i = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
-	if (i) return i;
+	ret = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
+	if (ret)
+		return ret;
 
 	if (yellowfin_debug > 1)
 		printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
 		       dev->name, dev->irq);
 
-	yellowfin_init_ring(dev);
+	ret = yellowfin_init_ring(dev);
+	if (ret) {
+		free_irq(dev->irq, dev);
+		return ret;
+	}
 
 	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
 	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
@@ -725,10 +730,10 @@ static void yellowfin_tx_timeout(struct net_device *dev)
 }
 
 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
-static void yellowfin_init_ring(struct net_device *dev)
+static int yellowfin_init_ring(struct net_device *dev)
 {
 	struct yellowfin_private *yp = netdev_priv(dev);
-	int i;
+	int i, j;
 
 	yp->tx_full = 0;
 	yp->cur_rx = yp->cur_tx = 0;
@@ -753,6 +758,11 @@ static void yellowfin_init_ring(struct net_device *dev)
 		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 	}
+	if (i != RX_RING_SIZE) {
+		for (j = 0; j < i; j++)
+			dev_kfree_skb(yp->rx_skbuff[j]);
+		return -ENOMEM;
+	}
 	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 
@@ -769,8 +779,6 @@ static void yellowfin_init_ring(struct net_device *dev)
 	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
 #else
 {
-	int j;
-
 	/* Tx ring needs a pair of descriptors, the second for the status. */
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		j = 2*i;
@@ -805,7 +813,7 @@ static void yellowfin_init_ring(struct net_device *dev)
 	}
 #endif
 	yp->tx_tail_desc = &yp->tx_status[0];
-	return;
+	return 0;
 }
 
 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
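
Most of the hunks above make the same change: transmit paths and interrupt-mask helpers move from spin_lock_irq()/local_irq_disable() to the irqsave/irqrestore variants, so the unlock path restores whatever interrupt state the caller had instead of unconditionally re-enabling interrupts (which matters when hard_start_xmit is entered with interrupts already disabled, e.g. via netpoll/netconsole). The sketch below only illustrates that pattern in a minimal, hypothetical driver; foo_priv and foo_start_xmit are made-up names and the body is a stub, not code from this patch.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/spinlock.h>

	/* Hypothetical per-device private data -- illustration only. */
	struct foo_priv {
		spinlock_t lock;
	};

	static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);
		unsigned long flags;

		/*
		 * spin_unlock_irq() would re-enable interrupts unconditionally.
		 * spin_lock_irqsave() records the caller's interrupt state in
		 * 'flags' and spin_unlock_irqrestore() puts exactly that state
		 * back, so this is safe whether or not the caller already runs
		 * with interrupts disabled.
		 */
		spin_lock_irqsave(&priv->lock, flags);
		/* ... queue the skb to the hardware descriptor ring here ... */
		spin_unlock_irqrestore(&priv->lock, flags);

		dev_kfree_skb(skb);	/* stub only: nothing is actually transmitted */
		return NETDEV_TX_OK;
	}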