Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c501.c | 17
-rw-r--r--  drivers/net/3c505.c | 18
-rw-r--r--  drivers/net/3c507.c | 15
-rw-r--r--  drivers/net/3c509.c | 25
-rw-r--r--  drivers/net/3c515.c | 21
-rw-r--r--  drivers/net/3c523.c | 25
-rw-r--r--  drivers/net/3c527.c | 19
-rw-r--r--  drivers/net/3c59x.c | 55
-rw-r--r--  drivers/net/8139cp.c | 6
-rw-r--r--  drivers/net/8139too.c | 6
-rw-r--r--  drivers/net/82596.c | 17
-rw-r--r--  drivers/net/Kconfig | 3
-rw-r--r--  drivers/net/amd8111e.c | 6
-rw-r--r--  drivers/net/arcnet/arc-rawmode.c | 2
-rw-r--r--  drivers/net/arcnet/arcnet.c | 71
-rw-r--r--  drivers/net/arcnet/capmode.c | 2
-rw-r--r--  drivers/net/arcnet/com20020-isa.c | 2
-rw-r--r--  drivers/net/arcnet/com20020-pci.c | 3
-rw-r--r--  drivers/net/arcnet/com20020.c | 11
-rw-r--r--  drivers/net/arcnet/rfc1051.c | 12
-rw-r--r--  drivers/net/arcnet/rfc1201.c | 47
-rw-r--r--  drivers/net/arm/ep93xx_eth.c | 8
-rw-r--r--  drivers/net/arm/ixp4xx_eth.c | 12
-rw-r--r--  drivers/net/atl1e/atl1e_main.c | 6
-rw-r--r--  drivers/net/au1000_eth.c | 1043
-rw-r--r--  drivers/net/b44.c | 6
-rw-r--r--  drivers/net/bnx2.c | 22
-rw-r--r--  drivers/net/bnx2x_main.c | 6
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/cassini.c | 8
-rw-r--r--  drivers/net/chelsio/sge.c | 4
-rw-r--r--  drivers/net/cpmac.c | 10
-rw-r--r--  drivers/net/cxgb3/adapter.h | 14
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 65
-rw-r--r--  drivers/net/cxgb3/sge.c | 119
-rw-r--r--  drivers/net/e100.c | 6
-rw-r--r--  drivers/net/e1000/e1000.h | 2
-rw-r--r--  drivers/net/e1000/e1000_main.c | 39
-rw-r--r--  drivers/net/e1000e/e1000.h | 2
-rw-r--r--  drivers/net/e1000e/netdev.c | 52
-rw-r--r--  drivers/net/ehea/ehea.h | 2
-rw-r--r--  drivers/net/ehea/ehea_main.c | 128
-rw-r--r--  drivers/net/ehea/ehea_qmr.c | 4
-rw-r--r--  drivers/net/enic/enic_main.c | 12
-rw-r--r--  drivers/net/epic100.c | 6
-rw-r--r--  drivers/net/forcedeth.c | 10
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 4
-rw-r--r--  drivers/net/gianfar.c | 6
-rw-r--r--  drivers/net/hamradio/6pack.c | 16
-rw-r--r--  drivers/net/hamradio/baycom_epp.c | 43
-rw-r--r--  drivers/net/hamradio/bpqether.c | 38
-rw-r--r--  drivers/net/hamradio/dmascc.c | 54
-rw-r--r--  drivers/net/hamradio/hdlcdrv.c | 45
-rw-r--r--  drivers/net/hamradio/mkiss.c | 46
-rw-r--r--  drivers/net/hamradio/scc.c | 21
-rw-r--r--  drivers/net/hamradio/yam.c | 61
-rw-r--r--  drivers/net/ibmveth.c | 8
-rw-r--r--  drivers/net/igb/e1000_82575.c | 7
-rw-r--r--  drivers/net/igb/igb.h | 16
-rw-r--r--  drivers/net/igb/igb_ethtool.c | 17
-rw-r--r--  drivers/net/igb/igb_main.c | 103
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 6
-rw-r--r--  drivers/net/ixgbe/ixgbe.h | 9
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 11
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 93
-rw-r--r--  drivers/net/ixp2000/ixpdev.c | 4
-rw-r--r--  drivers/net/jme.h | 6
-rw-r--r--  drivers/net/korina.c | 4
-rw-r--r--  drivers/net/macb.c | 10
-rw-r--r--  drivers/net/mlx4/en_rx.c | 4
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 6
-rw-r--r--  drivers/net/natsemi.c | 6
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 2
-rw-r--r--  drivers/net/niu.c | 6
-rw-r--r--  drivers/net/pasemi_mac.c | 6
-rw-r--r--  drivers/net/pcnet32.c | 6
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 13
-rw-r--r--  drivers/net/ppp_generic.c | 275
-rw-r--r--  drivers/net/pppoe.c | 529
-rw-r--r--  drivers/net/pppol2tp.c | 166
-rw-r--r--  drivers/net/pppox.c | 3
-rw-r--r--  drivers/net/ps3_gelic_net.c | 24
-rw-r--r--  drivers/net/ps3_gelic_wireless.c | 28
-rw-r--r--  drivers/net/qla3xxx.c | 6
-rw-r--r--  drivers/net/qlge/qlge_main.c | 6
-rw-r--r--  drivers/net/r6040.c | 4
-rw-r--r--  drivers/net/r8169.c | 6
-rw-r--r--  drivers/net/s2io.c | 8
-rw-r--r--  drivers/net/sb1250-mac.c | 6
-rw-r--r--  drivers/net/sc92031.c | 27
-rw-r--r--  drivers/net/sfc/Kconfig | 1
-rw-r--r--  drivers/net/sfc/efx.c | 15
-rw-r--r--  drivers/net/sfc/efx.h | 2
-rw-r--r--  drivers/net/sfc/net_driver.h | 9
-rw-r--r--  drivers/net/sfc/rx.c | 207
-rw-r--r--  drivers/net/sfc/rx.h | 3
-rw-r--r--  drivers/net/sfc/sfe4001.c | 1
-rw-r--r--  drivers/net/sfc/tenxpress.c | 1
-rw-r--r--  drivers/net/skge.c | 6
-rw-r--r--  drivers/net/smc91x.c | 116
-rw-r--r--  drivers/net/smc91x.h | 10
-rw-r--r--  drivers/net/smsc911x.c | 8
-rw-r--r--  drivers/net/smsc9420.c | 4
-rw-r--r--  drivers/net/spider_net.c | 12
-rw-r--r--  drivers/net/starfire.c | 6
-rw-r--r--  drivers/net/sungem.c | 6
-rw-r--r--  drivers/net/tc35815.c | 6
-rw-r--r--  drivers/net/tehuti.c | 6
-rw-r--r--  drivers/net/tg3.c | 18
-rw-r--r--  drivers/net/tokenring/3c359.c | 17
-rw-r--r--  drivers/net/tokenring/abyss.c | 10
-rw-r--r--  drivers/net/tokenring/ibmtr.c | 43
-rw-r--r--  drivers/net/tokenring/lanstreamer.c | 43
-rw-r--r--  drivers/net/tokenring/lanstreamer.h | 1
-rw-r--r--  drivers/net/tokenring/olympic.c | 38
-rw-r--r--  drivers/net/tokenring/olympic.h | 1
-rw-r--r--  drivers/net/tokenring/tms380tr.c | 21
-rw-r--r--  drivers/net/tokenring/tms380tr.h | 1
-rw-r--r--  drivers/net/tokenring/tmspci.c | 4
-rw-r--r--  drivers/net/tsi108_eth.c | 8
-rw-r--r--  drivers/net/tulip/interrupt.c | 10
-rw-r--r--  drivers/net/tun.c | 371
-rw-r--r--  drivers/net/typhoon.c | 8
-rw-r--r--  drivers/net/typhoon.h | 234
-rw-r--r--  drivers/net/ucc_geth.c | 6
-rw-r--r--  drivers/net/usb/smsc95xx.c | 8
-rw-r--r--  drivers/net/via-rhine.c | 4
-rw-r--r--  drivers/net/virtio_net.c | 55
-rw-r--r--  drivers/net/wan/c101.c | 12
-rw-r--r--  drivers/net/wan/cosa.c | 14
-rw-r--r--  drivers/net/wan/dscc4.c | 18
-rw-r--r--  drivers/net/wan/farsync.c | 18
-rw-r--r--  drivers/net/wan/hd64572.c | 4
-rw-r--r--  drivers/net/wan/hdlc.c | 29
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 1
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 28
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 2
-rw-r--r--  drivers/net/wan/hdlc_raw.c | 3
-rw-r--r--  drivers/net/wan/hdlc_raw_eth.c | 8
-rw-r--r--  drivers/net/wan/hdlc_x25.c | 2
-rw-r--r--  drivers/net/wan/hostess_sv11.c | 12
-rw-r--r--  drivers/net/wan/ixp4xx_hss.c | 24
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 19
-rw-r--r--  drivers/net/wan/lmc/lmc_proto.c | 17
-rw-r--r--  drivers/net/wan/n2.c | 12
-rw-r--r--  drivers/net/wan/pc300too.c | 12
-rw-r--r--  drivers/net/wan/pci200syn.c | 12
-rw-r--r--  drivers/net/wan/sealevel.c | 12
-rw-r--r--  drivers/net/wan/wanxl.c | 14
-rw-r--r--  drivers/net/wimax/i2400m/netdev.c | 14
-rw-r--r--  drivers/net/wireless/ath5k/debug.c | 2
-rw-r--r--  drivers/net/wireless/libertas/debugfs.c | 14
-rw-r--r--  drivers/net/wireless/strip.c | 2
-rw-r--r--  drivers/net/xen-netfront.c | 8
-rw-r--r--  drivers/net/znet.c | 17
155 files changed, 2706 insertions, 2680 deletions
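
Almost every hunk below applies the same conversion: the old per-field callbacks on struct net_device (dev->open, dev->stop, dev->hard_start_xmit, and so on) are gathered into a single const struct net_device_ops and hooked up through dev->netdev_ops. A minimal sketch of the before/after shape, in which "foo" and its callbacks are made-up names rather than anything taken from the files below:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Callbacks assumed to be defined elsewhere in the (made-up) driver. */
int foo_open(struct net_device *dev);
int foo_close(struct net_device *dev);
int foo_start_xmit(struct sk_buff *skb, struct net_device *dev);
void foo_tx_timeout(struct net_device *dev);
void foo_set_rx_mode(struct net_device *dev);

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,
	.ndo_stop		= foo_close,
	.ndo_start_xmit		= foo_start_xmit,
	.ndo_tx_timeout		= foo_tx_timeout,
	.ndo_set_multicast_list	= foo_set_rx_mode,
	/* generic ethernet helpers, now listed explicitly in the ops table */
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int foo_probe(struct net_device *dev)
{
	/* before: dev->open = foo_open; dev->stop = foo_close;
	 *         dev->hard_start_xmit = foo_start_xmit; ...     */
	dev->netdev_ops = &foo_netdev_ops;
	dev->watchdog_timeo = HZ;
	return 0;
}

The eth_change_mtu/eth_mac_addr/eth_validate_addr entries that appear in nearly every new ops table are the generic ethernet defaults that the old field-based setup used to supply implicitly.
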
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 3d1318a3e68..1c5344aa57c 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -197,6 +197,17 @@ out:
return ERR_PTR(err);
}
+static const struct net_device_ops el_netdev_ops = {
+ .ndo_open = el_open,
+ .ndo_stop = el1_close,
+ .ndo_start_xmit = el_start_xmit,
+ .ndo_tx_timeout = el_timeout,
+ .ndo_set_multicast_list = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
/**
* el1_probe1:
* @dev: The device structure to use
@@ -305,12 +316,8 @@ static int __init el1_probe1(struct net_device *dev, int ioaddr)
* The EL1-specific entries in the device structure.
*/
- dev->open = &el_open;
- dev->hard_start_xmit = &el_start_xmit;
- dev->tx_timeout = &el_timeout;
+ dev->netdev_ops = &el_netdev_ops;
dev->watchdog_timeo = HZ;
- dev->stop = &el1_close;
- dev->set_multicast_list = &set_multicast_list;
dev->ethtool_ops = &netdev_ethtool_ops;
return 0;
}
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 6124605bef0..ea1ad8ce883 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -1348,6 +1348,17 @@ static int __init elp_autodetect(struct net_device *dev)
return 0; /* Because of this, the layer above will return -ENODEV */
}
+static const struct net_device_ops elp_netdev_ops = {
+ .ndo_open = elp_open,
+ .ndo_stop = elp_close,
+ .ndo_get_stats = elp_get_stats,
+ .ndo_start_xmit = elp_start_xmit,
+ .ndo_tx_timeout = elp_timeout,
+ .ndo_set_multicast_list = elp_set_mc_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
/******************************************************
*
@@ -1552,13 +1563,8 @@ static int __init elplus_setup(struct net_device *dev)
printk(KERN_ERR "%s: adapter configuration failed\n", dev->name);
}
- dev->open = elp_open; /* local */
- dev->stop = elp_close; /* local */
- dev->get_stats = elp_get_stats; /* local */
- dev->hard_start_xmit = elp_start_xmit; /* local */
- dev->tx_timeout = elp_timeout; /* local */
+ dev->netdev_ops = &elp_netdev_ops;
dev->watchdog_timeo = 10*HZ;
- dev->set_multicast_list = elp_set_mc_list; /* local */
dev->ethtool_ops = &netdev_ethtool_ops; /* local */
dev->mem_start = dev->mem_end = 0;
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index 423e65d0ba7..fbbaf826def 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -352,6 +352,16 @@ out:
return ERR_PTR(err);
}
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = el16_open,
+ .ndo_stop = el16_close,
+ .ndo_start_xmit = el16_send_packet,
+ .ndo_tx_timeout = el16_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
static int __init el16_probe1(struct net_device *dev, int ioaddr)
{
static unsigned char init_ID_done, version_printed;
@@ -449,10 +459,7 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
goto out1;
}
- dev->open = el16_open;
- dev->stop = el16_close;
- dev->hard_start_xmit = el16_send_packet;
- dev->tx_timeout = el16_tx_timeout;
+ dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
dev->ethtool_ops = &netdev_ethtool_ops;
dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 535c234286e..d58919c7032 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -537,6 +537,21 @@ static struct mca_driver el3_mca_driver = {
static int mca_registered;
#endif /* CONFIG_MCA */
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = el3_open,
+ .ndo_stop = el3_close,
+ .ndo_start_xmit = el3_start_xmit,
+ .ndo_get_stats = el3_get_stats,
+ .ndo_set_multicast_list = set_multicast_list,
+ .ndo_tx_timeout = el3_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = el3_poll_controller,
+#endif
+};
+
static int __devinit el3_common_init(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
@@ -553,16 +568,8 @@ static int __devinit el3_common_init(struct net_device *dev)
}
/* The EL3-specific entries in the device structure. */
- dev->open = &el3_open;
- dev->hard_start_xmit = &el3_start_xmit;
- dev->stop = &el3_close;
- dev->get_stats = &el3_get_stats;
- dev->set_multicast_list = &set_multicast_list;
- dev->tx_timeout = el3_tx_timeout;
+ dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = el3_poll_controller;
-#endif
SET_ETHTOOL_OPS(dev, &ethtool_ops);
err = register_netdev(dev);
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 39ac12233aa..167bf23066e 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -563,6 +563,20 @@ no_pnp:
return NULL;
}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = corkscrew_open,
+ .ndo_stop = corkscrew_close,
+ .ndo_start_xmit = corkscrew_start_xmit,
+ .ndo_tx_timeout = corkscrew_timeout,
+ .ndo_get_stats = corkscrew_get_stats,
+ .ndo_set_multicast_list = set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+
static int corkscrew_setup(struct net_device *dev, int ioaddr,
struct pnp_dev *idev, int card_number)
{
@@ -681,13 +695,8 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
vp->full_bus_master_rx = (vp->capabilities & 0x20) ? 1 : 0;
/* The 3c51x-specific entries in the device structure. */
- dev->open = &corkscrew_open;
- dev->hard_start_xmit = &corkscrew_start_xmit;
- dev->tx_timeout = &corkscrew_timeout;
+ dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = (400 * HZ) / 1000;
- dev->stop = &corkscrew_close;
- dev->get_stats = &corkscrew_get_stats;
- dev->set_multicast_list = &set_rx_mode;
dev->ethtool_ops = &netdev_ethtool_ops;
return register_netdev(dev);
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index ff41e1ff560..8f734d74b51 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -403,6 +403,20 @@ static int elmc_getinfo(char *buf, int slot, void *d)
return len;
} /* elmc_getinfo() */
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = elmc_open,
+ .ndo_stop = elmc_close,
+ .ndo_get_stats = elmc_get_stats,
+ .ndo_start_xmit = elmc_send_packet,
+ .ndo_tx_timeout = elmc_timeout,
+#ifdef ELMC_MULTICAST
+ .ndo_set_multicast_list = set_multicast_list,
+#endif
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
/*****************************************************************/
static int __init do_elmc_probe(struct net_device *dev)
@@ -544,17 +558,8 @@ static int __init do_elmc_probe(struct net_device *dev)
printk(KERN_INFO "%s: hardware address %pM\n",
dev->name, dev->dev_addr);
- dev->open = &elmc_open;
- dev->stop = &elmc_close;
- dev->get_stats = &elmc_get_stats;
- dev->hard_start_xmit = &elmc_send_packet;
- dev->tx_timeout = &elmc_timeout;
+ dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = HZ;
-#ifdef ELMC_MULTICAST
- dev->set_multicast_list = &set_multicast_list;
-#else
- dev->set_multicast_list = NULL;
-#endif
dev->ethtool_ops = &netdev_ethtool_ops;
/* note that we haven't actually requested the IRQ from the kernel.
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 2df3af3b9b2..b61073c42bf 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -288,6 +288,18 @@ struct net_device *__init mc32_probe(int unit)
return ERR_PTR(-ENODEV);
}
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = mc32_open,
+ .ndo_stop = mc32_close,
+ .ndo_start_xmit = mc32_send_packet,
+ .ndo_get_stats = mc32_get_stats,
+ .ndo_set_multicast_list = mc32_set_multicast_list,
+ .ndo_tx_timeout = mc32_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
/**
* mc32_probe1 - Check a given slot for a board and test the card
* @dev: Device structure to fill in
@@ -518,12 +530,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
- dev->open = mc32_open;
- dev->stop = mc32_close;
- dev->hard_start_xmit = mc32_send_packet;
- dev->get_stats = mc32_get_stats;
- dev->set_multicast_list = mc32_set_multicast_list;
- dev->tx_timeout = mc32_timeout;
+ dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = HZ*5; /* Board does all the work */
dev->ethtool_ops = &netdev_ethtool_ops;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index cdbbb6226fc..b2563d384cf 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -992,6 +992,42 @@ out:
return rc;
}
+static const struct net_device_ops boomrang_netdev_ops = {
+ .ndo_open = vortex_open,
+ .ndo_stop = vortex_close,
+ .ndo_start_xmit = boomerang_start_xmit,
+ .ndo_tx_timeout = vortex_tx_timeout,
+ .ndo_get_stats = vortex_get_stats,
+#ifdef CONFIG_PCI
+ .ndo_do_ioctl = vortex_ioctl,
+#endif
+ .ndo_set_multicast_list = set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = poll_vortex,
+#endif
+};
+
+static const struct net_device_ops vortex_netdev_ops = {
+ .ndo_open = vortex_open,
+ .ndo_stop = vortex_close,
+ .ndo_start_xmit = vortex_start_xmit,
+ .ndo_tx_timeout = vortex_tx_timeout,
+ .ndo_get_stats = vortex_get_stats,
+#ifdef CONFIG_PCI
+ .ndo_do_ioctl = vortex_ioctl,
+#endif
+ .ndo_set_multicast_list = set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = poll_vortex,
+#endif
+};
+
/*
* Start up the PCI/EISA device which is described by *gendev.
* Return 0 on success.
@@ -1366,18 +1402,16 @@ static int __devinit vortex_probe1(struct device *gendev,
}
/* The 3c59x-specific entries in the device structure. */
- dev->open = vortex_open;
if (vp->full_bus_master_tx) {
- dev->hard_start_xmit = boomerang_start_xmit;
+ dev->netdev_ops = &boomrang_netdev_ops;
/* Actually, it still should work with iommu. */
if (card_idx < MAX_UNITS &&
((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) ||
hw_checksums[card_idx] == 1)) {
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
}
- } else {
- dev->hard_start_xmit = vortex_start_xmit;
- }
+ } else
+ dev->netdev_ops = &vortex_netdev_ops;
if (print_info) {
printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n",
@@ -1386,18 +1420,9 @@ static int __devinit vortex_probe1(struct device *gendev,
(dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
}
- dev->stop = vortex_close;
- dev->get_stats = vortex_get_stats;
-#ifdef CONFIG_PCI
- dev->do_ioctl = vortex_ioctl;
-#endif
dev->ethtool_ops = &vortex_ethtool_ops;
- dev->set_multicast_list = set_rx_mode;
- dev->tx_timeout = vortex_tx_timeout;
dev->watchdog_timeo = (watchdog * HZ) / 1000;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = poll_vortex;
-#endif
+
if (pdev) {
vp->pm_state_valid = 1;
pci_save_state(VORTEX_PCI(vp));
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 4e19ae3ce6b..35517b06ec3 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -604,7 +604,7 @@ rx_next:
spin_lock_irqsave(&cp->lock, flags);
cpw16_f(IntrMask, cp_intr_mask);
- __netif_rx_complete(napi);
+ __napi_complete(napi);
spin_unlock_irqrestore(&cp->lock, flags);
}
@@ -641,9 +641,9 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
}
if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
- if (netif_rx_schedule_prep(&cp->napi)) {
+ if (napi_schedule_prep(&cp->napi)) {
cpw16_f(IntrMask, cp_norx_intr_mask);
- __netif_rx_schedule(&cp->napi);
+ __napi_schedule(&cp->napi);
}
if (status & (TxOK | TxErr | TxEmpty | SWInt))
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index a5b24202d56..5341da604e8 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -2128,7 +2128,7 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
*/
spin_lock_irqsave(&tp->lock, flags);
RTL_W16_F(IntrMask, rtl8139_intr_mask);
- __netif_rx_complete(napi);
+ __napi_complete(napi);
spin_unlock_irqrestore(&tp->lock, flags);
}
spin_unlock(&tp->rx_lock);
@@ -2178,9 +2178,9 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
/* Receive packets are processed by poll routine.
If not running start it now. */
if (status & RxAckBits){
- if (netif_rx_schedule_prep(&tp->napi)) {
+ if (napi_schedule_prep(&tp->napi)) {
RTL_W16_F (IntrMask, rtl8139_norx_intr_mask);
- __netif_rx_schedule(&tp->napi);
+ __napi_schedule(&tp->napi);
}
}
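
The 8139cp and 8139too hunks above show the other recurring change in this series: the NAPI entry points lose their netif_rx_* names in favour of napi_* names (netif_rx_schedule_prep becomes napi_schedule_prep, __netif_rx_schedule becomes __napi_schedule, netif_rx_complete becomes napi_complete, and so on); the interrupt/poll pattern itself is unchanged. A hedged sketch of that pattern with the new names, using a made-up "foo" driver and placeholder helpers:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct foo_private {			/* invented private struct for the sketch */
	struct napi_struct napi;
	/* ... rings, registers, locks ... */
};

/* Placeholders standing in for real register writes and RX processing. */
void foo_disable_rx_irq(struct foo_private *fp);
void foo_enable_rx_irq(struct foo_private *fp);
int foo_rx(struct foo_private *fp, int budget);

static irqreturn_t foo_interrupt(int irq, void *dev_instance)
{
	struct foo_private *fp = dev_instance;

	if (napi_schedule_prep(&fp->napi)) {	/* was netif_rx_schedule_prep() */
		foo_disable_rx_irq(fp);		/* mask RX interrupts */
		__napi_schedule(&fp->napi);	/* was __netif_rx_schedule() */
	}
	return IRQ_HANDLED;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_private *fp = container_of(napi, struct foo_private, napi);
	int work_done = foo_rx(fp, budget);

	if (work_done < budget) {
		napi_complete(napi);		/* was netif_rx_complete() */
		foo_enable_rx_irq(fp);		/* unmask RX interrupts */
	}
	return work_done;
}
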
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index b273596368e..cca94b9c08a 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -1122,6 +1122,17 @@ static void print_eth(unsigned char *add, char *str)
static int io = 0x300;
static int irq = 10;
+static const struct net_device_ops i596_netdev_ops = {
+ .ndo_open = i596_open,
+ .ndo_stop = i596_close,
+ .ndo_start_xmit = i596_start_xmit,
+ .ndo_set_multicast_list = set_multicast_list,
+ .ndo_tx_timeout = i596_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
struct net_device * __init i82596_probe(int unit)
{
struct net_device *dev;
@@ -1232,11 +1243,7 @@ found:
DEB(DEB_PROBE,printk(KERN_INFO "%s", version));
/* The 82596-specific entries in the device structure. */
- dev->open = i596_open;
- dev->stop = i596_close;
- dev->hard_start_xmit = i596_start_xmit;
- dev->set_multicast_list = set_multicast_list;
- dev->tx_timeout = i596_tx_timeout;
+ dev->netdev_ops = &i596_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
dev->ml_priv = (void *)(dev->mem_start);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 9fe8cb7d43a..c4776a2adf0 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2022,7 +2022,6 @@ config IGB
config IGB_LRO
bool "Use software LRO"
depends on IGB && INET
- select INET_LRO
---help---
Say Y here if you want to use large receive offload.
@@ -2408,7 +2407,6 @@ config CHELSIO_T3
tristate "Chelsio Communications T3 10Gb Ethernet support"
depends on CHELSIO_T3_DEPENDS
select FW_LOADER
- select INET_LRO
help
This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
adapters.
@@ -2444,7 +2442,6 @@ config ENIC
config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI && INET
- select INET_LRO
---help---
This driver supports Intel(R) 10GbE PCI Express family of
adapters. For more information on how to identify your adapter, go
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 7709992bb6b..cb9c95d3ed0 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -831,7 +831,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
if (rx_pkt_limit > 0) {
/* Receive descriptor is empty now */
spin_lock_irqsave(&lp->lock, flags);
- __netif_rx_complete(napi);
+ __napi_complete(napi);
writel(VAL0|RINTEN0, mmio + INTEN0);
writel(VAL2 | RDMD0, mmio + CMD0);
spin_unlock_irqrestore(&lp->lock, flags);
@@ -1170,11 +1170,11 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
/* Check if Receive Interrupt has occurred. */
if (intr0 & RINT0) {
- if (netif_rx_schedule_prep(&lp->napi)) {
+ if (napi_schedule_prep(&lp->napi)) {
/* Disable receive interupts */
writel(RINTEN0, mmio + INTEN0);
/* Schedule a polling routine */
- __netif_rx_schedule(&lp->napi);
+ __napi_schedule(&lp->napi);
} else if (intren0 & RINTEN0) {
printk("************Driver bug! \
interrupt while in poll\n");
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
index 3ff9affb1a9..da017cbb5f6 100644
--- a/drivers/net/arcnet/arc-rawmode.c
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -102,7 +102,7 @@ static void rx(struct net_device *dev, int bufnum,
skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
if (skb == NULL) {
BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
- lp->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
return;
}
skb_put(skb, length + ARC_HDR_SIZE);
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 6b53e5ed125..a80d4a30a46 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -95,17 +95,16 @@ EXPORT_SYMBOL(arcnet_unregister_proto);
EXPORT_SYMBOL(arcnet_debug);
EXPORT_SYMBOL(alloc_arcdev);
EXPORT_SYMBOL(arcnet_interrupt);
+EXPORT_SYMBOL(arcnet_open);
+EXPORT_SYMBOL(arcnet_close);
+EXPORT_SYMBOL(arcnet_send_packet);
+EXPORT_SYMBOL(arcnet_timeout);
/* Internal function prototypes */
-static int arcnet_open(struct net_device *dev);
-static int arcnet_close(struct net_device *dev);
-static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev);
-static void arcnet_timeout(struct net_device *dev);
static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned len);
static int arcnet_rebuild_header(struct sk_buff *skb);
-static struct net_device_stats *arcnet_get_stats(struct net_device *dev);
static int go_tx(struct net_device *dev);
static int debug = ARCNET_DEBUG;
@@ -322,11 +321,18 @@ static const struct header_ops arcnet_header_ops = {
.rebuild = arcnet_rebuild_header,
};
+static const struct net_device_ops arcnet_netdev_ops = {
+ .ndo_open = arcnet_open,
+ .ndo_stop = arcnet_close,
+ .ndo_start_xmit = arcnet_send_packet,
+ .ndo_tx_timeout = arcnet_timeout,
+};
/* Setup a struct device for ARCnet. */
static void arcdev_setup(struct net_device *dev)
{
dev->type = ARPHRD_ARCNET;
+ dev->netdev_ops = &arcnet_netdev_ops;
dev->header_ops = &arcnet_header_ops;
dev->hard_header_len = sizeof(struct archdr);
dev->mtu = choose_mtu();
@@ -339,18 +345,9 @@ static void arcdev_setup(struct net_device *dev)
/* New-style flags. */
dev->flags = IFF_BROADCAST;
- /*
- * Put in this stuff here, so we don't have to export the symbols to
- * the chipset drivers.
- */
- dev->open = arcnet_open;
- dev->stop = arcnet_close;
- dev->hard_start_xmit = arcnet_send_packet;
- dev->tx_timeout = arcnet_timeout;
- dev->get_stats = arcnet_get_stats;
}
-struct net_device *alloc_arcdev(char *name)
+struct net_device *alloc_arcdev(const char *name)
{
struct net_device *dev;
@@ -372,7 +369,7 @@ struct net_device *alloc_arcdev(char *name)
* that "should" only need to be set once at boot, so that there is
* non-reboot way to recover if something goes wrong.
*/
-static int arcnet_open(struct net_device *dev)
+int arcnet_open(struct net_device *dev)
{
struct arcnet_local *lp = netdev_priv(dev);
int count, newmtu, error;
@@ -472,7 +469,7 @@ static int arcnet_open(struct net_device *dev)
/* The inverse routine to arcnet_open - shuts down the card. */
-static int arcnet_close(struct net_device *dev)
+int arcnet_close(struct net_device *dev)
{
struct arcnet_local *lp = netdev_priv(dev);
@@ -583,8 +580,8 @@ static int arcnet_rebuild_header(struct sk_buff *skb)
} else {
BUGMSG(D_NORMAL,
"I don't understand ethernet protocol %Xh addresses!\n", type);
- lp->stats.tx_errors++;
- lp->stats.tx_aborted_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
}
/* if we couldn't resolve the address... give up. */
@@ -601,7 +598,7 @@ static int arcnet_rebuild_header(struct sk_buff *skb)
/* Called by the kernel in order to transmit a packet. */
-static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
+int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct arcnet_local *lp = netdev_priv(dev);
struct archdr *pkt;
@@ -645,7 +642,7 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
!proto->ack_tx) {
/* done right away and we don't want to acknowledge
the package later - forget about it now */
- lp->stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
freeskb = 1;
} else {
/* do it the 'split' way */
@@ -709,7 +706,7 @@ static int go_tx(struct net_device *dev)
/* start sending */
ACOMMAND(TXcmd | (lp->cur_tx << 3));
- lp->stats.tx_packets++;
+ dev->stats.tx_packets++;
lp->lasttrans_dest = lp->lastload_dest;
lp->lastload_dest = 0;
lp->excnak_pending = 0;
@@ -720,7 +717,7 @@ static int go_tx(struct net_device *dev)
/* Called by the kernel when transmit times out */
-static void arcnet_timeout(struct net_device *dev)
+void arcnet_timeout(struct net_device *dev)
{
unsigned long flags;
struct arcnet_local *lp = netdev_priv(dev);
@@ -732,11 +729,11 @@ static void arcnet_timeout(struct net_device *dev)
msg = " - missed IRQ?";
} else {
msg = "";
- lp->stats.tx_aborted_errors++;
+ dev->stats.tx_aborted_errors++;
lp->timed_out = 1;
ACOMMAND(NOTXcmd | (lp->cur_tx << 3));
}
- lp->stats.tx_errors++;
+ dev->stats.tx_errors++;
/* make sure we didn't miss a TX or a EXC NAK IRQ */
AINTMASK(0);
@@ -865,8 +862,8 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
"transmit was not acknowledged! "
"(status=%Xh, dest=%02Xh)\n",
status, lp->lasttrans_dest);
- lp->stats.tx_errors++;
- lp->stats.tx_carrier_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.tx_carrier_errors++;
} else {
BUGMSG(D_DURING,
"broadcast was not acknowledged; that's normal "
@@ -905,7 +902,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
if (txbuf != -1) {
if (lp->outgoing.proto->continue_tx(dev, txbuf)) {
/* that was the last segment */
- lp->stats.tx_bytes += lp->outgoing.skb->len;
+ dev->stats.tx_bytes += lp->outgoing.skb->len;
if(!lp->outgoing.proto->ack_tx)
{
dev_kfree_skb_irq(lp->outgoing.skb);
@@ -930,7 +927,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
}
if (status & lp->intmask & RECONflag) {
ACOMMAND(CFLAGScmd | CONFIGclear);
- lp->stats.tx_carrier_errors++;
+ dev->stats.tx_carrier_errors++;
BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n",
status);
@@ -1038,8 +1035,8 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
"(%d+4 bytes)\n",
bufnum, pkt.hard.source, pkt.hard.dest, length);
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += length + ARC_HDR_SIZE;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += length + ARC_HDR_SIZE;
/* call the right receiver for the protocol */
if (arc_proto_map[soft->proto]->is_ip) {
@@ -1067,18 +1064,6 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
}
-
-/*
- * Get the current statistics. This may be called with the card open or
- * closed.
- */
-static struct net_device_stats *arcnet_get_stats(struct net_device *dev)
-{
- struct arcnet_local *lp = netdev_priv(dev);
- return &lp->stats;
-}
-
-
static void null_rx(struct net_device *dev, int bufnum,
struct archdr *pkthdr, int length)
{
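
The arcnet changes above also drop the driver-private statistics block and its get_stats callback, updating the counters embedded in struct net_device instead. A small illustrative fragment of that pattern (the rx helper is invented for the sketch, not taken from arcnet):

#include <linux/netdevice.h>

static void foo_rx_one(struct net_device *dev, struct sk_buff *skb,
		       unsigned int len)
{
	if (!skb) {
		dev->stats.rx_dropped++;	/* was lp->stats.rx_dropped++ */
		return;
	}
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	netif_rx(skb);
}
/* No .ndo_get_stats is needed for this: the core falls back to the
 * embedded &dev->stats when a driver does not provide one. */
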
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index 30580bbe252..1613929ff30 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -119,7 +119,7 @@ static void rx(struct net_device *dev, int bufnum,
skb = alloc_skb(length + ARC_HDR_SIZE + sizeof(int), GFP_ATOMIC);
if (skb == NULL) {
BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
- lp->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
return;
}
skb_put(skb, length + ARC_HDR_SIZE + sizeof(int));
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index ea53a940272..db08fc24047 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -151,6 +151,8 @@ static int __init com20020_init(void)
if (node && node != 0xff)
dev->dev_addr[0] = node;
+ dev->netdev_ops = &com20020_netdev_ops;
+
lp = netdev_priv(dev);
lp->backplane = backplane;
lp->clockp = clockp & 7;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 8b51f632581..dbf4de39754 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -72,6 +72,9 @@ static int __devinit com20020pci_probe(struct pci_dev *pdev, const struct pci_de
dev = alloc_arcdev(device);
if (!dev)
return -ENOMEM;
+
+ dev->netdev_ops = &com20020_netdev_ops;
+
lp = netdev_priv(dev);
pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 103688358fb..651275a5f3d 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -149,6 +149,14 @@ int com20020_check(struct net_device *dev)
return 0;
}
+const struct net_device_ops com20020_netdev_ops = {
+ .ndo_open = arcnet_open,
+ .ndo_stop = arcnet_close,
+ .ndo_start_xmit = arcnet_send_packet,
+ .ndo_tx_timeout = arcnet_timeout,
+ .ndo_set_multicast_list = com20020_set_mc_list,
+};
+
/* Set up the struct net_device associated with this card. Called after
* probing succeeds.
*/
@@ -170,8 +178,6 @@ int com20020_found(struct net_device *dev, int shared)
lp->hw.copy_from_card = com20020_copy_from_card;
lp->hw.close = com20020_close;
- dev->set_multicast_list = com20020_set_mc_list;
-
if (!dev->dev_addr[0])
dev->dev_addr[0] = inb(ioaddr + BUS_ALIGN*8); /* FIXME: do this some other way! */
@@ -342,6 +348,7 @@ static void com20020_set_mc_list(struct net_device *dev)
defined(CONFIG_ARCNET_COM20020_CS_MODULE)
EXPORT_SYMBOL(com20020_check);
EXPORT_SYMBOL(com20020_found);
+EXPORT_SYMBOL(com20020_netdev_ops);
#endif
MODULE_LICENSE("GPL");
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c
index 49d39a9cb69..06f8fa2f8f2 100644
--- a/drivers/net/arcnet/rfc1051.c
+++ b/drivers/net/arcnet/rfc1051.c
@@ -88,7 +88,6 @@ MODULE_LICENSE("GPL");
*/
static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
{
- struct arcnet_local *lp = netdev_priv(dev);
struct archdr *pkt = (struct archdr *) skb->data;
struct arc_rfc1051 *soft = &pkt->soft.rfc1051;
int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE;
@@ -112,8 +111,8 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
return htons(ETH_P_ARP);
default:
- lp->stats.rx_errors++;
- lp->stats.rx_crc_errors++;
+ dev->stats.rx_errors++;
+ dev->stats.rx_crc_errors++;
return 0;
}
@@ -140,7 +139,7 @@ static void rx(struct net_device *dev, int bufnum,
skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
if (skb == NULL) {
BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
- lp->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
return;
}
skb_put(skb, length + ARC_HDR_SIZE);
@@ -168,7 +167,6 @@ static void rx(struct net_device *dev, int bufnum,
static int build_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, uint8_t daddr)
{
- struct arcnet_local *lp = netdev_priv(dev);
int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE;
struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size);
struct arc_rfc1051 *soft = &pkt->soft.rfc1051;
@@ -184,8 +182,8 @@ static int build_header(struct sk_buff *skb, struct net_device *dev,
default:
BUGMSG(D_NORMAL, "RFC1051: I don't understand protocol %d (%Xh)\n",
type, type);
- lp->stats.tx_errors++;
- lp->stats.tx_aborted_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
return 0;
}
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c
index 2303d3a1f4b..745530651c4 100644
--- a/drivers/net/arcnet/rfc1201.c
+++ b/drivers/net/arcnet/rfc1201.c
@@ -92,7 +92,6 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
{
struct archdr *pkt = (struct archdr *) skb->data;
struct arc_rfc1201 *soft = &pkt->soft.rfc1201;
- struct arcnet_local *lp = netdev_priv(dev);
int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE;
/* Pull off the arcnet header. */
@@ -121,8 +120,8 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
case ARC_P_NOVELL_EC:
return htons(ETH_P_802_3);
default:
- lp->stats.rx_errors++;
- lp->stats.rx_crc_errors++;
+ dev->stats.rx_errors++;
+ dev->stats.rx_crc_errors++;
return 0;
}
@@ -172,8 +171,8 @@ static void rx(struct net_device *dev, int bufnum,
in->sequence, soft->split_flag, soft->sequence);
lp->rfc1201.aborted_seq = soft->sequence;
dev_kfree_skb_irq(in->skb);
- lp->stats.rx_errors++;
- lp->stats.rx_missed_errors++;
+ dev->stats.rx_errors++;
+ dev->stats.rx_missed_errors++;
in->skb = NULL;
}
in->sequence = soft->sequence;
@@ -181,7 +180,7 @@ static void rx(struct net_device *dev, int bufnum,
skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
if (skb == NULL) {
BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
- lp->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
return;
}
skb_put(skb, length + ARC_HDR_SIZE);
@@ -213,7 +212,7 @@ static void rx(struct net_device *dev, int bufnum,
BUGMSG(D_EXTRA,
"ARP source address was 00h, set to %02Xh.\n",
saddr);
- lp->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
*cptr = saddr;
} else {
BUGMSG(D_DURING, "ARP source address (%Xh) is fine.\n",
@@ -222,8 +221,8 @@ static void rx(struct net_device *dev, int bufnum,
} else {
BUGMSG(D_NORMAL, "funny-shaped ARP packet. (%Xh, %Xh)\n",
arp->ar_hln, arp->ar_pln);
- lp->stats.rx_errors++;
- lp->stats.rx_crc_errors++;
+ dev->stats.rx_errors++;
+ dev->stats.rx_crc_errors++;
}
}
BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
@@ -257,8 +256,8 @@ static void rx(struct net_device *dev, int bufnum,
soft->split_flag);
dev_kfree_skb_irq(in->skb);
in->skb = NULL;
- lp->stats.rx_errors++;
- lp->stats.rx_missed_errors++;
+ dev->stats.rx_errors++;
+ dev->stats.rx_missed_errors++;
in->lastpacket = in->numpackets = 0;
}
if (soft->split_flag & 1) { /* first packet in split */
@@ -269,8 +268,8 @@ static void rx(struct net_device *dev, int bufnum,
"(splitflag=%d, seq=%d)\n",
in->sequence, soft->split_flag,
soft->sequence);
- lp->stats.rx_errors++;
- lp->stats.rx_missed_errors++;
+ dev->stats.rx_errors++;
+ dev->stats.rx_missed_errors++;
dev_kfree_skb_irq(in->skb);
}
in->sequence = soft->sequence;
@@ -281,8 +280,8 @@ static void rx(struct net_device *dev, int bufnum,
BUGMSG(D_EXTRA, "incoming packet more than 16 segments; dropping. (splitflag=%d)\n",
soft->split_flag);
lp->rfc1201.aborted_seq = soft->sequence;
- lp->stats.rx_errors++;
- lp->stats.rx_length_errors++;
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
return;
}
in->skb = skb = alloc_skb(508 * in->numpackets + ARC_HDR_SIZE,
@@ -290,7 +289,7 @@ static void rx(struct net_device *dev, int bufnum,
if (skb == NULL) {
BUGMSG(D_NORMAL, "(split) memory squeeze, dropping packet.\n");
lp->rfc1201.aborted_seq = soft->sequence;
- lp->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
return;
}
skb->dev = dev;
@@ -314,8 +313,8 @@ static void rx(struct net_device *dev, int bufnum,
"first! (splitflag=%d, seq=%d, aborted=%d)\n",
soft->split_flag, soft->sequence,
lp->rfc1201.aborted_seq);
- lp->stats.rx_errors++;
- lp->stats.rx_missed_errors++;
+ dev->stats.rx_errors++;
+ dev->stats.rx_missed_errors++;
}
return;
}
@@ -325,8 +324,8 @@ static void rx(struct net_device *dev, int bufnum,
if (packetnum <= in->lastpacket - 1) {
BUGMSG(D_EXTRA, "duplicate splitpacket ignored! (splitflag=%d)\n",
soft->split_flag);
- lp->stats.rx_errors++;
- lp->stats.rx_frame_errors++;
+ dev->stats.rx_errors++;
+ dev->stats.rx_frame_errors++;
return;
}
/* "bad" duplicate, kill reassembly */
@@ -336,8 +335,8 @@ static void rx(struct net_device *dev, int bufnum,
lp->rfc1201.aborted_seq = soft->sequence;
dev_kfree_skb_irq(in->skb);
in->skb = NULL;
- lp->stats.rx_errors++;
- lp->stats.rx_missed_errors++;
+ dev->stats.rx_errors++;
+ dev->stats.rx_missed_errors++;
in->lastpacket = in->numpackets = 0;
return;
}
@@ -404,8 +403,8 @@ static int build_header(struct sk_buff *skb, struct net_device *dev,
default:
BUGMSG(D_NORMAL, "RFC1201: I don't understand protocol %d (%Xh)\n",
type, type);
- lp->stats.tx_errors++;
- lp->stats.tx_aborted_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
return 0;
}
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 3ec20cc18b0..cc7708775da 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -298,7 +298,7 @@ poll_some_more:
int more = 0;
spin_lock_irq(&ep->rx_lock);
- __netif_rx_complete(napi);
+ __napi_complete(napi);
wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
if (ep93xx_have_more_rx(ep)) {
wrl(ep, REG_INTEN, REG_INTEN_TX);
@@ -307,7 +307,7 @@ poll_some_more:
}
spin_unlock_irq(&ep->rx_lock);
- if (more && netif_rx_reschedule(napi))
+ if (more && napi_reschedule(napi))
goto poll_some_more;
}
@@ -415,9 +415,9 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
if (status & REG_INTSTS_RX) {
spin_lock(&ep->rx_lock);
- if (likely(netif_rx_schedule_prep(&ep->napi))) {
+ if (likely(napi_schedule_prep(&ep->napi))) {
wrl(ep, REG_INTEN, REG_INTEN_TX);
- __netif_rx_schedule(&ep->napi);
+ __napi_schedule(&ep->napi);
}
spin_unlock(&ep->rx_lock);
}
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 5fce1d5c1a1..5fe17d5eaa5 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -473,7 +473,7 @@ static void eth_rx_irq(void *pdev)
printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
qmgr_disable_irq(port->plat->rxq);
- netif_rx_schedule(&port->napi);
+ napi_schedule(&port->napi);
}
static int eth_poll(struct napi_struct *napi, int budget)
@@ -498,16 +498,16 @@ static int eth_poll(struct napi_struct *napi, int budget)
if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
- printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n",
+ printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
dev->name);
#endif
- netif_rx_complete(napi);
+ napi_complete(napi);
qmgr_enable_irq(rxq);
if (!qmgr_stat_empty(rxq) &&
- netif_rx_reschedule(napi)) {
+ napi_reschedule(napi)) {
#if DEBUG_RX
printk(KERN_DEBUG "%s: eth_poll"
- " netif_rx_reschedule successed\n",
+ " napi_reschedule successed\n",
dev->name);
#endif
qmgr_disable_irq(rxq);
@@ -1036,7 +1036,7 @@ static int eth_open(struct net_device *dev)
}
ports_open++;
/* we may already have RX data, enables IRQ */
- netif_rx_schedule(&port->napi);
+ napi_schedule(&port->napi);
return 0;
}
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index bb9094d4cbc..c758884728a 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1326,9 +1326,9 @@ static irqreturn_t atl1e_intr(int irq, void *data)
AT_WRITE_REG(hw, REG_IMR,
IMR_NORMAL_MASK & ~ISR_RX_EVENT);
AT_WRITE_FLUSH(hw);
- if (likely(netif_rx_schedule_prep(
+ if (likely(napi_schedule_prep(
&adapter->napi)))
- __netif_rx_schedule(&adapter->napi);
+ __napi_schedule(&adapter->napi);
}
} while (--max_ints > 0);
/* re-enable Interrupt*/
@@ -1514,7 +1514,7 @@ static int atl1e_clean(struct napi_struct *napi, int budget)
/* If no Tx and not enough Rx work done, exit the polling mode */
if (work_done < budget) {
quit_polling:
- netif_rx_complete(napi);
+ napi_complete(napi);
imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
/* test debug */
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 9c875bb3f76..6d76ccb8e29 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -81,24 +81,6 @@ MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
-// prototypes
-static void hard_stop(struct net_device *);
-static void enable_rx_tx(struct net_device *dev);
-static struct net_device * au1000_probe(int port_num);
-static int au1000_init(struct net_device *);
-static int au1000_open(struct net_device *);
-static int au1000_close(struct net_device *);
-static int au1000_tx(struct sk_buff *, struct net_device *);
-static int au1000_rx(struct net_device *);
-static irqreturn_t au1000_interrupt(int, void *);
-static void au1000_tx_timeout(struct net_device *);
-static void set_rx_mode(struct net_device *);
-static int au1000_ioctl(struct net_device *, struct ifreq *, int);
-static int au1000_mdio_read(struct net_device *, int, int);
-static void au1000_mdio_write(struct net_device *, int, int, u16);
-static void au1000_adjust_link(struct net_device *);
-static void enable_mac(struct net_device *, int);
-
/*
* Theory of operation
*
@@ -188,6 +170,26 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
# error MAC0-associated PHY attached 2nd MACs MII bus not supported yet
#endif
+static void enable_mac(struct net_device *dev, int force_reset)
+{
+ unsigned long flags;
+ struct au1000_private *aup = netdev_priv(dev);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ if(force_reset || (!aup->mac_enabled)) {
+ *aup->enable = MAC_EN_CLOCK_ENABLE;
+ au_sync_delay(2);
+ *aup->enable = (MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
+ | MAC_EN_CLOCK_ENABLE);
+ au_sync_delay(2);
+
+ aup->mac_enabled = 1;
+ }
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+}
+
/*
* MII operations
*/
@@ -281,6 +283,107 @@ static int au1000_mdiobus_reset(struct mii_bus *bus)
return 0;
}
+static void hard_stop(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+
+ if (au1000_debug > 4)
+ printk(KERN_INFO "%s: hard stop\n", dev->name);
+
+ aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
+ au_sync_delay(10);
+}
+
+static void enable_rx_tx(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+
+ if (au1000_debug > 4)
+ printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
+
+ aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
+ au_sync_delay(10);
+}
+
+static void
+au1000_adjust_link(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct phy_device *phydev = aup->phy_dev;
+ unsigned long flags;
+
+ int status_change = 0;
+
+ BUG_ON(!aup->phy_dev);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ if (phydev->link && (aup->old_speed != phydev->speed)) {
+ // speed changed
+
+ switch(phydev->speed) {
+ case SPEED_10:
+ case SPEED_100:
+ break;
+ default:
+ printk(KERN_WARNING
+ "%s: Speed (%d) is not 10/100 ???\n",
+ dev->name, phydev->speed);
+ break;
+ }
+
+ aup->old_speed = phydev->speed;
+
+ status_change = 1;
+ }
+
+ if (phydev->link && (aup->old_duplex != phydev->duplex)) {
+ // duplex mode changed
+
+ /* switching duplex mode requires to disable rx and tx! */
+ hard_stop(dev);
+
+ if (DUPLEX_FULL == phydev->duplex)
+ aup->mac->control = ((aup->mac->control
+ | MAC_FULL_DUPLEX)
+ & ~MAC_DISABLE_RX_OWN);
+ else
+ aup->mac->control = ((aup->mac->control
+ & ~MAC_FULL_DUPLEX)
+ | MAC_DISABLE_RX_OWN);
+ au_sync_delay(1);
+
+ enable_rx_tx(dev);
+ aup->old_duplex = phydev->duplex;
+
+ status_change = 1;
+ }
+
+ if(phydev->link != aup->old_link) {
+ // link state changed
+
+ if (!phydev->link) {
+ /* link went down */
+ aup->old_speed = 0;
+ aup->old_duplex = -1;
+ }
+
+ aup->old_link = phydev->link;
+ status_change = 1;
+ }
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+
+ if (status_change) {
+ if (phydev->link)
+ printk(KERN_INFO "%s: link up (%d/%s)\n",
+ dev->name, phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ else
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ }
+}
+
static int mii_probe (struct net_device *dev)
{
struct au1000_private *const aup = netdev_priv(dev);
@@ -412,48 +515,6 @@ void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
aup->pDBfree = pDB;
}
-static void enable_rx_tx(struct net_device *dev)
-{
- struct au1000_private *aup = netdev_priv(dev);
-
- if (au1000_debug > 4)
- printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
-
- aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
- au_sync_delay(10);
-}
-
-static void hard_stop(struct net_device *dev)
-{
- struct au1000_private *aup = netdev_priv(dev);
-
- if (au1000_debug > 4)
- printk(KERN_INFO "%s: hard stop\n", dev->name);
-
- aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
- au_sync_delay(10);
-}
-
-static void enable_mac(struct net_device *dev, int force_reset)
-{
- unsigned long flags;
- struct au1000_private *aup = netdev_priv(dev);
-
- spin_lock_irqsave(&aup->lock, flags);
-
- if(force_reset || (!aup->mac_enabled)) {
- *aup->enable = MAC_EN_CLOCK_ENABLE;
- au_sync_delay(2);
- *aup->enable = (MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
- | MAC_EN_CLOCK_ENABLE);
- au_sync_delay(2);
-
- aup->mac_enabled = 1;
- }
-
- spin_unlock_irqrestore(&aup->lock, flags);
-}
-
static void reset_mac_unlocked(struct net_device *dev)
{
struct au1000_private *const aup = netdev_priv(dev);
@@ -542,30 +603,6 @@ static struct {
static int num_ifs;
/*
- * Setup the base address and interrupt of the Au1xxx ethernet macs
- * based on cpu type and whether the interface is enabled in sys_pinfunc
- * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
- */
-static int __init au1000_init_module(void)
-{
- int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
- struct net_device *dev;
- int i, found_one = 0;
-
- num_ifs = NUM_ETH_INTERFACES - ni;
-
- for(i = 0; i < num_ifs; i++) {
- dev = au1000_probe(i);
- iflist[i].dev = dev;
- if (dev)
- found_one++;
- }
- if (!found_one)
- return -ENODEV;
- return 0;
-}
-
-/*
* ethtool operations
*/
@@ -611,199 +648,6 @@ static const struct ethtool_ops au1000_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
-static struct net_device * au1000_probe(int port_num)
-{
- static unsigned version_printed = 0;
- struct au1000_private *aup = NULL;
- struct net_device *dev = NULL;
- db_dest_t *pDB, *pDBfree;
- char ethaddr[6];
- int irq, i, err;
- u32 base, macen;
-
- if (port_num >= NUM_ETH_INTERFACES)
- return NULL;
-
- base = CPHYSADDR(iflist[port_num].base_addr );
- macen = CPHYSADDR(iflist[port_num].macen_addr);
- irq = iflist[port_num].irq;
-
- if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") ||
- !request_mem_region(macen, 4, "Au1x00 ENET"))
- return NULL;
-
- if (version_printed++ == 0)
- printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
-
- dev = alloc_etherdev(sizeof(struct au1000_private));
- if (!dev) {
- printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
- return NULL;
- }
-
- if ((err = register_netdev(dev)) != 0) {
- printk(KERN_ERR "%s: Cannot register net device, error %d\n",
- DRV_NAME, err);
- free_netdev(dev);
- return NULL;
- }
-
- printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
- dev->name, base, irq);
-
- aup = netdev_priv(dev);
-
- spin_lock_init(&aup->lock);
-
- /* Allocate the data buffers */
- /* Snooping works fine with eth on all au1xxx */
- aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
- (NUM_TX_BUFFS + NUM_RX_BUFFS),
- &aup->dma_addr, 0);
- if (!aup->vaddr) {
- free_netdev(dev);
- release_mem_region( base, MAC_IOSIZE);
- release_mem_region(macen, 4);
- return NULL;
- }
-
- /* aup->mac is the base address of the MAC's registers */
- aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr;
-
- /* Setup some variables for quick register address access */
- aup->enable = (volatile u32 *)iflist[port_num].macen_addr;
- aup->mac_id = port_num;
- au_macs[port_num] = aup;
-
- if (port_num == 0) {
- if (prom_get_ethernet_addr(ethaddr) == 0)
- memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
- else {
- printk(KERN_INFO "%s: No MAC address found\n",
- dev->name);
- /* Use the hard coded MAC addresses */
- }
-
- setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
- } else if (port_num == 1)
- setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
-
- /*
- * Assign to the Ethernet ports two consecutive MAC addresses
- * to match those that are printed on their stickers
- */
- memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
- dev->dev_addr[5] += port_num;
-
- *aup->enable = 0;
- aup->mac_enabled = 0;
-
- aup->mii_bus = mdiobus_alloc();
- if (aup->mii_bus == NULL)
- goto err_out;
-
- aup->mii_bus->priv = dev;
- aup->mii_bus->read = au1000_mdiobus_read;
- aup->mii_bus->write = au1000_mdiobus_write;
- aup->mii_bus->reset = au1000_mdiobus_reset;
- aup->mii_bus->name = "au1000_eth_mii";
- snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
- aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
- for(i = 0; i < PHY_MAX_ADDR; ++i)
- aup->mii_bus->irq[i] = PHY_POLL;
-
- /* if known, set corresponding PHY IRQs */
-#if defined(AU1XXX_PHY_STATIC_CONFIG)
-# if defined(AU1XXX_PHY0_IRQ)
- if (AU1XXX_PHY0_BUSID == aup->mac_id)
- aup->mii_bus->irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ;
-# endif
-# if defined(AU1XXX_PHY1_IRQ)
- if (AU1XXX_PHY1_BUSID == aup->mac_id)
- aup->mii_bus->irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ;
-# endif
-#endif
- mdiobus_register(aup->mii_bus);
-
- if (mii_probe(dev) != 0) {
- goto err_out;
- }
-
- pDBfree = NULL;
- /* setup the data buffer descriptors and attach a buffer to each one */
- pDB = aup->db;
- for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
- pDB->pnext = pDBfree;
- pDBfree = pDB;
- pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
- pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
- pDB++;
- }
- aup->pDBfree = pDBfree;
-
- for (i = 0; i < NUM_RX_DMA; i++) {
- pDB = GetFreeDB(aup);
- if (!pDB) {
- goto err_out;
- }
- aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
- aup->rx_db_inuse[i] = pDB;
- }
- for (i = 0; i < NUM_TX_DMA; i++) {
- pDB = GetFreeDB(aup);
- if (!pDB) {
- goto err_out;
- }
- aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
- aup->tx_dma_ring[i]->len = 0;
- aup->tx_db_inuse[i] = pDB;
- }
-
- dev->base_addr = base;
- dev->irq = irq;
- dev->open = au1000_open;
- dev->hard_start_xmit = au1000_tx;
- dev->stop = au1000_close;
- dev->set_multicast_list = &set_rx_mode;
- dev->do_ioctl = &au1000_ioctl;
- SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
- dev->tx_timeout = au1000_tx_timeout;
- dev->watchdog_timeo = ETH_TX_TIMEOUT;
-
- /*
- * The boot code uses the ethernet controller, so reset it to start
- * fresh. au1000_init() expects that the device is in reset state.
- */
- reset_mac(dev);
-
- return dev;
-
-err_out:
- if (aup->mii_bus != NULL) {
- mdiobus_unregister(aup->mii_bus);
- mdiobus_free(aup->mii_bus);
- }
-
- /* here we should have a valid dev plus aup-> register addresses
- * so we can reset the mac properly.*/
- reset_mac(dev);
-
- for (i = 0; i < NUM_RX_DMA; i++) {
- if (aup->rx_db_inuse[i])
- ReleaseDB(aup, aup->rx_db_inuse[i]);
- }
- for (i = 0; i < NUM_TX_DMA; i++) {
- if (aup->tx_db_inuse[i])
- ReleaseDB(aup, aup->tx_db_inuse[i]);
- }
- dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
- (void *)aup->vaddr, aup->dma_addr);
- unregister_netdev(dev);
- free_netdev(dev);
- release_mem_region( base, MAC_IOSIZE);
- release_mem_region(macen, 4);
- return NULL;
-}
/*
* Initialize the interface.
@@ -864,83 +708,170 @@ static int au1000_init(struct net_device *dev)
return 0;
}
-static void
-au1000_adjust_link(struct net_device *dev)
+static inline void update_rx_stats(struct net_device *dev, u32 status)
{
struct au1000_private *aup = netdev_priv(dev);
- struct phy_device *phydev = aup->phy_dev;
- unsigned long flags;
+ struct net_device_stats *ps = &dev->stats;
- int status_change = 0;
+ ps->rx_packets++;
+ if (status & RX_MCAST_FRAME)
+ ps->multicast++;
- BUG_ON(!aup->phy_dev);
+ if (status & RX_ERROR) {
+ ps->rx_errors++;
+ if (status & RX_MISSED_FRAME)
+ ps->rx_missed_errors++;
+ if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR))
+ ps->rx_length_errors++;
+ if (status & RX_CRC_ERROR)
+ ps->rx_crc_errors++;
+ if (status & RX_COLL)
+ ps->collisions++;
+ }
+ else
+ ps->rx_bytes += status & RX_FRAME_LEN_MASK;
- spin_lock_irqsave(&aup->lock, flags);
+}
- if (phydev->link && (aup->old_speed != phydev->speed)) {
- // speed changed
+/*
+ * Au1000 receive routine.
+ */
+static int au1000_rx(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct sk_buff *skb;
+ volatile rx_dma_t *prxd;
+ u32 buff_stat, status;
+ db_dest_t *pDB;
+ u32 frmlen;
- switch(phydev->speed) {
- case SPEED_10:
- case SPEED_100:
- break;
- default:
- printk(KERN_WARNING
- "%s: Speed (%d) is not 10/100 ???\n",
- dev->name, phydev->speed);
- break;
- }
+ if (au1000_debug > 5)
+ printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
- aup->old_speed = phydev->speed;
+ prxd = aup->rx_dma_ring[aup->rx_head];
+ buff_stat = prxd->buff_stat;
+ while (buff_stat & RX_T_DONE) {
+ status = prxd->status;
+ pDB = aup->rx_db_inuse[aup->rx_head];
+ update_rx_stats(dev, status);
+ if (!(status & RX_ERROR)) {
- status_change = 1;
+ /* good frame */
+ frmlen = (status & RX_FRAME_LEN_MASK);
+ frmlen -= 4; /* Remove FCS */
+ skb = dev_alloc_skb(frmlen + 2);
+ if (skb == NULL) {
+ printk(KERN_ERR
+ "%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+ dev->stats.rx_dropped++;
+ continue;
+ }
+ skb_reserve(skb, 2); /* 16 byte IP header align */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)pDB->vaddr, frmlen);
+ skb_put(skb, frmlen);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb); /* pass the packet to upper layers */
+ }
+ else {
+ if (au1000_debug > 4) {
+ if (status & RX_MISSED_FRAME)
+ printk("rx miss\n");
+ if (status & RX_WDOG_TIMER)
+ printk("rx wdog\n");
+ if (status & RX_RUNT)
+ printk("rx runt\n");
+ if (status & RX_OVERLEN)
+ printk("rx overlen\n");
+ if (status & RX_COLL)
+ printk("rx coll\n");
+ if (status & RX_MII_ERROR)
+ printk("rx mii error\n");
+ if (status & RX_CRC_ERROR)
+ printk("rx crc error\n");
+ if (status & RX_LEN_ERROR)
+ printk("rx len error\n");
+ if (status & RX_U_CNTRL_FRAME)
+ printk("rx u control frame\n");
+ if (status & RX_MISSED_FRAME)
+ printk("rx miss\n");
+ }
+ }
+ prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
+ aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
+ au_sync();
+
+ /* next descriptor */
+ prxd = aup->rx_dma_ring[aup->rx_head];
+ buff_stat = prxd->buff_stat;
}
+ return 0;
+}
- if (phydev->link && (aup->old_duplex != phydev->duplex)) {
- // duplex mode changed
+static void update_tx_stats(struct net_device *dev, u32 status)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct net_device_stats *ps = &dev->stats;
- /* switching duplex mode requires to disable rx and tx! */
- hard_stop(dev);
+ if (status & TX_FRAME_ABORTED) {
+ if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
+ if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
+ /* any other tx errors are only valid
+ * in half duplex mode */
+ ps->tx_errors++;
+ ps->tx_aborted_errors++;
+ }
+ }
+ else {
+ ps->tx_errors++;
+ ps->tx_aborted_errors++;
+ if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
+ ps->tx_carrier_errors++;
+ }
+ }
+}
- if (DUPLEX_FULL == phydev->duplex)
- aup->mac->control = ((aup->mac->control
- | MAC_FULL_DUPLEX)
- & ~MAC_DISABLE_RX_OWN);
- else
- aup->mac->control = ((aup->mac->control
- & ~MAC_FULL_DUPLEX)
- | MAC_DISABLE_RX_OWN);
- au_sync_delay(1);
+/*
+ * Called from the interrupt service routine to acknowledge
+ * the TX DONE bits. This is a must if the irq is setup as
+ * edge triggered.
+ */
+static void au1000_tx_ack(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ volatile tx_dma_t *ptxd;
- enable_rx_tx(dev);
- aup->old_duplex = phydev->duplex;
+ ptxd = aup->tx_dma_ring[aup->tx_tail];
- status_change = 1;
- }
+ while (ptxd->buff_stat & TX_T_DONE) {
+ update_tx_stats(dev, ptxd->status);
+ ptxd->buff_stat &= ~TX_T_DONE;
+ ptxd->len = 0;
+ au_sync();
- if(phydev->link != aup->old_link) {
- // link state changed
+ aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
+ ptxd = aup->tx_dma_ring[aup->tx_tail];
- if (!phydev->link) {
- /* link went down */
- aup->old_speed = 0;
- aup->old_duplex = -1;
+ if (aup->tx_full) {
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
}
-
- aup->old_link = phydev->link;
- status_change = 1;
}
+}
- spin_unlock_irqrestore(&aup->lock, flags);
+/*
+ * Au1000 interrupt service routine.
+ */
+static irqreturn_t au1000_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
- if (status_change) {
- if (phydev->link)
- printk(KERN_INFO "%s: link up (%d/%s)\n",
- dev->name, phydev->speed,
- DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
- else
- printk(KERN_INFO "%s: link down\n", dev->name);
- }
+ /* Handle RX interrupts first to minimize chance of overrun */
+
+ au1000_rx(dev);
+ au1000_tx_ack(dev);
+ return IRQ_RETVAL(1);
}
static int au1000_open(struct net_device *dev)
@@ -1003,88 +934,6 @@ static int au1000_close(struct net_device *dev)
return 0;
}
-static void __exit au1000_cleanup_module(void)
-{
- int i, j;
- struct net_device *dev;
- struct au1000_private *aup;
-
- for (i = 0; i < num_ifs; i++) {
- dev = iflist[i].dev;
- if (dev) {
- aup = netdev_priv(dev);
- unregister_netdev(dev);
- mdiobus_unregister(aup->mii_bus);
- mdiobus_free(aup->mii_bus);
- for (j = 0; j < NUM_RX_DMA; j++)
- if (aup->rx_db_inuse[j])
- ReleaseDB(aup, aup->rx_db_inuse[j]);
- for (j = 0; j < NUM_TX_DMA; j++)
- if (aup->tx_db_inuse[j])
- ReleaseDB(aup, aup->tx_db_inuse[j]);
- dma_free_noncoherent(NULL, MAX_BUF_SIZE *
- (NUM_TX_BUFFS + NUM_RX_BUFFS),
- (void *)aup->vaddr, aup->dma_addr);
- release_mem_region(dev->base_addr, MAC_IOSIZE);
- release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
- free_netdev(dev);
- }
- }
-}
-
-static void update_tx_stats(struct net_device *dev, u32 status)
-{
- struct au1000_private *aup = netdev_priv(dev);
- struct net_device_stats *ps = &dev->stats;
-
- if (status & TX_FRAME_ABORTED) {
- if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
- if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
- /* any other tx errors are only valid
- * in half duplex mode */
- ps->tx_errors++;
- ps->tx_aborted_errors++;
- }
- }
- else {
- ps->tx_errors++;
- ps->tx_aborted_errors++;
- if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
- ps->tx_carrier_errors++;
- }
- }
-}
-
-
-/*
- * Called from the interrupt service routine to acknowledge
- * the TX DONE bits. This is a must if the irq is setup as
- * edge triggered.
- */
-static void au1000_tx_ack(struct net_device *dev)
-{
- struct au1000_private *aup = netdev_priv(dev);
- volatile tx_dma_t *ptxd;
-
- ptxd = aup->tx_dma_ring[aup->tx_tail];
-
- while (ptxd->buff_stat & TX_T_DONE) {
- update_tx_stats(dev, ptxd->status);
- ptxd->buff_stat &= ~TX_T_DONE;
- ptxd->len = 0;
- au_sync();
-
- aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
- ptxd = aup->tx_dma_ring[aup->tx_tail];
-
- if (aup->tx_full) {
- aup->tx_full = 0;
- netif_wake_queue(dev);
- }
- }
-}
-
-
/*
* Au1000 transmit routine.
*/
@@ -1142,123 +991,6 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-static inline void update_rx_stats(struct net_device *dev, u32 status)
-{
- struct au1000_private *aup = netdev_priv(dev);
- struct net_device_stats *ps = &dev->stats;
-
- ps->rx_packets++;
- if (status & RX_MCAST_FRAME)
- ps->multicast++;
-
- if (status & RX_ERROR) {
- ps->rx_errors++;
- if (status & RX_MISSED_FRAME)
- ps->rx_missed_errors++;
- if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR))
- ps->rx_length_errors++;
- if (status & RX_CRC_ERROR)
- ps->rx_crc_errors++;
- if (status & RX_COLL)
- ps->collisions++;
- }
- else
- ps->rx_bytes += status & RX_FRAME_LEN_MASK;
-
-}
-
-/*
- * Au1000 receive routine.
- */
-static int au1000_rx(struct net_device *dev)
-{
- struct au1000_private *aup = netdev_priv(dev);
- struct sk_buff *skb;
- volatile rx_dma_t *prxd;
- u32 buff_stat, status;
- db_dest_t *pDB;
- u32 frmlen;
-
- if (au1000_debug > 5)
- printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
-
- prxd = aup->rx_dma_ring[aup->rx_head];
- buff_stat = prxd->buff_stat;
- while (buff_stat & RX_T_DONE) {
- status = prxd->status;
- pDB = aup->rx_db_inuse[aup->rx_head];
- update_rx_stats(dev, status);
- if (!(status & RX_ERROR)) {
-
- /* good frame */
- frmlen = (status & RX_FRAME_LEN_MASK);
- frmlen -= 4; /* Remove FCS */
- skb = dev_alloc_skb(frmlen + 2);
- if (skb == NULL) {
- printk(KERN_ERR
- "%s: Memory squeeze, dropping packet.\n",
- dev->name);
- dev->stats.rx_dropped++;
- continue;
- }
- skb_reserve(skb, 2); /* 16 byte IP header align */
- skb_copy_to_linear_data(skb,
- (unsigned char *)pDB->vaddr, frmlen);
- skb_put(skb, frmlen);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb); /* pass the packet to upper layers */
- }
- else {
- if (au1000_debug > 4) {
- if (status & RX_MISSED_FRAME)
- printk("rx miss\n");
- if (status & RX_WDOG_TIMER)
- printk("rx wdog\n");
- if (status & RX_RUNT)
- printk("rx runt\n");
- if (status & RX_OVERLEN)
- printk("rx overlen\n");
- if (status & RX_COLL)
- printk("rx coll\n");
- if (status & RX_MII_ERROR)
- printk("rx mii error\n");
- if (status & RX_CRC_ERROR)
- printk("rx crc error\n");
- if (status & RX_LEN_ERROR)
- printk("rx len error\n");
- if (status & RX_U_CNTRL_FRAME)
- printk("rx u control frame\n");
- if (status & RX_MISSED_FRAME)
- printk("rx miss\n");
- }
- }
- prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
- aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
- au_sync();
-
- /* next descriptor */
- prxd = aup->rx_dma_ring[aup->rx_head];
- buff_stat = prxd->buff_stat;
- }
- return 0;
-}
-
-
-/*
- * Au1000 interrupt service routine.
- */
-static irqreturn_t au1000_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
-
- /* Handle RX interrupts first to minimize chance of overrun */
-
- au1000_rx(dev);
- au1000_tx_ack(dev);
- return IRQ_RETVAL(1);
-}
-
-
/*
* The Tx ring has been full longer than the watchdog timeout
* value. The transmitter must be hung?
@@ -1315,5 +1047,252 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd);
}
+static struct net_device * au1000_probe(int port_num)
+{
+ static unsigned version_printed = 0;
+ struct au1000_private *aup = NULL;
+ struct net_device *dev = NULL;
+ db_dest_t *pDB, *pDBfree;
+ char ethaddr[6];
+ int irq, i, err;
+ u32 base, macen;
+
+ if (port_num >= NUM_ETH_INTERFACES)
+ return NULL;
+
+ base = CPHYSADDR(iflist[port_num].base_addr );
+ macen = CPHYSADDR(iflist[port_num].macen_addr);
+ irq = iflist[port_num].irq;
+
+ if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") ||
+ !request_mem_region(macen, 4, "Au1x00 ENET"))
+ return NULL;
+
+ if (version_printed++ == 0)
+ printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
+
+ dev = alloc_etherdev(sizeof(struct au1000_private));
+ if (!dev) {
+ printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
+ return NULL;
+ }
+
+ if ((err = register_netdev(dev)) != 0) {
+ printk(KERN_ERR "%s: Cannot register net device, error %d\n",
+ DRV_NAME, err);
+ free_netdev(dev);
+ return NULL;
+ }
+
+ printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
+ dev->name, base, irq);
+
+ aup = netdev_priv(dev);
+
+ spin_lock_init(&aup->lock);
+
+ /* Allocate the data buffers */
+ /* Snooping works fine with eth on all au1xxx */
+ aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ &aup->dma_addr, 0);
+ if (!aup->vaddr) {
+ free_netdev(dev);
+ release_mem_region( base, MAC_IOSIZE);
+ release_mem_region(macen, 4);
+ return NULL;
+ }
+
+ /* aup->mac is the base address of the MAC's registers */
+ aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr;
+
+ /* Set up some variables for quick register address access */
+ aup->enable = (volatile u32 *)iflist[port_num].macen_addr;
+ aup->mac_id = port_num;
+ au_macs[port_num] = aup;
+
+ if (port_num == 0) {
+ if (prom_get_ethernet_addr(ethaddr) == 0)
+ memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
+ else {
+ printk(KERN_INFO "%s: No MAC address found\n",
+ dev->name);
+ /* Use the hard coded MAC addresses */
+ }
+
+ setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
+ } else if (port_num == 1)
+ setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
+
+ /*
+ * Assign two consecutive MAC addresses to the Ethernet ports,
+ * matching those printed on their stickers.
+ */
+ memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
+ dev->dev_addr[5] += port_num;
+
+ *aup->enable = 0;
+ aup->mac_enabled = 0;
+
+ aup->mii_bus = mdiobus_alloc();
+ if (aup->mii_bus == NULL)
+ goto err_out;
+
+ aup->mii_bus->priv = dev;
+ aup->mii_bus->read = au1000_mdiobus_read;
+ aup->mii_bus->write = au1000_mdiobus_write;
+ aup->mii_bus->reset = au1000_mdiobus_reset;
+ aup->mii_bus->name = "au1000_eth_mii";
+ snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
+ aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ for(i = 0; i < PHY_MAX_ADDR; ++i)
+ aup->mii_bus->irq[i] = PHY_POLL;
+
+ /* if known, set corresponding PHY IRQs */
+#if defined(AU1XXX_PHY_STATIC_CONFIG)
+# if defined(AU1XXX_PHY0_IRQ)
+ if (AU1XXX_PHY0_BUSID == aup->mac_id)
+ aup->mii_bus->irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ;
+# endif
+# if defined(AU1XXX_PHY1_IRQ)
+ if (AU1XXX_PHY1_BUSID == aup->mac_id)
+ aup->mii_bus->irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ;
+# endif
+#endif
+ mdiobus_register(aup->mii_bus);
+
+ if (mii_probe(dev) != 0) {
+ goto err_out;
+ }
+
+ pDBfree = NULL;
+ /* set up the data buffer descriptors and attach a buffer to each one */
+ pDB = aup->db;
+ for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
+ pDB->pnext = pDBfree;
+ pDBfree = pDB;
+ pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
+ pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
+ pDB++;
+ }
+ aup->pDBfree = pDBfree;
+
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ pDB = GetFreeDB(aup);
+ if (!pDB) {
+ goto err_out;
+ }
+ aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->rx_db_inuse[i] = pDB;
+ }
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ pDB = GetFreeDB(aup);
+ if (!pDB) {
+ goto err_out;
+ }
+ aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->tx_dma_ring[i]->len = 0;
+ aup->tx_db_inuse[i] = pDB;
+ }
+
+ dev->base_addr = base;
+ dev->irq = irq;
+ dev->open = au1000_open;
+ dev->hard_start_xmit = au1000_tx;
+ dev->stop = au1000_close;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &au1000_ioctl;
+ SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
+ dev->tx_timeout = au1000_tx_timeout;
+ dev->watchdog_timeo = ETH_TX_TIMEOUT;
+
+ /*
+ * The boot code uses the ethernet controller, so reset it to start
+ * fresh. au1000_init() expects the device to be in the reset state.
+ */
+ reset_mac(dev);
+
+ return dev;
+
+err_out:
+ if (aup->mii_bus != NULL) {
+ mdiobus_unregister(aup->mii_bus);
+ mdiobus_free(aup->mii_bus);
+ }
+
+ /* At this point we should have a valid dev plus the register addresses
+ * stored in aup, so we can reset the MAC properly. */
+ reset_mac(dev);
+
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ if (aup->rx_db_inuse[i])
+ ReleaseDB(aup, aup->rx_db_inuse[i]);
+ }
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ if (aup->tx_db_inuse[i])
+ ReleaseDB(aup, aup->tx_db_inuse[i]);
+ }
+ dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
+ unregister_netdev(dev);
+ free_netdev(dev);
+ release_mem_region( base, MAC_IOSIZE);
+ release_mem_region(macen, 4);
+ return NULL;
+}
+
+/*
+ * Set up the base address and interrupt of the Au1xxx Ethernet MACs
+ * based on the CPU type and on whether the interface is enabled in the
+ * sys_pinfunc register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
+ */
+static int __init au1000_init_module(void)
+{
+ int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
+ struct net_device *dev;
+ int i, found_one = 0;
+
+ num_ifs = NUM_ETH_INTERFACES - ni;
+
+ for(i = 0; i < num_ifs; i++) {
+ dev = au1000_probe(i);
+ iflist[i].dev = dev;
+ if (dev)
+ found_one++;
+ }
+ if (!found_one)
+ return -ENODEV;
+ return 0;
+}
+
+static void __exit au1000_cleanup_module(void)
+{
+ int i, j;
+ struct net_device *dev;
+ struct au1000_private *aup;
+
+ for (i = 0; i < num_ifs; i++) {
+ dev = iflist[i].dev;
+ if (dev) {
+ aup = netdev_priv(dev);
+ unregister_netdev(dev);
+ mdiobus_unregister(aup->mii_bus);
+ mdiobus_free(aup->mii_bus);
+ for (j = 0; j < NUM_RX_DMA; j++)
+ if (aup->rx_db_inuse[j])
+ ReleaseDB(aup, aup->rx_db_inuse[j]);
+ for (j = 0; j < NUM_TX_DMA; j++)
+ if (aup->tx_db_inuse[j])
+ ReleaseDB(aup, aup->tx_db_inuse[j]);
+ dma_free_noncoherent(NULL, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
+ release_mem_region(dev->base_addr, MAC_IOSIZE);
+ release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
+ free_netdev(dev);
+ }
+ }
+}
+
module_init(au1000_init_module);
module_exit(au1000_cleanup_module);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index c38512ebcea..92aaaa1ee9f 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -874,7 +874,7 @@ static int b44_poll(struct napi_struct *napi, int budget)
}
if (work_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
b44_enable_ints(bp);
}
@@ -906,13 +906,13 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id)
goto irq_ack;
}
- if (netif_rx_schedule_prep(&bp->napi)) {
+ if (napi_schedule_prep(&bp->napi)) {
/* NOTE: These writes are posted by the readback of
* the ISTAT register below.
*/
bp->istat = istat;
__b44_disable_ints(bp);
- __netif_rx_schedule(&bp->napi);
+ __napi_schedule(&bp->napi);
} else {
printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
dev->name);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index d4a3dac21dc..fe575b9a9b7 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1497,6 +1497,8 @@ static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
{
u32 speed_arg = 0, pause_adv;
@@ -1554,6 +1556,8 @@ bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
{
u32 adv, bmcr;
u32 new_adv = 0;
@@ -1866,6 +1870,8 @@ bnx2_set_remote_link(struct bnx2 *bp)
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
{
u32 bmcr;
u32 new_bmcr;
@@ -1963,6 +1969,8 @@ bnx2_setup_copper_phy(struct bnx2 *bp)
static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
{
if (bp->loopback == MAC_LOOPBACK)
return 0;
@@ -2176,6 +2184,8 @@ bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
{
u32 val;
int rc = 0;
@@ -3053,7 +3063,7 @@ bnx2_msi(int irq, void *dev_instance)
if (unlikely(atomic_read(&bp->intr_sem) != 0))
return IRQ_HANDLED;
- netif_rx_schedule(&bnapi->napi);
+ napi_schedule(&bnapi->napi);
return IRQ_HANDLED;
}
@@ -3070,7 +3080,7 @@ bnx2_msi_1shot(int irq, void *dev_instance)
if (unlikely(atomic_read(&bp->intr_sem) != 0))
return IRQ_HANDLED;
- netif_rx_schedule(&bnapi->napi);
+ napi_schedule(&bnapi->napi);
return IRQ_HANDLED;
}
@@ -3106,9 +3116,9 @@ bnx2_interrupt(int irq, void *dev_instance)
if (unlikely(atomic_read(&bp->intr_sem) != 0))
return IRQ_HANDLED;
- if (netif_rx_schedule_prep(&bnapi->napi)) {
+ if (napi_schedule_prep(&bnapi->napi)) {
bnapi->last_status_idx = sblk->status_idx;
- __netif_rx_schedule(&bnapi->napi);
+ __napi_schedule(&bnapi->napi);
}
return IRQ_HANDLED;
@@ -3218,7 +3228,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget)
rmb();
if (likely(!bnx2_has_fast_work(bnapi))) {
- netif_rx_complete(napi);
+ napi_complete(napi);
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
bnapi->last_status_idx);
@@ -3251,7 +3261,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
rmb();
if (likely(!bnx2_has_work(bnapi))) {
- netif_rx_complete(napi);
+ napi_complete(napi);
if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index d3e7775a9cc..71f81c79d63 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1654,7 +1654,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
prefetch(&fp->status_blk->c_status_block.status_block_index);
prefetch(&fp->status_blk->u_status_block.status_block_index);
- netif_rx_schedule(&bnx2x_fp(bp, index, napi));
+ napi_schedule(&bnx2x_fp(bp, index, napi));
return IRQ_HANDLED;
}
@@ -1693,7 +1693,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
prefetch(&fp->status_blk->c_status_block.status_block_index);
prefetch(&fp->status_blk->u_status_block.status_block_index);
- netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
+ napi_schedule(&bnx2x_fp(bp, 0, napi));
status &= ~mask;
}
@@ -9374,7 +9374,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
- netif_rx_complete(napi);
+ napi_complete(napi);
bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9fb388388fb..21bce2c0fde 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3369,7 +3369,7 @@ static int bond_info_seq_show(struct seq_file *seq, void *v)
return 0;
}
-static struct seq_operations bond_info_seq_ops = {
+static const struct seq_operations bond_info_seq_ops = {
.start = bond_info_seq_start,
.next = bond_info_seq_next,
.stop = bond_info_seq_stop,
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 840b3d1a22f..bb46be27533 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2506,7 +2506,7 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id)
if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
cas_mask_intr(cp);
- netif_rx_schedule(&cp->napi);
+ napi_schedule(&cp->napi);
#else
cas_rx_ringN(cp, ring, 0);
#endif
@@ -2557,7 +2557,7 @@ static irqreturn_t cas_interrupt1(int irq, void *dev_id)
if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
cas_mask_intr(cp);
- netif_rx_schedule(&cp->napi);
+ napi_schedule(&cp->napi);
#else
cas_rx_ringN(cp, 1, 0);
#endif
@@ -2613,7 +2613,7 @@ static irqreturn_t cas_interrupt(int irq, void *dev_id)
if (status & INTR_RX_DONE) {
#ifdef USE_NAPI
cas_mask_intr(cp);
- netif_rx_schedule(&cp->napi);
+ napi_schedule(&cp->napi);
#else
cas_rx_ringN(cp, 0, 0);
#endif
@@ -2691,7 +2691,7 @@ rx_comp:
#endif
spin_unlock_irqrestore(&cp->lock, flags);
if (enable_intr) {
- netif_rx_complete(napi);
+ napi_complete(napi);
cas_unmask_intr(cp);
}
return credits;
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index d984b799576..840da83fb3c 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1612,7 +1612,7 @@ int t1_poll(struct napi_struct *napi, int budget)
int work_done = process_responses(adapter, budget);
if (likely(work_done < budget)) {
- netif_rx_complete(napi);
+ napi_complete(napi);
writel(adapter->sge->respQ.cidx,
adapter->regs + A_SG_SLEEPING);
}
@@ -1630,7 +1630,7 @@ irqreturn_t t1_interrupt(int irq, void *data)
if (napi_schedule_prep(&adapter->napi)) {
if (process_pure_responses(adapter))
- __netif_rx_schedule(&adapter->napi);
+ __napi_schedule(&adapter->napi);
else {
/* no data, no NAPI needed */
writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index f66548751c3..4dad04e91f6 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -428,7 +428,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
printk(KERN_WARNING "%s: rx: polling, but no queue\n",
priv->dev->name);
spin_unlock(&priv->rx_lock);
- netif_rx_complete(napi);
+ napi_complete(napi);
return 0;
}
@@ -514,7 +514,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
if (processed == 0) {
/* we ran out of packets to read,
* revert to interrupt-driven mode */
- netif_rx_complete(napi);
+ napi_complete(napi);
cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
return 0;
}
@@ -536,7 +536,7 @@ fatal_error:
}
spin_unlock(&priv->rx_lock);
- netif_rx_complete(napi);
+ napi_complete(napi);
netif_tx_stop_all_queues(priv->dev);
napi_disable(&priv->napi);
@@ -802,9 +802,9 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
if (status & MAC_INT_RX) {
queue = (status >> 8) & 7;
- if (netif_rx_schedule_prep(&priv->napi)) {
+ if (napi_schedule_prep(&priv->napi)) {
cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
- __netif_rx_schedule(&priv->napi);
+ __napi_schedule(&priv->napi);
}
}
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index a89d8cc5120..fbe15699584 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -42,7 +42,6 @@
#include <linux/cache.h>
#include <linux/mutex.h>
#include <linux/bitops.h>
-#include <linux/inet_lro.h>
#include "t3cdev.h"
#include <asm/io.h>
@@ -178,15 +177,11 @@ enum { /* per port SGE statistics */
SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
- SGE_PSTAT_LRO_AGGR, /* # of page chunks added to LRO sessions */
- SGE_PSTAT_LRO_FLUSHED, /* # of flushed LRO sessions */
- SGE_PSTAT_LRO_NO_DESC, /* # of overflown LRO sessions */
SGE_PSTAT_MAX /* must be last */
};
-#define T3_MAX_LRO_SES 8
-#define T3_MAX_LRO_MAX_PKTS 64
+struct napi_gro_fraginfo;
struct sge_qset { /* an SGE queue set */
struct adapter *adap;
@@ -194,12 +189,8 @@ struct sge_qset { /* an SGE queue set */
struct sge_rspq rspq;
struct sge_fl fl[SGE_RXQ_PER_SET];
struct sge_txq txq[SGE_TXQ_PER_SET];
- struct net_lro_mgr lro_mgr;
- struct net_lro_desc lro_desc[T3_MAX_LRO_SES];
- struct skb_frag_struct *lro_frag_tbl;
- int lro_nfrags;
+ struct napi_gro_fraginfo lro_frag_tbl;
int lro_enabled;
- int lro_frag_len;
void *lro_va;
struct net_device *netdev;
struct netdev_queue *tx_q; /* associated netdev TX queue */
@@ -230,6 +221,7 @@ struct adapter {
unsigned int slow_intr_mask;
unsigned long irq_stats[IRQ_NUM_STATS];
+ int msix_nvectors;
struct {
unsigned short vec;
char desc[22];
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 0089746b8d0..7381f378b4e 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -338,7 +338,7 @@ static void free_irq_resources(struct adapter *adapter)
free_irq(adapter->msix_info[0].vec, adapter);
for_each_port(adapter, i)
- n += adap2pinfo(adapter, i)->nqsets;
+ n += adap2pinfo(adapter, i)->nqsets;
for (i = 0; i < n; ++i)
free_irq(adapter->msix_info[i + 1].vec,
@@ -508,19 +508,9 @@ static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- int i, lro_on = 1;
adapter->params.sge.qset[qset_idx].lro = !!val;
adapter->sge.qs[qset_idx].lro_enabled = !!val;
-
- /* let ethtool report LRO on only if all queues are LRO enabled */
- for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i)
- lro_on &= adapter->params.sge.qset[i].lro;
-
- if (lro_on)
- dev->features |= NETIF_F_LRO;
- else
- dev->features &= ~NETIF_F_LRO;
}
/**
@@ -1433,9 +1423,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
- *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
- *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
- *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
+ *data++ = 0;
+ *data++ = 0;
+ *data++ = 0;
*data++ = s->rx_cong_drops;
*data++ = s->num_toggled;
@@ -1826,28 +1816,6 @@ static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
-static int cxgb3_set_flags(struct net_device *dev, u32 data)
-{
- struct port_info *pi = netdev_priv(dev);
- int i;
-
- if (data & ETH_FLAG_LRO) {
- if (!(pi->rx_offload & T3_RX_CSUM))
- return -EINVAL;
-
- pi->rx_offload |= T3_LRO;
- for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
- set_qset_lro(dev, i, 1);
-
- } else {
- pi->rx_offload &= ~T3_LRO;
- for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
- set_qset_lro(dev, i, 0);
- }
-
- return 0;
-}
-
static const struct ethtool_ops cxgb_ethtool_ops = {
.get_settings = get_settings,
.set_settings = set_settings,
@@ -1877,8 +1845,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_regs = get_regs,
.get_wol = get_wol,
.set_tso = ethtool_op_set_tso,
- .get_flags = ethtool_op_get_flags,
- .set_flags = cxgb3_set_flags,
};
static int in_range(int val, int lo, int hi)
@@ -2752,7 +2718,7 @@ static void set_nqsets(struct adapter *adap)
int i, j = 0;
int num_cpus = num_online_cpus();
int hwports = adap->params.nports;
- int nqsets = SGE_QSETS;
+ int nqsets = adap->msix_nvectors - 1;
if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
if (hwports == 2 &&
@@ -2781,18 +2747,25 @@ static void set_nqsets(struct adapter *adap)
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
struct msix_entry entries[SGE_QSETS + 1];
+ int vectors;
int i, err;
- for (i = 0; i < ARRAY_SIZE(entries); ++i)
+ vectors = ARRAY_SIZE(entries);
+ for (i = 0; i < vectors; ++i)
entries[i].entry = i;
- err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
+ while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
+ vectors = err;
+
+ if (!err && vectors < (adap->params.nports + 1))
+ err = -1;
+
if (!err) {
- for (i = 0; i < ARRAY_SIZE(entries); ++i)
+ for (i = 0; i < vectors; ++i)
adap->msix_info[i].vec = entries[i].vector;
- } else if (err > 0)
- dev_info(&adap->pdev->dev,
- "only %d MSI-X vectors left, not using MSI-X\n", err);
+ adap->msix_nvectors = vectors;
+ }
+
return err;
}
@@ -2960,7 +2933,7 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->mem_end = mmio_start + mmio_len - 1;
netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
netdev->features |= NETIF_F_LLTX;
- netdev->features |= NETIF_F_LRO;
+ netdev->features |= NETIF_F_GRO;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 379a1324db4..8299fb538f2 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -585,8 +585,7 @@ static void t3_reset_qset(struct sge_qset *q)
memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
q->txq_stopped = 0;
q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
- kfree(q->lro_frag_tbl);
- q->lro_nfrags = q->lro_frag_len = 0;
+ q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0;
}
@@ -1945,10 +1944,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
qs->port_stats[SGE_PSTAT_VLANEX]++;
if (likely(grp))
if (lro)
- lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb,
- grp,
- ntohs(p->vlan),
- p);
+ vlan_gro_receive(&qs->napi, grp,
+ ntohs(p->vlan), skb);
else {
if (unlikely(pi->iscsi_ipv4addr &&
is_arp(skb))) {
@@ -1965,7 +1962,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
dev_kfree_skb_any(skb);
} else if (rq->polling) {
if (lro)
- lro_receive_skb(&qs->lro_mgr, skb, p);
+ napi_gro_receive(&qs->napi, skb);
else {
if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
cxgb3_arp_process(adap, skb);
@@ -1981,59 +1978,6 @@ static inline int is_eth_tcp(u32 rss)
}
/**
- * lro_frame_ok - check if an ingress packet is eligible for LRO
- * @p: the CPL header of the packet
- *
- * Returns true if a received packet is eligible for LRO.
- * The following conditions must be true:
- * - packet is TCP/IP Ethernet II (checked elsewhere)
- * - not an IP fragment
- * - no IP options
- * - TCP/IP checksums are correct
- * - the packet is for this host
- */
-static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
-{
- const struct ethhdr *eh = (struct ethhdr *)(p + 1);
- const struct iphdr *ih = (struct iphdr *)(eh + 1);
-
- return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
- eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
-}
-
-static int t3_get_lro_header(void **eh, void **iph, void **tcph,
- u64 *hdr_flags, void *priv)
-{
- const struct cpl_rx_pkt *cpl = priv;
-
- if (!lro_frame_ok(cpl))
- return -1;
-
- *eh = (struct ethhdr *)(cpl + 1);
- *iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
- *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
-
- *hdr_flags = LRO_IPV4 | LRO_TCP;
- return 0;
-}
-
-static int t3_get_skb_header(struct sk_buff *skb,
- void **iph, void **tcph, u64 *hdr_flags,
- void *priv)
-{
- void *eh;
-
- return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv);
-}
-
-static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh,
- void **iph, void **tcph, u64 *hdr_flags,
- void *priv)
-{
- return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv);
-}
-
-/**
* lro_add_page - add a page chunk to an LRO session
* @adap: the adapter
* @qs: the associated queue set
@@ -2049,8 +1993,9 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
{
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
struct cpl_rx_pkt *cpl;
- struct skb_frag_struct *rx_frag = qs->lro_frag_tbl;
- int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len;
+ struct skb_frag_struct *rx_frag = qs->lro_frag_tbl.frags;
+ int nr_frags = qs->lro_frag_tbl.nr_frags;
+ int frag_len = qs->lro_frag_tbl.len;
int offset = 0;
if (!nr_frags) {
@@ -2069,13 +2014,13 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
rx_frag->page_offset = sd->pg_chunk.offset + offset;
rx_frag->size = len;
frag_len += len;
- qs->lro_nfrags++;
- qs->lro_frag_len = frag_len;
+ qs->lro_frag_tbl.nr_frags++;
+ qs->lro_frag_tbl.len = frag_len;
if (!complete)
return;
- qs->lro_nfrags = qs->lro_frag_len = 0;
+ qs->lro_frag_tbl.ip_summed = CHECKSUM_UNNECESSARY;
cpl = qs->lro_va;
if (unlikely(cpl->vlan_valid)) {
@@ -2084,36 +2029,15 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
struct vlan_group *grp = pi->vlan_grp;
if (likely(grp != NULL)) {
- lro_vlan_hwaccel_receive_frags(&qs->lro_mgr,
- qs->lro_frag_tbl,
- frag_len, frag_len,
- grp, ntohs(cpl->vlan),
- cpl, 0);
- return;
+ vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan),
+ &qs->lro_frag_tbl);
+ goto out;
}
}
- lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl,
- frag_len, frag_len, cpl, 0);
-}
+ napi_gro_frags(&qs->napi, &qs->lro_frag_tbl);
-/**
- * init_lro_mgr - initialize a LRO manager object
- * @lro_mgr: the LRO manager object
- */
-static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
-{
- lro_mgr->dev = qs->netdev;
- lro_mgr->features = LRO_F_NAPI;
- lro_mgr->frag_align_pad = NET_IP_ALIGN;
- lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
- lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
- lro_mgr->max_desc = T3_MAX_LRO_SES;
- lro_mgr->lro_arr = qs->lro_desc;
- lro_mgr->get_frag_header = t3_get_frag_header;
- lro_mgr->get_skb_header = t3_get_skb_header;
- lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS;
- if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
- lro_mgr->max_aggr = MAX_SKB_FRAGS;
+out:
+ qs->lro_frag_tbl.nr_frags = qs->lro_frag_tbl.len = 0;
}
/**
@@ -2357,10 +2281,6 @@ next_fl:
}
deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
- lro_flush_all(&qs->lro_mgr);
- qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated;
- qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed;
- qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc;
if (sleeping)
check_ring_db(adap, qs, sleeping);
@@ -2907,7 +2827,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
{
int i, avail, ret = -ENOMEM;
struct sge_qset *q = &adapter->sge.qs[id];
- struct net_lro_mgr *lro_mgr = &q->lro_mgr;
init_qset_cntxt(q, id);
setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q);
@@ -2987,10 +2906,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
q->fl[0].order = FL0_PG_ORDER;
q->fl[1].order = FL1_PG_ORDER;
- q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1,
- sizeof(struct skb_frag_struct),
- GFP_KERNEL);
- q->lro_nfrags = q->lro_frag_len = 0;
spin_lock_irq(&adapter->sge.reg_lock);
/* FL threshold comparison uses < */
@@ -3042,8 +2957,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
q->tx_q = netdevq;
t3_update_qset_coalesce(q, p);
- init_lro_mgr(q, lro_mgr);
-
avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
GFP_KERNEL | __GFP_COMP);
if (!avail) {
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 86bb876fb12..861d2eeaa43 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1944,9 +1944,9 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
if (stat_ack & stat_ack_rnr)
nic->ru_running = RU_SUSPENDED;
- if (likely(netif_rx_schedule_prep(&nic->napi))) {
+ if (likely(napi_schedule_prep(&nic->napi))) {
e100_disable_irq(nic);
- __netif_rx_schedule(&nic->napi);
+ __napi_schedule(&nic->napi);
}
return IRQ_HANDLED;
@@ -1962,7 +1962,7 @@ static int e100_poll(struct napi_struct *napi, int budget)
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
e100_enable_irq(nic);
}
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index f5581de0475..e9a416f4016 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -182,7 +182,6 @@ struct e1000_tx_ring {
/* array of buffer information structs */
struct e1000_buffer *buffer_info;
- spinlock_t tx_lock;
u16 tdh;
u16 tdt;
bool last_tx_tso;
@@ -238,7 +237,6 @@ struct e1000_adapter {
u16 link_speed;
u16 link_duplex;
spinlock_t stats_lock;
- spinlock_t tx_queue_lock;
unsigned int total_tx_bytes;
unsigned int total_tx_packets;
unsigned int total_rx_bytes;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 26474c92193..7ec1a0c5a0c 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.20-k3-NAPI"
+#define DRV_VERSION "7.3.21-k2-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -1048,8 +1048,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->features |= NETIF_F_LLTX;
-
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_HW_CSUM;
@@ -1368,8 +1366,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
return -ENOMEM;
}
- spin_lock_init(&adapter->tx_queue_lock);
-
/* Explicitly disable IRQ since the NIC can be in any state. */
e1000_irq_disable(adapter);
@@ -1624,7 +1620,6 @@ setup_tx_desc_die:
txdr->next_to_use = 0;
txdr->next_to_clean = 0;
- spin_lock_init(&txdr->tx_lock);
return 0;
}
@@ -3185,7 +3180,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
unsigned int len = skb->len - skb->data_len;
- unsigned long flags;
unsigned int nr_frags;
unsigned int mss;
int count = 0;
@@ -3290,22 +3284,15 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
(hw->mac_type == e1000_82573))
e1000_transfer_dhcp_info(adapter, skb);
- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
- /* Collision - tell upper layer to requeue */
- return NETDEV_TX_LOCKED;
-
/* need: count + 2 desc gap to keep tail from touching
* head, otherwise try next time */
- if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
return NETDEV_TX_BUSY;
- }
if (unlikely(hw->mac_type == e1000_82547)) {
if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
netif_stop_queue(netdev);
mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_BUSY;
}
}
@@ -3320,7 +3307,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tso = e1000_tso(adapter, tx_ring, skb);
if (tso < 0) {
dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_OK;
}
@@ -3345,7 +3331,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_OK;
}
@@ -3687,12 +3672,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- if (likely(netif_rx_schedule_prep(&adapter->napi))) {
+ if (likely(napi_schedule_prep(&adapter->napi))) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
- __netif_rx_schedule(&adapter->napi);
+ __napi_schedule(&adapter->napi);
} else
e1000_irq_enable(adapter);
@@ -3747,12 +3732,12 @@ static irqreturn_t e1000_intr(int irq, void *data)
ew32(IMC, ~0);
E1000_WRITE_FLUSH();
}
- if (likely(netif_rx_schedule_prep(&adapter->napi))) {
+ if (likely(napi_schedule_prep(&adapter->napi))) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
- __netif_rx_schedule(&adapter->napi);
+ __napi_schedule(&adapter->napi);
} else
/* this really should not happen! if it does it is basically a
* bug, but not a hard error, so enable ints and continue */
@@ -3773,15 +3758,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
adapter = netdev_priv(poll_dev);
- /* e1000_clean is called per-cpu. This lock protects
- * tx_ring[0] from being cleaned by multiple cpus
- * simultaneously. A failure obtaining the lock means
- * tx_ring[0] is currently being cleaned anyway. */
- if (spin_trylock(&adapter->tx_queue_lock)) {
- tx_cleaned = e1000_clean_tx_irq(adapter,
- &adapter->tx_ring[0]);
- spin_unlock(&adapter->tx_queue_lock);
- }
+ tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
adapter->clean_rx(adapter, &adapter->rx_ring[0],
&work_done, budget);
@@ -3793,7 +3770,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
if (work_done < budget) {
if (likely(adapter->itr_setting & 3))
e1000_set_itr(adapter);
- netif_rx_complete(napi);
+ napi_complete(napi);
e1000_irq_enable(adapter);
}
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 37bcb190eef..28bf9a51346 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -195,8 +195,6 @@ struct e1000_adapter {
u16 link_duplex;
u16 eeprom_vers;
- spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
-
/* track device up/down/testing state */
unsigned long state;
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 91817d0afca..e04b392c9a5 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -47,7 +47,7 @@
#include "e1000.h"
-#define DRV_VERSION "0.3.3.3-k6"
+#define DRV_VERSION "0.3.3.4-k2"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -99,8 +99,8 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
skb->protocol = eth_type_trans(skb, netdev);
if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
- vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
- le16_to_cpu(vlan));
+ vlan_gro_receive(&adapter->napi, adapter->vlgrp,
+ le16_to_cpu(vlan), skb);
else
napi_gro_receive(&adapter->napi, skb);
}
@@ -1179,12 +1179,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- if (netif_rx_schedule_prep(&adapter->napi)) {
+ if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
- __netif_rx_schedule(&adapter->napi);
+ __napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
@@ -1246,12 +1246,12 @@ static irqreturn_t e1000_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- if (netif_rx_schedule_prep(&adapter->napi)) {
+ if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
- __netif_rx_schedule(&adapter->napi);
+ __napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
@@ -1320,10 +1320,10 @@ static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
adapter->rx_ring->set_itr = 0;
}
- if (netif_rx_schedule_prep(&adapter->napi)) {
+ if (napi_schedule_prep(&adapter->napi)) {
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
- __netif_rx_schedule(&adapter->napi);
+ __napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
}
@@ -1698,7 +1698,6 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- spin_lock_init(&adapter->tx_queue_lock);
return 0;
err:
@@ -2007,16 +2006,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
!(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
goto clean_rx;
- /*
- * e1000_clean is called per-cpu. This lock protects
- * tx_ring from being cleaned by multiple cpus
- * simultaneously. A failure obtaining the lock means
- * tx_ring is currently being cleaned anyway.
- */
- if (spin_trylock(&adapter->tx_queue_lock)) {
- tx_cleaned = e1000_clean_tx_irq(adapter);
- spin_unlock(&adapter->tx_queue_lock);
- }
+ tx_cleaned = e1000_clean_tx_irq(adapter);
clean_rx:
adapter->clean_rx(adapter, &work_done, budget);
@@ -2028,7 +2018,7 @@ clean_rx:
if (work_done < budget) {
if (adapter->itr_setting & 3)
e1000_set_itr(adapter);
- netif_rx_complete(napi);
+ napi_complete(napi);
if (adapter->msix_entries)
ew32(IMS, adapter->rx_ring->ims_val);
else
@@ -2922,8 +2912,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
if (e1000_alloc_queues(adapter))
return -ENOMEM;
- spin_lock_init(&adapter->tx_queue_lock);
-
/* Explicitly disable IRQ since the NIC can be in any state. */
e1000_irq_disable(adapter);
@@ -4069,7 +4057,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
unsigned int len = skb->len - skb->data_len;
- unsigned long irq_flags;
unsigned int nr_frags;
unsigned int mss;
int count = 0;
@@ -4138,18 +4125,12 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (adapter->hw.mac.tx_pkt_filtering)
e1000_transfer_dhcp_info(adapter, skb);
- if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags))
- /* Collision - tell upper layer to requeue */
- return NETDEV_TX_LOCKED;
-
/*
* need: count + 2 desc gap to keep tail from touching
* head, otherwise try next time
*/
- if (e1000_maybe_stop_tx(netdev, count + 2)) {
- spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
+ if (e1000_maybe_stop_tx(netdev, count + 2))
return NETDEV_TX_BUSY;
- }
if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
tx_flags |= E1000_TX_FLAGS_VLAN;
@@ -4161,7 +4142,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tso = e1000_tso(adapter, skb);
if (tso < 0) {
dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
return NETDEV_TX_OK;
}
@@ -4182,7 +4162,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (count < 0) {
/* handle pci_map_single() error in e1000_tx_map */
dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
return NETDEV_TX_OK;
}
@@ -4193,7 +4172,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
- spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
return NETDEV_TX_OK;
}
@@ -4922,12 +4900,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- /*
- * We should not be using LLTX anymore, but we are still Tx faster with
- * it.
- */
- netdev->features |= NETIF_F_LLTX;
-
if (e1000e_enable_mng_pass_thru(&adapter->hw))
adapter->flags |= FLAG_MNG_PT_ENABLED;
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 6271b9411cc..f7e2ccfd3e8 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0096"
+#define DRV_VERSION "EHEA_0097"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index dfe92264e82..19fccca74ce 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -308,7 +308,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
memset(stats, 0, sizeof(*stats));
- cb2 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ cb2 = (void *)get_zeroed_page(GFP_ATOMIC);
if (!cb2) {
ehea_error("no mem for cb2");
goto out;
@@ -341,7 +341,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
stats->rx_packets = rx_packets;
out_herr:
- kfree(cb2);
+ free_page((unsigned long)cb2);
out:
return stats;
}
@@ -370,8 +370,6 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
EHEA_L_PKT_SIZE);
if (!skb_arr_rq1[index]) {
pr->rq1_skba.os_skbs = fill_wqes - i;
- ehea_error("%s: no mem for skb/%d wqes filled",
- dev->name, i);
break;
}
}
@@ -387,26 +385,19 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
ehea_update_rq1a(pr->qp, adder);
}
-static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
+static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
- int ret = 0;
struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
struct net_device *dev = pr->port->netdev;
int i;
for (i = 0; i < pr->rq1_skba.len; i++) {
skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
- if (!skb_arr_rq1[i]) {
- ehea_error("%s: no mem for skb/%d wqes filled",
- dev->name, i);
- ret = -ENOMEM;
- goto out;
- }
+ if (!skb_arr_rq1[i])
+ break;
}
/* Ring doorbell */
ehea_update_rq1a(pr->qp, nr_rq1a);
-out:
- return ret;
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -435,10 +426,12 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
u64 tmp_addr;
struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
if (!skb) {
- ehea_error("%s: no mem for skb/%d wqes filled",
- pr->port->netdev->name, i);
q_skba->os_skbs = fill_wqes - i;
- ret = -ENOMEM;
+ if (q_skba->os_skbs == q_skba->len - 2) {
+ ehea_info("%s: rq%i ran dry - no mem for skb",
+ pr->port->netdev->name, rq_nr);
+ ret = -ENOMEM;
+ }
break;
}
skb_reserve(skb, NET_IP_ALIGN);
@@ -830,7 +823,7 @@ static int ehea_poll(struct napi_struct *napi, int budget)
while ((rx != budget) || force_irq) {
pr->poll_counter = 0;
force_irq = 0;
- netif_rx_complete(napi);
+ napi_complete(napi);
ehea_reset_cq_ep(pr->recv_cq);
ehea_reset_cq_ep(pr->send_cq);
ehea_reset_cq_n1(pr->recv_cq);
@@ -841,7 +834,7 @@ static int ehea_poll(struct napi_struct *napi, int budget)
if (!cqe && !cqe_skb)
return rx;
- if (!netif_rx_reschedule(napi))
+ if (!napi_reschedule(napi))
return rx;
cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
@@ -859,7 +852,7 @@ static void ehea_netpoll(struct net_device *dev)
int i;
for (i = 0; i < port->num_def_qps; i++)
- netif_rx_schedule(&port->port_res[i].napi);
+ napi_schedule(&port->port_res[i].napi);
}
#endif
@@ -867,7 +860,7 @@ static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
struct ehea_port_res *pr = param;
- netif_rx_schedule(&pr->napi);
+ napi_schedule(&pr->napi);
return IRQ_HANDLED;
}
@@ -915,7 +908,7 @@ int ehea_sense_port_attr(struct ehea_port *port)
struct hcp_ehea_port_cb0 *cb0;
/* may be called via ehea_neq_tasklet() */
- cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
if (!cb0) {
ehea_error("no mem for cb0");
ret = -ENOMEM;
@@ -996,7 +989,7 @@ int ehea_sense_port_attr(struct ehea_port *port)
out_free:
if (ret || netif_msg_probe(port))
ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
- kfree(cb0);
+ free_page((unsigned long)cb0);
out:
return ret;
}
@@ -1007,7 +1000,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
u64 hret;
int ret = 0;
- cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb4 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb4) {
ehea_error("no mem for cb4");
ret = -ENOMEM;
@@ -1075,7 +1068,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
netif_carrier_on(port->netdev);
- kfree(cb4);
+ free_page((unsigned long)cb4);
out:
return ret;
}
@@ -1201,11 +1194,11 @@ static int ehea_fill_port_res(struct ehea_port_res *pr)
int ret;
struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
- ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
- - init_attr->act_nr_rwqes_rq2
- - init_attr->act_nr_rwqes_rq3 - 1);
+ ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
+ - init_attr->act_nr_rwqes_rq2
+ - init_attr->act_nr_rwqes_rq3 - 1);
- ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
+ ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);
@@ -1302,7 +1295,7 @@ static int ehea_configure_port(struct ehea_port *port)
struct hcp_ehea_port_cb0 *cb0;
ret = -ENOMEM;
- cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb0 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb0)
goto out;
@@ -1338,7 +1331,7 @@ static int ehea_configure_port(struct ehea_port *port)
ret = 0;
out_free:
- kfree(cb0);
+ free_page((unsigned long)cb0);
out:
return ret;
}
@@ -1748,7 +1741,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
goto out;
}
- cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb0 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb0) {
ehea_error("no mem for cb0");
ret = -ENOMEM;
@@ -1793,7 +1786,7 @@ out_upregs:
ehea_update_bcmc_registrations();
spin_unlock(&ehea_bcmc_regs.lock);
out_free:
- kfree(cb0);
+ free_page((unsigned long)cb0);
out:
return ret;
}
@@ -1817,7 +1810,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
if ((enable && port->promisc) || (!enable && !port->promisc))
return;
- cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
if (!cb7) {
ehea_error("no mem for cb7");
goto out;
@@ -1836,7 +1829,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
port->promisc = enable;
out:
- kfree(cb7);
+ free_page((unsigned long)cb7);
return;
}
@@ -2217,7 +2210,7 @@ static void ehea_vlan_rx_register(struct net_device *dev,
port->vgrp = grp;
- cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
ehea_error("no mem for cb1");
goto out;
@@ -2228,7 +2221,7 @@ static void ehea_vlan_rx_register(struct net_device *dev,
if (hret != H_SUCCESS)
ehea_error("modify_ehea_port failed");
- kfree(cb1);
+ free_page((unsigned long)cb1);
out:
return;
}
@@ -2241,7 +2234,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
int index;
u64 hret;
- cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
ehea_error("no mem for cb1");
goto out;
@@ -2262,7 +2255,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
if (hret != H_SUCCESS)
ehea_error("modify_ehea_port failed");
out:
- kfree(cb1);
+ free_page((unsigned long)cb1);
return;
}
@@ -2276,7 +2269,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
vlan_group_set_device(port->vgrp, vid, NULL);
- cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
ehea_error("no mem for cb1");
goto out;
@@ -2297,7 +2290,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
if (hret != H_SUCCESS)
ehea_error("modify_ehea_port failed");
out:
- kfree(cb1);
+ free_page((unsigned long)cb1);
return;
}
@@ -2309,7 +2302,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
u64 dummy64 = 0;
struct hcp_modify_qp_cb0 *cb0;
- cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb0 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb0) {
ret = -ENOMEM;
goto out;
@@ -2372,7 +2365,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
ret = 0;
out:
- kfree(cb0);
+ free_page((unsigned long)cb0);
return ret;
}
@@ -2664,7 +2657,7 @@ int ehea_stop_qps(struct net_device *dev)
u64 dummy64 = 0;
u16 dummy16 = 0;
- cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb0 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb0) {
ret = -ENOMEM;
goto out;
@@ -2716,7 +2709,7 @@ int ehea_stop_qps(struct net_device *dev)
ret = 0;
out:
- kfree(cb0);
+ free_page((unsigned long)cb0);
return ret;
}
@@ -2766,7 +2759,7 @@ int ehea_restart_qps(struct net_device *dev)
u64 dummy64 = 0;
u16 dummy16 = 0;
- cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb0 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb0) {
ret = -ENOMEM;
goto out;
@@ -2819,7 +2812,7 @@ int ehea_restart_qps(struct net_device *dev)
ehea_refill_rq3(pr, 0);
}
out:
- kfree(cb0);
+ free_page((unsigned long)cb0);
return ret;
}
@@ -2950,7 +2943,7 @@ int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
u64 hret;
int ret;
- cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb) {
ret = -ENOMEM;
goto out;
@@ -2967,7 +2960,7 @@ int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
ret = 0;
out_herr:
- kfree(cb);
+ free_page((unsigned long)cb);
out:
return ret;
}
@@ -2981,7 +2974,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
*jumbo = 0;
/* (Try to) enable *jumbo frames */
- cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb4 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb4) {
ehea_error("no mem for cb4");
ret = -ENOMEM;
@@ -3009,7 +3002,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
} else
ret = -EINVAL;
- kfree(cb4);
+ free_page((unsigned long)cb4);
}
out:
return ret;
@@ -3069,6 +3062,22 @@ static void ehea_unregister_port(struct ehea_port *port)
of_device_unregister(&port->ofdev);
}
+static const struct net_device_ops ehea_netdev_ops = {
+ .ndo_open = ehea_open,
+ .ndo_stop = ehea_stop,
+ .ndo_start_xmit = ehea_start_xmit,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ehea_netpoll,
+#endif
+ .ndo_get_stats = ehea_get_stats,
+ .ndo_set_mac_address = ehea_set_mac_addr,
+ .ndo_set_multicast_list = ehea_set_multicast_list,
+ .ndo_change_mtu = ehea_change_mtu,
+ .ndo_vlan_rx_register = ehea_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid
+};
+
struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
u32 logical_port_id,
struct device_node *dn)
@@ -3121,19 +3130,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
/* initialize net_device structure */
memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
- dev->open = ehea_open;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = ehea_netpoll;
-#endif
- dev->stop = ehea_stop;
- dev->hard_start_xmit = ehea_start_xmit;
- dev->get_stats = ehea_get_stats;
- dev->set_multicast_list = ehea_set_multicast_list;
- dev->set_mac_address = ehea_set_mac_addr;
- dev->change_mtu = ehea_change_mtu;
- dev->vlan_rx_register = ehea_vlan_rx_register;
- dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
- dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
+ dev->netdev_ops = &ehea_netdev_ops;
+ ehea_set_ethtool_ops(dev);
+
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
| NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
@@ -3142,7 +3141,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
INIT_WORK(&port->reset_task, ehea_reset_port);
- ehea_set_ethtool_ops(dev);
ret = register_netdev(dev);
if (ret) {
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 49d766ebbcf..3747457f5e6 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -1005,7 +1005,7 @@ void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
unsigned long ret;
u64 *rblock;
- rblock = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ rblock = (void *)get_zeroed_page(GFP_KERNEL);
if (!rblock) {
ehea_error("Cannot allocate rblock memory.");
return;
@@ -1022,5 +1022,5 @@ void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
else
ehea_error("Error data could not be fetched: %llX", res_handle);
- kfree(rblock);
+ free_page((unsigned long)rblock);
}
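The ehea hunks above pair get_zeroed_page() with free_page() wherever a page-sized, zeroed control block used to come from kzalloc(); get_zeroed_page() always returns a whole, page-aligned page, whereas kzalloc(PAGE_SIZE) only guarantees the size. A minimal sketch of that pairing, assuming a hypothetical foo_query_with_page() helper and query callback:

#include <linux/gfp.h>
#include <linux/errno.h>

/* hypothetical helper: run one firmware query through a zeroed bounce page */
static int foo_query_with_page(int (*query)(void *page))
{
        void *cb;
        int ret;

        /* one zero-filled, page-aligned page, returned as an unsigned long */
        cb = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb)
                return -ENOMEM;

        ret = query(cb);

        /* memory from get_zeroed_page() goes back via free_page(), never kfree() */
        free_page((unsigned long)cb);
        return ret;
}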
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 7d60551d538..4617956821c 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -411,8 +411,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
}
if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
- if (netif_rx_schedule_prep(&enic->napi))
- __netif_rx_schedule(&enic->napi);
+ if (napi_schedule_prep(&enic->napi))
+ __napi_schedule(&enic->napi);
} else {
vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
}
@@ -440,7 +440,7 @@ static irqreturn_t enic_isr_msi(int irq, void *data)
* writes).
*/
- netif_rx_schedule(&enic->napi);
+ napi_schedule(&enic->napi);
return IRQ_HANDLED;
}
@@ -450,7 +450,7 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
struct enic *enic = data;
/* schedule NAPI polling for RQ cleanup */
- netif_rx_schedule(&enic->napi);
+ napi_schedule(&enic->napi);
return IRQ_HANDLED;
}
@@ -1068,7 +1068,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
if (netdev->features & NETIF_F_LRO)
lro_flush_all(&enic->lro_mgr);
- netif_rx_complete(napi);
+ napi_complete(napi);
vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
}
@@ -1112,7 +1112,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
if (netdev->features & NETIF_F_LRO)
lro_flush_all(&enic->lro_mgr);
- netif_rx_complete(napi);
+ napi_complete(napi);
vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
}
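The enic hunks above are part of the rename from the old netif_rx_schedule()/netif_rx_complete() helpers to napi_schedule()/napi_complete(); the interrupt/poll pairing looks roughly like the sketch below, where the foo_* names are hypothetical:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* hypothetical per-device context holding the NAPI instance */
struct foo_priv {
        struct napi_struct napi;
};

static irqreturn_t foo_isr(int irq, void *data)
{
        struct foo_priv *fp = data;

        /* claim the poll; only one CPU wins, the rest see it already scheduled */
        if (napi_schedule_prep(&fp->napi)) {
                /* mask the device's rx interrupt here, then hand off to softirq */
                __napi_schedule(&fp->napi);
        }
        return IRQ_HANDLED;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        /* ... clean up to 'budget' rx descriptors, counting them in work_done ... */

        if (work_done < budget) {
                /* leave polling mode and re-enable the device's rx interrupt */
                napi_complete(napi);
        }
        return work_done;
}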
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index a539bc3163c..b60e27dfcfa 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -1114,9 +1114,9 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
spin_lock(&ep->napi_lock);
- if (netif_rx_schedule_prep(&ep->napi)) {
+ if (napi_schedule_prep(&ep->napi)) {
epic_napi_irq_off(dev, ep);
- __netif_rx_schedule(&ep->napi);
+ __napi_schedule(&ep->napi);
} else
ep->reschedule_in_poll++;
spin_unlock(&ep->napi_lock);
@@ -1293,7 +1293,7 @@ rx_action:
more = ep->reschedule_in_poll;
if (!more) {
- __netif_rx_complete(napi);
+ __napi_complete(napi);
outl(EpicNapiEvent, ioaddr + INTSTAT);
epic_napi_irq_on(dev, ep);
} else
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5b910cf6374..875509d7d86 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1760,7 +1760,7 @@ static void nv_do_rx_refill(unsigned long data)
struct fe_priv *np = netdev_priv(dev);
/* Just reschedule NAPI rx processing */
- netif_rx_schedule(&np->napi);
+ napi_schedule(&np->napi);
}
#else
static void nv_do_rx_refill(unsigned long data)
@@ -3406,7 +3406,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
#ifdef CONFIG_FORCEDETH_NAPI
if (events & NVREG_IRQ_RX_ALL) {
spin_lock(&np->lock);
- netif_rx_schedule(&np->napi);
+ napi_schedule(&np->napi);
/* Disable further receive irqs */
np->irqmask &= ~NVREG_IRQ_RX_ALL;
@@ -3523,7 +3523,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
#ifdef CONFIG_FORCEDETH_NAPI
if (events & NVREG_IRQ_RX_ALL) {
spin_lock(&np->lock);
- netif_rx_schedule(&np->napi);
+ napi_schedule(&np->napi);
/* Disable further receive irqs */
np->irqmask &= ~NVREG_IRQ_RX_ALL;
@@ -3680,7 +3680,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
/* re-enable receive interrupts */
spin_lock_irqsave(&np->lock, flags);
- __netif_rx_complete(napi);
+ __napi_complete(napi);
np->irqmask |= NVREG_IRQ_RX_ALL;
if (np->msi_flags & NV_MSI_X_ENABLED)
@@ -3706,7 +3706,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
if (events) {
- netif_rx_schedule(&np->napi);
+ napi_schedule(&np->napi);
/* disable receive interrupts on the nic */
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
pci_push(base);
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index ce900e54d8d..b037ce9857b 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -209,7 +209,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
if (received < budget) {
/* done */
- netif_rx_complete(napi);
+ napi_complete(napi);
(*fep->ops->napi_enable_rx)(dev);
}
return received;
@@ -478,7 +478,7 @@ fs_enet_interrupt(int irq, void *dev_id)
/* NOTE: it is possible for FCCs in NAPI mode */
/* to submit a spurious interrupt while in poll */
if (napi_ok)
- __netif_rx_schedule(&fep->napi);
+ __napi_schedule(&fep->napi);
}
}
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 3f7eab42aef..f5e6068f9b0 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1623,9 +1623,9 @@ static void gfar_schedule_cleanup(struct net_device *dev)
spin_lock_irqsave(&priv->txlock, flags);
spin_lock(&priv->rxlock);
- if (netif_rx_schedule_prep(&priv->napi)) {
+ if (napi_schedule_prep(&priv->napi)) {
gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
- __netif_rx_schedule(&priv->napi);
+ __napi_schedule(&priv->napi);
}
spin_unlock(&priv->rxlock);
@@ -1882,7 +1882,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
return budget;
if (rx_cleaned < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
/* Clear the halt bit in RSTAT */
gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 2d4089894ec..3da9f394b4c 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -322,23 +322,25 @@ static const struct header_ops sp_header_ops = {
.rebuild = sp_rebuild_header,
};
+static const struct net_device_ops sp_netdev_ops = {
+ .ndo_open = sp_open_dev,
+ .ndo_stop = sp_close,
+ .ndo_start_xmit = sp_xmit,
+ .ndo_set_mac_address = sp_set_mac_address,
+};
+
static void sp_setup(struct net_device *dev)
{
/* Finish setting up the DEVICE info. */
- dev->mtu = SIXP_MTU;
- dev->hard_start_xmit = sp_xmit;
- dev->open = sp_open_dev;
+ dev->netdev_ops = &sp_netdev_ops;
dev->destructor = free_netdev;
- dev->stop = sp_close;
-
- dev->set_mac_address = sp_set_mac_address;
+ dev->mtu = SIXP_MTU;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
dev->header_ops = &sp_header_ops;
dev->addr_len = AX25_ADDR_LEN;
dev->type = ARPHRD_AX25;
dev->tx_queue_len = 10;
- dev->tx_timeout = NULL;
/* Only activated in AX.25 mode */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
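As in the 6pack hunk above, the hamradio drivers now gather their open/stop/xmit/set_mac_address hooks into a const struct net_device_ops and point dev->netdev_ops at it from the setup routine; a minimal sketch with hypothetical foo_* handlers:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* hypothetical handlers; only the ops-table wiring is the point here */
static int foo_open(struct net_device *dev) { return 0; }
static int foo_stop(struct net_device *dev) { return 0; }
static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_open       = foo_open,
        .ndo_stop       = foo_stop,
        .ndo_start_xmit = foo_xmit,
};

static void foo_setup(struct net_device *dev)
{
        /* one shared ops table replaces the per-device function pointers */
        dev->netdev_ops = &foo_netdev_ops;
}

Setting dev->netdev_ops replaces assigning dev->open, dev->stop and friends one by one, and the const table can be shared by every instance of the driver.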
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 81a65e3a1c0..bb78c11559c 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -203,7 +203,6 @@ struct baycom_state {
unsigned char buf[TXBUFFER_SIZE];
} hdlctx;
- struct net_device_stats stats;
unsigned int ptt_keyed;
struct sk_buff *skb; /* next transmit packet */
@@ -423,7 +422,7 @@ static void encode_hdlc(struct baycom_state *bc)
bc->hdlctx.bufptr = bc->hdlctx.buf;
bc->hdlctx.bufcnt = wp - bc->hdlctx.buf;
dev_kfree_skb(skb);
- bc->stats.tx_packets++;
+ bc->dev->stats.tx_packets++;
}
/* ---------------------------------------------------------------------- */
@@ -547,7 +546,7 @@ static void do_rxpacket(struct net_device *dev)
pktlen = bc->hdlcrx.bufcnt-2+1; /* KISS kludge */
if (!(skb = dev_alloc_skb(pktlen))) {
printk("%s: memory squeeze, dropping packet\n", dev->name);
- bc->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
return;
}
cp = skb_put(skb, pktlen);
@@ -555,7 +554,7 @@ static void do_rxpacket(struct net_device *dev)
memcpy(cp, bc->hdlcrx.buf, pktlen - 1);
skb->protocol = ax25_type_trans(skb, dev);
netif_rx(skb);
- bc->stats.rx_packets++;
+ dev->stats.rx_packets++;
}
static int receive(struct net_device *dev, int cnt)
@@ -802,19 +801,6 @@ static int baycom_set_mac_address(struct net_device *dev, void *addr)
/* --------------------------------------------------------------------- */
-static struct net_device_stats *baycom_get_stats(struct net_device *dev)
-{
- struct baycom_state *bc = netdev_priv(dev);
-
- /*
- * Get the current statistics. This may be called with the
- * card open or closed.
- */
- return &bc->stats;
-}
-
-/* --------------------------------------------------------------------- */
-
static void epp_wakeup(void *handle)
{
struct net_device *dev = (struct net_device *)handle;
@@ -1065,10 +1051,10 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
hi.data.cs.ptt = !!(bc->stat & EPP_PTTBIT);
hi.data.cs.dcd = !(bc->stat & EPP_DCDBIT);
hi.data.cs.ptt_keyed = bc->ptt_keyed;
- hi.data.cs.tx_packets = bc->stats.tx_packets;
- hi.data.cs.tx_errors = bc->stats.tx_errors;
- hi.data.cs.rx_packets = bc->stats.rx_packets;
- hi.data.cs.rx_errors = bc->stats.rx_errors;
+ hi.data.cs.tx_packets = dev->stats.tx_packets;
+ hi.data.cs.tx_errors = dev->stats.tx_errors;
+ hi.data.cs.rx_packets = dev->stats.rx_packets;
+ hi.data.cs.rx_errors = dev->stats.rx_errors;
break;
case HDLCDRVCTL_OLDGETSTAT:
@@ -1116,6 +1102,14 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* --------------------------------------------------------------------- */
+static const struct net_device_ops baycom_netdev_ops = {
+ .ndo_open = epp_open,
+ .ndo_stop = epp_close,
+ .ndo_do_ioctl = baycom_ioctl,
+ .ndo_start_xmit = baycom_send_packet,
+ .ndo_set_mac_address = baycom_set_mac_address,
+};
+
/*
* Check for a network adaptor of this type, and return '0' if one exists.
* If dev->base_addr == 0, probe all likely locations.
@@ -1143,17 +1137,12 @@ static void baycom_probe(struct net_device *dev)
/*
* initialize the device struct
*/
- dev->open = epp_open;
- dev->stop = epp_close;
- dev->do_ioctl = baycom_ioctl;
- dev->hard_start_xmit = baycom_send_packet;
- dev->get_stats = baycom_get_stats;
/* Fill in the fields of the device structure */
bc->skb = NULL;
+ dev->netdev_ops = &baycom_netdev_ops;
dev->header_ops = &ax25_header_ops;
- dev->set_mac_address = baycom_set_mac_address;
dev->type = ARPHRD_AX25; /* AF_AX25 device */
dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 46f8f3390e7..1f65d1edf13 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -110,7 +110,6 @@ struct bpqdev {
struct list_head bpq_list; /* list of bpq devices chain */
struct net_device *ethdev; /* link to ethernet device */
struct net_device *axdev; /* bpq device (bpq#) */
- struct net_device_stats stats; /* some statistics */
char dest_addr[6]; /* ether destination address */
char acpt_addr[6]; /* accept ether frames from this address only */
};
@@ -222,8 +221,8 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
skb_pull(skb, 2); /* Remove the length bytes */
skb_trim(skb, len); /* Set the length of the data */
- bpq->stats.rx_packets++;
- bpq->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
ptr = skb_push(skb, 1);
*ptr = 0;
@@ -292,7 +291,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
bpq = netdev_priv(dev);
if ((dev = bpq_get_ether_dev(dev)) == NULL) {
- bpq->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
kfree_skb(skb);
return -ENODEV;
}
@@ -300,8 +299,8 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
skb->protocol = ax25_type_trans(skb, dev);
skb_reset_network_header(skb);
dev_hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0);
- bpq->stats.tx_packets++;
- bpq->stats.tx_bytes+=skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes+=skb->len;
dev_queue_xmit(skb);
netif_wake_queue(dev);
@@ -309,16 +308,6 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
}
/*
- * Statistics
- */
-static struct net_device_stats *bpq_get_stats(struct net_device *dev)
-{
- struct bpqdev *bpq = netdev_priv(dev);
-
- return &bpq->stats;
-}
-
-/*
* Set AX.25 callsign
*/
static int bpq_set_mac_address(struct net_device *dev, void *addr)
@@ -454,7 +443,7 @@ static int bpq_seq_show(struct seq_file *seq, void *v)
return 0;
}
-static struct seq_operations bpq_seqops = {
+static const struct seq_operations bpq_seqops = {
.start = bpq_seq_start,
.next = bpq_seq_next,
.stop = bpq_seq_stop,
@@ -477,16 +466,17 @@ static const struct file_operations bpq_info_fops = {
/* ------------------------------------------------------------------------ */
+static const struct net_device_ops bpq_netdev_ops = {
+ .ndo_open = bpq_open,
+ .ndo_stop = bpq_close,
+ .ndo_start_xmit = bpq_xmit,
+ .ndo_set_mac_address = bpq_set_mac_address,
+ .ndo_do_ioctl = bpq_ioctl,
+};
static void bpq_setup(struct net_device *dev)
{
-
- dev->hard_start_xmit = bpq_xmit;
- dev->open = bpq_open;
- dev->stop = bpq_close;
- dev->set_mac_address = bpq_set_mac_address;
- dev->get_stats = bpq_get_stats;
- dev->do_ioctl = bpq_ioctl;
+ dev->netdev_ops = &bpq_netdev_ops;
dev->destructor = free_netdev;
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
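The bpqether hunks above drop the driver-private struct net_device_stats in favour of the counters embedded in struct net_device; because dev_get_stats() falls back to dev->stats when a driver supplies no get_stats hook, the private accessor can be deleted outright. A minimal sketch of the receive-side accounting, with foo_rx() being hypothetical:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* hypothetical rx handler: account directly into the core-owned counters */
static void foo_rx(struct net_device *dev, struct sk_buff *skb)
{
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += skb->len;
        netif_rx(skb);
}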
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index e67103396ed..881bf818bb4 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -195,7 +195,7 @@ struct scc_priv {
int chip;
struct net_device *dev;
struct scc_info *info;
- struct net_device_stats stats;
+
int channel;
int card_base, scc_cmd, scc_data;
int tmr_cnt, tmr_ctrl, tmr_mode;
@@ -239,7 +239,6 @@ static int scc_open(struct net_device *dev);
static int scc_close(struct net_device *dev);
static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
-static struct net_device_stats *scc_get_stats(struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);
static inline void tx_on(struct scc_priv *priv);
@@ -441,6 +440,13 @@ static void __init dev_setup(struct net_device *dev)
memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
}
+static const struct net_device_ops scc_netdev_ops = {
+ .ndo_open = scc_open,
+ .ndo_stop = scc_close,
+ .ndo_start_xmit = scc_send_packet,
+ .ndo_do_ioctl = scc_ioctl,
+};
+
static int __init setup_adapter(int card_base, int type, int n)
{
int i, irq, chip;
@@ -576,11 +582,7 @@ static int __init setup_adapter(int card_base, int type, int n)
sprintf(dev->name, "dmascc%i", 2 * n + i);
dev->base_addr = card_base;
dev->irq = irq;
- dev->open = scc_open;
- dev->stop = scc_close;
- dev->do_ioctl = scc_ioctl;
- dev->hard_start_xmit = scc_send_packet;
- dev->get_stats = scc_get_stats;
+ dev->netdev_ops = &scc_netdev_ops;
dev->header_ops = &ax25_header_ops;
dev->set_mac_address = scc_set_mac_address;
}
@@ -961,14 +963,6 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
}
-static struct net_device_stats *scc_get_stats(struct net_device *dev)
-{
- struct scc_priv *priv = dev->ml_priv;
-
- return &priv->stats;
-}
-
-
static int scc_set_mac_address(struct net_device *dev, void *sa)
{
memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
@@ -1216,17 +1210,17 @@ static void special_condition(struct scc_priv *priv, int rc)
}
if (priv->rx_over) {
/* We had an overrun */
- priv->stats.rx_errors++;
+ priv->dev->stats.rx_errors++;
if (priv->rx_over == 2)
- priv->stats.rx_length_errors++;
+ priv->dev->stats.rx_length_errors++;
else
- priv->stats.rx_fifo_errors++;
+ priv->dev->stats.rx_fifo_errors++;
priv->rx_over = 0;
} else if (rc & CRC_ERR) {
/* Count invalid CRC only if packet length >= minimum */
if (cb >= 15) {
- priv->stats.rx_errors++;
- priv->stats.rx_crc_errors++;
+ priv->dev->stats.rx_errors++;
+ priv->dev->stats.rx_crc_errors++;
}
} else {
if (cb >= 15) {
@@ -1239,8 +1233,8 @@ static void special_condition(struct scc_priv *priv, int rc)
priv->rx_count++;
schedule_work(&priv->rx_work);
} else {
- priv->stats.rx_errors++;
- priv->stats.rx_over_errors++;
+ priv->dev->stats.rx_errors++;
+ priv->dev->stats.rx_over_errors++;
}
}
}
@@ -1275,7 +1269,7 @@ static void rx_bh(struct work_struct *ugli_api)
skb = dev_alloc_skb(cb + 1);
if (skb == NULL) {
/* Drop packet */
- priv->stats.rx_dropped++;
+ priv->dev->stats.rx_dropped++;
} else {
/* Fill buffer */
data = skb_put(skb, cb + 1);
@@ -1283,8 +1277,8 @@ static void rx_bh(struct work_struct *ugli_api)
memcpy(&data[1], priv->rx_buf[i], cb);
skb->protocol = ax25_type_trans(skb, priv->dev);
netif_rx(skb);
- priv->stats.rx_packets++;
- priv->stats.rx_bytes += cb;
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += cb;
}
spin_lock_irqsave(&priv->ring_lock, flags);
/* Move tail */
@@ -1351,15 +1345,15 @@ static void es_isr(struct scc_priv *priv)
write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
if (res) {
/* Update packet statistics */
- priv->stats.tx_errors++;
- priv->stats.tx_fifo_errors++;
+ priv->dev->stats.tx_errors++;
+ priv->dev->stats.tx_fifo_errors++;
/* Other underrun interrupts may already be waiting */
write_scc(priv, R0, RES_EXT_INT);
write_scc(priv, R0, RES_EXT_INT);
} else {
/* Update packet statistics */
- priv->stats.tx_packets++;
- priv->stats.tx_bytes += priv->tx_len[i];
+ priv->dev->stats.tx_packets++;
+ priv->dev->stats.tx_bytes += priv->tx_len[i];
/* Remove frame from FIFO */
priv->tx_tail = (i + 1) % NUM_TX_BUF;
priv->tx_count--;
@@ -1425,7 +1419,7 @@ static void tm_isr(struct scc_priv *priv)
write_scc(priv, R15, DCDIE);
priv->rr0 = read_scc(priv, R0);
if (priv->rr0 & DCD) {
- priv->stats.collisions++;
+ priv->dev->stats.collisions++;
rx_on(priv);
priv->state = RX_ON;
} else {
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 8eba61a1d4a..61de56e45ee 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -154,7 +154,7 @@ static void hdlc_rx_flag(struct net_device *dev, struct hdlcdrv_state *s)
pkt_len = s->hdlcrx.len - 2 + 1; /* KISS kludge */
if (!(skb = dev_alloc_skb(pkt_len))) {
printk("%s: memory squeeze, dropping packet\n", dev->name);
- s->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
return;
}
cp = skb_put(skb, pkt_len);
@@ -162,7 +162,7 @@ static void hdlc_rx_flag(struct net_device *dev, struct hdlcdrv_state *s)
memcpy(cp, s->hdlcrx.buffer, pkt_len - 1);
skb->protocol = ax25_type_trans(skb, dev);
netif_rx(skb);
- s->stats.rx_packets++;
+ dev->stats.rx_packets++;
}
void hdlcdrv_receiver(struct net_device *dev, struct hdlcdrv_state *s)
@@ -326,7 +326,7 @@ void hdlcdrv_transmitter(struct net_device *dev, struct hdlcdrv_state *s)
s->hdlctx.len = pkt_len+2; /* the appended CRC */
s->hdlctx.tx_state = 2;
s->hdlctx.bitstream = 0;
- s->stats.tx_packets++;
+ dev->stats.tx_packets++;
break;
case 2:
if (!s->hdlctx.len) {
@@ -427,19 +427,6 @@ static int hdlcdrv_set_mac_address(struct net_device *dev, void *addr)
}
/* --------------------------------------------------------------------- */
-
-static struct net_device_stats *hdlcdrv_get_stats(struct net_device *dev)
-{
- struct hdlcdrv_state *sm = netdev_priv(dev);
-
- /*
- * Get the current statistics. This may be called with the
- * card open or closed.
- */
- return &sm->stats;
-}
-
-/* --------------------------------------------------------------------- */
/*
* Open/initialize the board. This is called (in the current kernel)
* sometime after booting when the 'ifconfig' program is run.
@@ -568,10 +555,10 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
bi.data.cs.ptt = hdlcdrv_ptt(s);
bi.data.cs.dcd = s->hdlcrx.dcd;
bi.data.cs.ptt_keyed = s->ptt_keyed;
- bi.data.cs.tx_packets = s->stats.tx_packets;
- bi.data.cs.tx_errors = s->stats.tx_errors;
- bi.data.cs.rx_packets = s->stats.rx_packets;
- bi.data.cs.rx_errors = s->stats.rx_errors;
+ bi.data.cs.tx_packets = dev->stats.tx_packets;
+ bi.data.cs.tx_errors = dev->stats.tx_errors;
+ bi.data.cs.rx_packets = dev->stats.rx_packets;
+ bi.data.cs.rx_errors = dev->stats.rx_errors;
break;
case HDLCDRVCTL_OLDGETSTAT:
@@ -630,6 +617,14 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* --------------------------------------------------------------------- */
+static const struct net_device_ops hdlcdrv_netdev = {
+ .ndo_open = hdlcdrv_open,
+ .ndo_stop = hdlcdrv_close,
+ .ndo_start_xmit = hdlcdrv_send_packet,
+ .ndo_do_ioctl = hdlcdrv_ioctl,
+ .ndo_set_mac_address = hdlcdrv_set_mac_address,
+};
+
/*
* Initialize fields in hdlcdrv
*/
@@ -669,21 +664,13 @@ static void hdlcdrv_setup(struct net_device *dev)
s->bitbuf_hdlc.shreg = 0x80;
#endif /* HDLCDRV_DEBUG */
- /*
- * initialize the device struct
- */
- dev->open = hdlcdrv_open;
- dev->stop = hdlcdrv_close;
- dev->do_ioctl = hdlcdrv_ioctl;
- dev->hard_start_xmit = hdlcdrv_send_packet;
- dev->get_stats = hdlcdrv_get_stats;
/* Fill in the fields of the device structure */
s->skb = NULL;
+ dev->netdev_ops = &hdlcdrv_netdev;
dev->header_ops = &ax25_header_ops;
- dev->set_mac_address = hdlcdrv_set_mac_address;
dev->type = ARPHRD_AX25; /* AF_AX25 device */
dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index bbdb311b842..ed5b37d4333 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -59,8 +59,6 @@ struct mkiss {
unsigned char *xhead; /* pointer to next byte to XMIT */
int xleft; /* bytes left in XMIT queue */
- struct net_device_stats stats;
-
/* Detailed SLIP statistics. */
int mtu; /* Our mtu (to spot changes!) */
int buffsize; /* Max buffers sizes */
@@ -253,7 +251,7 @@ static void ax_bump(struct mkiss *ax)
if (ax->rbuff[0] > 0x0f) {
if (ax->rbuff[0] & 0x80) {
if (check_crc_16(ax->rbuff, ax->rcount) < 0) {
- ax->stats.rx_errors++;
+ ax->dev->stats.rx_errors++;
spin_unlock_bh(&ax->buflock);
return;
@@ -268,7 +266,7 @@ static void ax_bump(struct mkiss *ax)
*ax->rbuff &= ~0x80;
} else if (ax->rbuff[0] & 0x20) {
if (check_crc_flex(ax->rbuff, ax->rcount) < 0) {
- ax->stats.rx_errors++;
+ ax->dev->stats.rx_errors++;
spin_unlock_bh(&ax->buflock);
return;
}
@@ -295,7 +293,7 @@ static void ax_bump(struct mkiss *ax)
if ((skb = dev_alloc_skb(count)) == NULL) {
printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n",
ax->dev->name);
- ax->stats.rx_dropped++;
+ ax->dev->stats.rx_dropped++;
spin_unlock_bh(&ax->buflock);
return;
}
@@ -303,8 +301,8 @@ static void ax_bump(struct mkiss *ax)
memcpy(skb_put(skb,count), ax->rbuff, count);
skb->protocol = ax25_type_trans(skb, ax->dev);
netif_rx(skb);
- ax->stats.rx_packets++;
- ax->stats.rx_bytes += count;
+ ax->dev->stats.rx_packets++;
+ ax->dev->stats.rx_bytes += count;
spin_unlock_bh(&ax->buflock);
}
@@ -344,7 +342,7 @@ static void kiss_unesc(struct mkiss *ax, unsigned char s)
return;
}
- ax->stats.rx_over_errors++;
+ ax->dev->stats.rx_over_errors++;
set_bit(AXF_ERROR, &ax->flags);
}
spin_unlock_bh(&ax->buflock);
@@ -406,7 +404,7 @@ static void ax_changedmtu(struct mkiss *ax)
memcpy(ax->xbuff, ax->xhead, ax->xleft);
} else {
ax->xleft = 0;
- ax->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
}
}
@@ -417,7 +415,7 @@ static void ax_changedmtu(struct mkiss *ax)
memcpy(ax->rbuff, orbuff, ax->rcount);
} else {
ax->rcount = 0;
- ax->stats.rx_over_errors++;
+ dev->stats.rx_over_errors++;
set_bit(AXF_ERROR, &ax->flags);
}
}
@@ -444,7 +442,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */
len = ax->mtu;
printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n", ax->dev->name);
- ax->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
netif_start_queue(dev);
return;
}
@@ -518,8 +516,8 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
actual = ax->tty->ops->write(ax->tty, ax->xbuff, count);
- ax->stats.tx_packets++;
- ax->stats.tx_bytes += actual;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += actual;
ax->dev->trans_start = jiffies;
ax->xleft = count - actual;
@@ -664,32 +662,28 @@ static int ax_close(struct net_device *dev)
return 0;
}
-static struct net_device_stats *ax_get_stats(struct net_device *dev)
-{
- struct mkiss *ax = netdev_priv(dev);
-
- return &ax->stats;
-}
-
static const struct header_ops ax_header_ops = {
.create = ax_header,
.rebuild = ax_rebuild_header,
};
+static const struct net_device_ops ax_netdev_ops = {
+ .ndo_open = ax_open_dev,
+ .ndo_stop = ax_close,
+ .ndo_start_xmit = ax_xmit,
+ .ndo_set_mac_address = ax_set_mac_address,
+};
+
static void ax_setup(struct net_device *dev)
{
/* Finish setting up the DEVICE info. */
dev->mtu = AX_MTU;
- dev->hard_start_xmit = ax_xmit;
- dev->open = ax_open_dev;
- dev->stop = ax_close;
- dev->get_stats = ax_get_stats;
- dev->set_mac_address = ax_set_mac_address;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->type = ARPHRD_AX25;
dev->tx_queue_len = 10;
dev->header_ops = &ax_header_ops;
+ dev->netdev_ops = &ax_netdev_ops;
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
@@ -929,7 +923,7 @@ static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
while (count--) {
if (fp != NULL && *fp++) {
if (!test_and_set_bit(AXF_ERROR, &ax->flags))
- ax->stats.rx_errors++;
+ ax->dev->stats.rx_errors++;
cp++;
continue;
}
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index c011af7088e..2acb18f0697 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1542,23 +1542,24 @@ static int scc_net_alloc(const char *name, struct scc_channel *scc)
/* * Network driver methods * */
/* ******************************************************************** */
+static const struct net_device_ops scc_netdev_ops = {
+ .ndo_open = scc_net_open,
+ .ndo_stop = scc_net_close,
+ .ndo_start_xmit = scc_net_tx,
+ .ndo_set_mac_address = scc_net_set_mac_address,
+ .ndo_get_stats = scc_net_get_stats,
+ .ndo_do_ioctl = scc_net_ioctl,
+};
+
/* ----> Initialize device <----- */
static void scc_net_setup(struct net_device *dev)
{
dev->tx_queue_len = 16; /* should be enough... */
- dev->open = scc_net_open;
- dev->stop = scc_net_close;
-
- dev->hard_start_xmit = scc_net_tx;
+ dev->netdev_ops = &scc_netdev_ops;
dev->header_ops = &ax25_header_ops;
- dev->set_mac_address = scc_net_set_mac_address;
- dev->get_stats = scc_net_get_stats;
- dev->do_ioctl = scc_net_ioctl;
- dev->tx_timeout = NULL;
-
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
@@ -2073,7 +2074,7 @@ static int scc_net_seq_show(struct seq_file *seq, void *v)
return 0;
}
-static struct seq_operations scc_net_seq_ops = {
+static const struct seq_operations scc_net_seq_ops = {
.start = scc_net_seq_start,
.next = scc_net_seq_next,
.stop = scc_net_seq_stop,
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 5407f7486c9..82a8be7613d 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -115,10 +115,6 @@ struct yam_port {
struct net_device *dev;
- /* Stats section */
-
- struct net_device_stats stats;
-
int nb_rxint;
int nb_mdint;
@@ -507,7 +503,7 @@ static inline void yam_rx_flag(struct net_device *dev, struct yam_port *yp)
} else {
if (!(skb = dev_alloc_skb(pkt_len))) {
printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name);
- ++yp->stats.rx_dropped;
+ ++dev->stats.rx_dropped;
} else {
unsigned char *cp;
cp = skb_put(skb, pkt_len);
@@ -515,7 +511,7 @@ static inline void yam_rx_flag(struct net_device *dev, struct yam_port *yp)
memcpy(cp, yp->rx_buf, pkt_len - 1);
skb->protocol = ax25_type_trans(skb, dev);
netif_rx(skb);
- ++yp->stats.rx_packets;
+ ++dev->stats.rx_packets;
}
}
}
@@ -677,7 +673,7 @@ static void yam_tx_byte(struct net_device *dev, struct yam_port *yp)
yp->tx_count = 1;
yp->tx_state = TX_HEAD;
}
- ++yp->stats.tx_packets;
+ ++dev->stats.tx_packets;
break;
case TX_TAIL:
if (--yp->tx_count <= 0) {
@@ -716,7 +712,7 @@ static irqreturn_t yam_interrupt(int irq, void *dev_id)
handled = 1;
if (lsr & LSR_OE)
- ++yp->stats.rx_fifo_errors;
+ ++dev->stats.rx_fifo_errors;
yp->dcd = (msr & RX_DCD) ? 1 : 0;
@@ -778,16 +774,16 @@ static int yam_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, " TxTail %u\n", yp->txtail);
seq_printf(seq, " SlotTime %u\n", yp->slot);
seq_printf(seq, " Persist %u\n", yp->pers);
- seq_printf(seq, " TxFrames %lu\n", yp->stats.tx_packets);
- seq_printf(seq, " RxFrames %lu\n", yp->stats.rx_packets);
+ seq_printf(seq, " TxFrames %lu\n", dev->stats.tx_packets);
+ seq_printf(seq, " RxFrames %lu\n", dev->stats.rx_packets);
seq_printf(seq, " TxInt %u\n", yp->nb_mdint);
seq_printf(seq, " RxInt %u\n", yp->nb_rxint);
- seq_printf(seq, " RxOver %lu\n", yp->stats.rx_fifo_errors);
+ seq_printf(seq, " RxOver %lu\n", dev->stats.rx_fifo_errors);
seq_printf(seq, "\n");
return 0;
}
-static struct seq_operations yam_seqops = {
+static const struct seq_operations yam_seqops = {
.start = yam_seq_start,
.next = yam_seq_next,
.stop = yam_seq_stop,
@@ -812,26 +808,6 @@ static const struct file_operations yam_info_fops = {
/* --------------------------------------------------------------------- */
-static struct net_device_stats *yam_get_stats(struct net_device *dev)
-{
- struct yam_port *yp;
-
- if (!dev)
- return NULL;
-
- yp = netdev_priv(dev);
- if (yp->magic != YAM_MAGIC)
- return NULL;
-
- /*
- * Get the current statistics. This may be called with the
- * card open or closed.
- */
- return &yp->stats;
-}
-
-/* --------------------------------------------------------------------- */
-
static int yam_open(struct net_device *dev)
{
struct yam_port *yp = netdev_priv(dev);
@@ -878,9 +854,9 @@ static int yam_open(struct net_device *dev)
/* Reset overruns for all ports - FPGA programming makes overruns */
for (i = 0; i < NR_PORTS; i++) {
struct net_device *dev = yam_devs[i];
- struct yam_port *yp = netdev_priv(dev);
+
inb(LSR(dev->base_addr));
- yp->stats.rx_fifo_errors = 0;
+ dev->stats.rx_fifo_errors = 0;
}
printk(KERN_INFO "%s at iobase 0x%lx irq %u uart %s\n", dev->name, dev->base_addr, dev->irq,
@@ -1068,6 +1044,14 @@ static int yam_set_mac_address(struct net_device *dev, void *addr)
/* --------------------------------------------------------------------- */
+static const struct net_device_ops yam_netdev_ops = {
+ .ndo_open = yam_open,
+ .ndo_stop = yam_close,
+ .ndo_start_xmit = yam_send_packet,
+ .ndo_do_ioctl = yam_ioctl,
+ .ndo_set_mac_address = yam_set_mac_address,
+};
+
static void yam_setup(struct net_device *dev)
{
struct yam_port *yp = netdev_priv(dev);
@@ -1088,18 +1072,11 @@ static void yam_setup(struct net_device *dev)
dev->base_addr = yp->iobase;
dev->irq = yp->irq;
- dev->open = yam_open;
- dev->stop = yam_close;
- dev->do_ioctl = yam_ioctl;
- dev->hard_start_xmit = yam_send_packet;
- dev->get_stats = yam_get_stats;
-
skb_queue_head_init(&yp->send_queue);
+ dev->netdev_ops = &yam_netdev_ops;
dev->header_ops = &ax25_header_ops;
- dev->set_mac_address = yam_set_mac_address;
-
dev->type = ARPHRD_AX25;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
dev->mtu = AX25_MTU;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index dfa6348ac1d..5c6315df86b 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1028,10 +1028,10 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
ibmveth_assert(lpar_rc == H_SUCCESS);
- netif_rx_complete(napi);
+ napi_complete(napi);
if (ibmveth_rxq_pending_buffer(adapter) &&
- netif_rx_reschedule(napi)) {
+ napi_reschedule(napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_DISABLE);
goto restart_poll;
@@ -1047,11 +1047,11 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
struct ibmveth_adapter *adapter = netdev_priv(netdev);
unsigned long lpar_rc;
- if (netif_rx_schedule_prep(&adapter->napi)) {
+ if (napi_schedule_prep(&adapter->napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_DISABLE);
ibmveth_assert(lpar_rc == H_SUCCESS);
- __netif_rx_schedule(&adapter->napi);
+ __napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
}
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index f5e2e7235fc..9b367ba8e26 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -1103,6 +1103,13 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
E1000_CTRL_SWDPIN1;
wr32(E1000_CTRL, reg);
+ /* Power on phy for 82576 fiber adapters */
+ if (hw->mac.type == e1000_82576) {
+ reg = rd32(E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_SDP7_DATA;
+ wr32(E1000_CTRL_EXT, reg);
+ }
+
/* Set switch control to serdes energy detect */
reg = rd32(E1000_CONNSW);
reg |= E1000_CONNSW_ENRGSRC;
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 5a27825cc48..7d8c8873915 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -36,12 +36,6 @@
struct igb_adapter;
-#ifdef CONFIG_IGB_LRO
-#include <linux/inet_lro.h>
-#define MAX_LRO_AGGR 32
-#define MAX_LRO_DESCRIPTORS 8
-#endif
-
/* Interrupt defines */
#define IGB_MIN_DYN_ITR 3000
#define IGB_MAX_DYN_ITR 96000
@@ -176,10 +170,6 @@ struct igb_ring {
struct napi_struct napi;
int set_itr;
struct igb_ring *buddy;
-#ifdef CONFIG_IGB_LRO
- struct net_lro_mgr lro_mgr;
- bool lro_used;
-#endif
};
};
@@ -288,12 +278,6 @@ struct igb_adapter {
int need_ioport;
struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES];
-#ifdef CONFIG_IGB_LRO
- unsigned int lro_max_aggr;
- unsigned int lro_aggregated;
- unsigned int lro_flushed;
- unsigned int lro_no_desc;
-#endif
unsigned int tx_ring_count;
unsigned int rx_ring_count;
};
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 3c831f1472a..4606e63fc6f 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -93,11 +93,6 @@ static const struct igb_stats igb_gstrings_stats[] = {
{ "tx_smbus", IGB_STAT(stats.mgptc) },
{ "rx_smbus", IGB_STAT(stats.mgprc) },
{ "dropped_smbus", IGB_STAT(stats.mgpdc) },
-#ifdef CONFIG_IGB_LRO
- { "lro_aggregated", IGB_STAT(lro_aggregated) },
- { "lro_flushed", IGB_STAT(lro_flushed) },
- { "lro_no_desc", IGB_STAT(lro_no_desc) },
-#endif
};
#define IGB_QUEUE_STATS_LEN \
@@ -1921,18 +1916,6 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
int j;
int i;
-#ifdef CONFIG_IGB_LRO
- int aggregated = 0, flushed = 0, no_desc = 0;
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
- flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
- no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
- }
- adapter->lro_aggregated = aggregated;
- adapter->lro_flushed = flushed;
- adapter->lro_no_desc = no_desc;
-#endif
igb_update_stats(adapter);
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index b82b0fb2056..e11043d90db 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -115,9 +115,6 @@ static bool igb_clean_tx_irq(struct igb_ring *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
-#ifdef CONFIG_IGB_LRO
-static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *);
-#endif
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
@@ -1189,7 +1186,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_TSO6;
#ifdef CONFIG_IGB_LRO
- netdev->features |= NETIF_F_LRO;
+ netdev->features |= NETIF_F_GRO;
#endif
netdev->vlan_features |= NETIF_F_TSO;
@@ -1200,7 +1197,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->features |= NETIF_F_LLTX;
adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
/* before reading the NVM, reset the controller to put the device in a
@@ -1739,14 +1735,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
struct pci_dev *pdev = adapter->pdev;
int size, desc_len;
-#ifdef CONFIG_IGB_LRO
- size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS;
- rx_ring->lro_mgr.lro_arr = vmalloc(size);
- if (!rx_ring->lro_mgr.lro_arr)
- goto err;
- memset(rx_ring->lro_mgr.lro_arr, 0, size);
-#endif
-
size = sizeof(struct igb_buffer) * rx_ring->count;
rx_ring->buffer_info = vmalloc(size);
if (!rx_ring->buffer_info)
@@ -1773,10 +1761,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
return 0;
err:
-#ifdef CONFIG_IGB_LRO
- vfree(rx_ring->lro_mgr.lro_arr);
- rx_ring->lro_mgr.lro_arr = NULL;
-#endif
vfree(rx_ring->buffer_info);
dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
"the receive descriptor ring\n");
@@ -1930,16 +1914,6 @@ static void igb_configure_rx(struct igb_adapter *adapter)
rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16;
wr32(E1000_RXDCTL(j), rxdctl);
-#ifdef CONFIG_IGB_LRO
- /* Intitial LRO Settings */
- ring->lro_mgr.max_aggr = MAX_LRO_AGGR;
- ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
- ring->lro_mgr.get_skb_header = igb_get_skb_hdr;
- ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
- ring->lro_mgr.dev = adapter->netdev;
- ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
- ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-#endif
}
if (adapter->num_rx_queues > 1) {
@@ -2128,11 +2102,6 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
-#ifdef CONFIG_IGB_LRO
- vfree(rx_ring->lro_mgr.lro_arr);
- rx_ring->lro_mgr.lro_arr = NULL;
-#endif
-
pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
@@ -3386,8 +3355,8 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
igb_write_itr(rx_ring);
- if (netif_rx_schedule_prep(&rx_ring->napi))
- __netif_rx_schedule(&rx_ring->napi);
+ if (napi_schedule_prep(&rx_ring->napi))
+ __napi_schedule(&rx_ring->napi);
#ifdef CONFIG_IGB_DCA
if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
@@ -3539,7 +3508,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- netif_rx_schedule(&adapter->rx_ring[0].napi);
+ napi_schedule(&adapter->rx_ring[0].napi);
return IRQ_HANDLED;
}
@@ -3577,7 +3546,7 @@ static irqreturn_t igb_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- netif_rx_schedule(&adapter->rx_ring[0].napi);
+ napi_schedule(&adapter->rx_ring[0].napi);
return IRQ_HANDLED;
}
@@ -3612,7 +3581,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
!netif_running(netdev)) {
if (adapter->itr_setting & 3)
igb_set_itr(adapter);
- netif_rx_complete(napi);
+ napi_complete(napi);
if (!test_bit(__IGB_DOWN, &adapter->state))
igb_irq_enable(adapter);
return 0;
@@ -3638,7 +3607,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
/* If not enough Rx work done, exit the polling mode */
if ((work_done == 0) || !netif_running(netdev)) {
- netif_rx_complete(napi);
+ napi_complete(napi);
if (adapter->itr_setting & 3) {
if (adapter->num_rx_queues == 1)
@@ -3768,39 +3737,6 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
return (count < tx_ring->count);
}
-#ifdef CONFIG_IGB_LRO
- /**
- * igb_get_skb_hdr - helper function for LRO header processing
- * @skb: pointer to sk_buff to be added to LRO packet
- * @iphdr: pointer to ip header structure
- * @tcph: pointer to tcp header structure
- * @hdr_flags: pointer to header flags
- * @priv: pointer to the receive descriptor for the current sk_buff
- **/
-static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
- u64 *hdr_flags, void *priv)
-{
- union e1000_adv_rx_desc *rx_desc = priv;
- u16 pkt_type = rx_desc->wb.lower.lo_dword.pkt_info &
- (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP);
-
- /* Verify that this is a valid IPv4 TCP packet */
- if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 |
- E1000_RXDADV_PKTTYPE_TCP))
- return -1;
-
- /* Set network headers */
- skb_reset_network_header(skb);
- skb_set_transport_header(skb, ip_hdrlen(skb));
- *iphdr = ip_hdr(skb);
- *tcph = tcp_hdr(skb);
- *hdr_flags = LRO_IPV4 | LRO_TCP;
-
- return 0;
-
-}
-#endif /* CONFIG_IGB_LRO */
-
/**
* igb_receive_skb - helper function to handle rx indications
* @ring: pointer to receive ring receiving this packet
@@ -3815,28 +3751,20 @@ static void igb_receive_skb(struct igb_ring *ring, u8 status,
struct igb_adapter * adapter = ring->adapter;
bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
-#ifdef CONFIG_IGB_LRO
- if (adapter->netdev->features & NETIF_F_LRO &&
- skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
if (vlan_extracted)
- lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
- adapter->vlgrp,
- le16_to_cpu(rx_desc->wb.upper.vlan),
- rx_desc);
+ vlan_gro_receive(&ring->napi, adapter->vlgrp,
+ le16_to_cpu(rx_desc->wb.upper.vlan),
+ skb);
else
- lro_receive_skb(&ring->lro_mgr,skb, rx_desc);
- ring->lro_used = 1;
+ napi_gro_receive(&ring->napi, skb);
} else {
-#endif
if (vlan_extracted)
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
le16_to_cpu(rx_desc->wb.upper.vlan));
else
-
netif_receive_skb(skb);
-#ifdef CONFIG_IGB_LRO
}
-#endif
}
@@ -3991,13 +3919,6 @@ next_desc:
rx_ring->next_to_clean = i;
cleaned_count = IGB_DESC_UNUSED(rx_ring);
-#ifdef CONFIG_IGB_LRO
- if (rx_ring->lro_used) {
- lro_flush_all(&rx_ring->lro_mgr);
- rx_ring->lro_used = 0;
- }
-#endif
-
if (cleaned_count)
igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
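The igb hunks above swap the inet_lro receive path for GRO: frames whose checksum the hardware already verified go through napi_gro_receive() (or vlan_gro_receive() when a VLAN tag was stripped), and the per-ring lro_mgr setup, flushing and statistics disappear. A rough sketch of the resulting rx indication, with foo_receive_skb() being hypothetical:

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

/* hypothetical rx indication: hand the skb to GRO when checksummed in hw */
static void foo_receive_skb(struct napi_struct *napi,
                            struct vlan_group *vlgrp, u16 vlan_tag,
                            struct sk_buff *skb)
{
        if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                if (vlgrp && vlan_tag)
                        vlan_gro_receive(napi, vlgrp, vlan_tag, skb);
                else
                        napi_gro_receive(napi, skb);
        } else {
                netif_receive_skb(skb);
        }
}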
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index eee28d39568..e2ef16b2970 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1721,14 +1721,14 @@ ixgb_intr(int irq, void *data)
if (!test_bit(__IXGB_DOWN, &adapter->flags))
mod_timer(&adapter->watchdog_timer, jiffies);
- if (netif_rx_schedule_prep(&adapter->napi)) {
+ if (napi_schedule_prep(&adapter->napi)) {
/* Disable interrupts and register for poll. The flush
of the posted write is intentionally left out.
*/
IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
- __netif_rx_schedule(&adapter->napi);
+ __napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
}
@@ -1749,7 +1749,7 @@ ixgb_clean(struct napi_struct *napi, int budget)
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
if (!test_bit(__IXGB_DOWN, &adapter->flags))
ixgb_irq_enable(adapter);
}
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index e112008f39c..6ac361a4b8a 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -31,7 +31,6 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
-#include <linux/inet_lro.h>
#include <linux/aer.h>
#include "ixgbe_type.h"
@@ -88,9 +87,6 @@
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
-#define IXGBE_MAX_LRO_DESCRIPTORS 8
-#define IXGBE_MAX_LRO_AGGREGATE 32
-
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
@@ -142,8 +138,6 @@ struct ixgbe_ring {
/* cpu for tx queue */
int cpu;
#endif
- struct net_lro_mgr lro_mgr;
- bool lro_used;
struct ixgbe_queue_stats stats;
u16 v_idx; /* maps directly to the index for this ring in the hardware
* vector array, can also be used for finding the bit in EICR
@@ -301,9 +295,6 @@ struct ixgbe_adapter {
unsigned long state;
u64 tx_busy;
- u64 lro_aggregated;
- u64 lro_flushed;
- u64 lro_no_desc;
unsigned int tx_ring_count;
unsigned int rx_ring_count;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 67f87a79154..4f6b5dfc78a 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -89,8 +89,6 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"rx_header_split", IXGBE_STAT(rx_hdr_split)},
{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
- {"lro_aggregated", IXGBE_STAT(lro_aggregated)},
- {"lro_flushed", IXGBE_STAT(lro_flushed)},
};
#define IXGBE_QUEUE_STATS_LEN \
@@ -808,15 +806,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
int j, k;
int i;
- u64 aggregated = 0, flushed = 0, no_desc = 0;
- for (i = 0; i < adapter->num_rx_queues; i++) {
- aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
- flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
- no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
- }
- adapter->lro_aggregated = aggregated;
- adapter->lro_flushed = flushed;
- adapter->lro_no_desc = no_desc;
ixgbe_update_stats(adapter);
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index d2f4d5f508b..f7b592eff68 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -403,23 +403,20 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
* @rx_ring: rx descriptor ring (for a specific queue) to setup
* @rx_desc: rx descriptor
**/
-static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
+static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb, u8 status,
- struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc)
{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct napi_struct *napi = &q_vector->napi;
bool is_vlan = (status & IXGBE_RXD_STAT_VP);
u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
- if (adapter->netdev->features & NETIF_F_LRO &&
- skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
if (adapter->vlgrp && is_vlan && (tag != 0))
- lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
- adapter->vlgrp, tag,
- rx_desc);
+ vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
else
- lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
- ring->lro_used = true;
+ napi_gro_receive(napi, skb);
} else {
if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
if (adapter->vlgrp && is_vlan && (tag != 0))
@@ -574,10 +571,11 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}
-static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
+static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
int *work_done, int work_to_do)
{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
@@ -678,7 +676,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
total_rx_packets++;
skb->protocol = eth_type_trans(skb, adapter->netdev);
- ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
+ ixgbe_receive_skb(q_vector, skb, staterr, rx_desc);
next_desc:
rx_desc->wb.upper.status_error = 0;
@@ -696,11 +694,6 @@ next_desc:
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
}
- if (rx_ring->lro_used) {
- lro_flush_all(&rx_ring->lro_mgr);
- rx_ring->lro_used = false;
- }
-
rx_ring->next_to_clean = i;
cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
@@ -1015,7 +1008,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
rx_ring = &(adapter->rx_ring[r_idx]);
/* disable interrupts on this vector only */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
- netif_rx_schedule(&q_vector->napi);
+ napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -1052,11 +1045,11 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
ixgbe_update_rx_dca(adapter, rx_ring);
#endif
- ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
+ ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
if (adapter->itr_setting & 3)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -1095,7 +1088,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_rx_dca(adapter, rx_ring);
#endif
- ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
+ ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
enable_mask |= rx_ring->v_idx;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
r_idx + 1);
@@ -1105,7 +1098,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
rx_ring = &(adapter->rx_ring[r_idx]);
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
if (adapter->itr_setting & 3)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -1381,13 +1374,13 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
- if (netif_rx_schedule_prep(&adapter->q_vector[0].napi)) {
+ if (napi_schedule_prep(&adapter->q_vector[0].napi)) {
adapter->tx_ring[0].total_packets = 0;
adapter->tx_ring[0].total_bytes = 0;
adapter->rx_ring[0].total_packets = 0;
adapter->rx_ring[0].total_bytes = 0;
/* would disable interrupts here but EIAM disabled it */
- __netif_rx_schedule(&adapter->q_vector[0].napi);
+ __napi_schedule(&adapter->q_vector[0].napi);
}
return IRQ_HANDLED;
@@ -1568,33 +1561,6 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
}
-/**
- * ixgbe_get_skb_hdr - helper function for LRO header processing
- * @skb: pointer to sk_buff to be added to LRO packet
- * @iphdr: pointer to ip header structure
- * @tcph: pointer to tcp header structure
- * @hdr_flags: pointer to header flags
- * @priv: private data
- **/
-static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
- u64 *hdr_flags, void *priv)
-{
- union ixgbe_adv_rx_desc *rx_desc = priv;
-
- /* Verify that this is a valid IPv4 TCP packet */
- if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
- (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
- return -1;
-
- /* Set network headers */
- skb_reset_network_header(skb);
- skb_set_transport_header(skb, ip_hdrlen(skb));
- *iphdr = ip_hdr(skb);
- *tcph = tcp_hdr(skb);
- *hdr_flags = LRO_IPV4 | LRO_TCP;
- return 0;
-}
-
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
@@ -1666,16 +1632,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
adapter->rx_ring[i].head = IXGBE_RDH(j);
adapter->rx_ring[i].tail = IXGBE_RDT(j);
adapter->rx_ring[i].rx_buf_len = rx_buf_len;
- /* Intitial LRO Settings */
- adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
- adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
- adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
- adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
- if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
- adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
- adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
- adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
- adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
ixgbe_configure_srrctl(adapter, j);
}
@@ -2310,14 +2266,14 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
#endif
tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
- ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);
+ ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
if (tx_cleaned)
work_done = budget;
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
if (adapter->itr_setting & 3)
ixgbe_set_itr(adapter);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2926,12 +2882,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
struct pci_dev *pdev = adapter->pdev;
int size;
- size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
- rx_ring->lro_mgr.lro_arr = vmalloc(size);
- if (!rx_ring->lro_mgr.lro_arr)
- return -ENOMEM;
- memset(rx_ring->lro_mgr.lro_arr, 0, size);
-
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info) {
@@ -2960,8 +2910,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
return 0;
alloc_failed:
- vfree(rx_ring->lro_mgr.lro_arr);
- rx_ring->lro_mgr.lro_arr = NULL;
return -ENOMEM;
}
@@ -3039,9 +2987,6 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
{
struct pci_dev *pdev = adapter->pdev;
- vfree(rx_ring->lro_mgr.lro_arr);
- rx_ring->lro_mgr.lro_arr = NULL;
-
ixgbe_clean_rx_ring(adapter, rx_ring);
vfree(rx_ring->rx_buffer_info);
@@ -4141,7 +4086,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_IPV6_CSUM;
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
- netdev->features |= NETIF_F_LRO;
+ netdev->features |= NETIF_F_GRO;
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 01474572056..d3bf2f017cc 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -141,7 +141,7 @@ static int ixpdev_poll(struct napi_struct *napi, int budget)
break;
} while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff);
- netif_rx_complete(napi);
+ napi_complete(napi);
ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff);
return rx;
@@ -204,7 +204,7 @@ static irqreturn_t ixpdev_interrupt(int irq, void *dev_id)
ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
if (likely(napi_schedule_prep(&ip->napi))) {
- __netif_rx_schedule(&ip->napi);
+ __napi_schedule(&ip->napi);
} else {
printk(KERN_CRIT "ixp2000: irq while polling!!\n");
}
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 5154411b5e6..e321c678b11 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -398,15 +398,15 @@ struct jme_ring {
#define JME_NAPI_WEIGHT(w) int w
#define JME_NAPI_WEIGHT_VAL(w) w
#define JME_NAPI_WEIGHT_SET(w, r)
-#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(napis)
+#define JME_RX_COMPLETE(dev, napis) napi_complete(napis)
#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi);
#define JME_NAPI_DISABLE(priv) \
if (!napi_disable_pending(&priv->napi)) \
napi_disable(&priv->napi);
#define JME_RX_SCHEDULE_PREP(priv) \
- netif_rx_schedule_prep(&priv->napi)
+ napi_schedule_prep(&priv->napi)
#define JME_RX_SCHEDULE(priv) \
- __netif_rx_schedule(&priv->napi);
+ __napi_schedule(&priv->napi);
/*
* Jmac Adapter Private data
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 75010cac76a..38d6649a29c 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -334,7 +334,7 @@ static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
DMA_STAT_HALT | DMA_STAT_ERR),
&lp->rx_dma_regs->dmasm);
- netif_rx_schedule(&lp->napi);
+ napi_schedule(&lp->napi);
if (dmas & DMA_STAT_ERR)
printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name);
@@ -468,7 +468,7 @@ static int korina_poll(struct napi_struct *napi, int budget)
work_done = korina_rx(dev, budget);
if (work_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
writel(readl(&lp->rx_dma_regs->dmasm) &
~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index f6c4936e2fa..dc33d51213d 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -527,7 +527,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
* this function was called last time, and no packets
* have been received since.
*/
- netif_rx_complete(napi);
+ napi_complete(napi);
goto out;
}
@@ -538,13 +538,13 @@ static int macb_poll(struct napi_struct *napi, int budget)
dev_warn(&bp->pdev->dev,
"No RX buffers complete, status = %02lx\n",
(unsigned long)status);
- netif_rx_complete(napi);
+ napi_complete(napi);
goto out;
}
work_done = macb_rx(bp, budget);
if (work_done < budget)
- netif_rx_complete(napi);
+ napi_complete(napi);
/*
* We've done what we can to clean the buffers. Make sure we
@@ -579,7 +579,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_RX_INT_FLAGS) {
- if (netif_rx_schedule_prep(&bp->napi)) {
+ if (napi_schedule_prep(&bp->napi)) {
/*
* There's no point taking any more interrupts
* until we have processed the buffers
@@ -587,7 +587,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
dev_dbg(&bp->pdev->dev,
"scheduling RX softirq\n");
- __netif_rx_schedule(&bp->napi);
+ __napi_schedule(&bp->napi);
}
}
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index c61b0bdca1a..ac55ebd2f14 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -814,7 +814,7 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
if (priv->port_up)
- netif_rx_schedule(&cq->napi);
+ napi_schedule(&cq->napi);
else
mlx4_en_arm_cq(priv, cq);
}
@@ -834,7 +834,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
INC_PERF_COUNTER(priv->pstats.napi_quota);
else {
/* Done for now */
- netif_rx_complete(napi);
+ napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
}
return done;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index e9c1296b267..2dacb8852dc 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1514,7 +1514,7 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
work_done = myri10ge_clean_rx_done(ss, budget);
if (work_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
put_be32(htonl(3), ss->irq_claim);
}
return work_done;
@@ -1532,7 +1532,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
/* an interrupt on a non-zero receive-only slice is implicitly
* valid since MSI-X irqs are not shared */
if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
- netif_rx_schedule(&ss->napi);
+ napi_schedule(&ss->napi);
return (IRQ_HANDLED);
}
@@ -1543,7 +1543,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
/* low bit indicates receives are present, so schedule
* napi poll handler */
if (stats->valid & 1)
- netif_rx_schedule(&ss->napi);
+ napi_schedule(&ss->napi);
if (!mgp->msi_enabled && !mgp->msix_enabled) {
put_be32(0, mgp->irq_deassert);
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index c5dec54251b..c23a58624a3 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -2198,10 +2198,10 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);
- if (netif_rx_schedule_prep(&np->napi)) {
+ if (napi_schedule_prep(&np->napi)) {
/* Disable interrupts and register for poll */
natsemi_irq_disable(dev);
- __netif_rx_schedule(&np->napi);
+ __napi_schedule(&np->napi);
} else
printk(KERN_WARNING
"%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
@@ -2253,7 +2253,7 @@ static int natsemi_poll(struct napi_struct *napi, int budget)
np->intr_status = readl(ioaddr + IntrStatus);
} while (np->intr_status);
- netif_rx_complete(napi);
+ napi_complete(napi);
/* Reenable interrupts providing nothing is trying to shut
* the chip down. */
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 645d384fe87..cc06cc5429f 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1640,7 +1640,7 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
}
if ((work_done < budget) && tx_complete) {
- netif_rx_complete(&adapter->napi);
+ napi_complete(&adapter->napi);
netxen_nic_enable_int(adapter);
}
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 0c0b752315c..4a5a089fa30 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3669,7 +3669,7 @@ static int niu_poll(struct napi_struct *napi, int budget)
work_done = niu_poll_core(np, lp, budget);
if (work_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
niu_ldg_rearm(np, lp, 1);
}
return work_done;
@@ -4088,12 +4088,12 @@ static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
u64 v0, u64 v1, u64 v2)
{
- if (likely(netif_rx_schedule_prep(&lp->napi))) {
+ if (likely(napi_schedule_prep(&lp->napi))) {
lp->v0 = v0;
lp->v1 = v1;
lp->v2 = v2;
__niu_fastpath_interrupt(np, lp->ldg_num, v0);
- __netif_rx_schedule(&lp->napi);
+ __napi_schedule(&lp->napi);
}
}
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index d0349e7d73e..5eeb5a87b73 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -970,7 +970,7 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
if (*chan->status & PAS_STATUS_ERROR)
reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;
- netif_rx_schedule(&mac->napi);
+ napi_schedule(&mac->napi);
write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg);
@@ -1010,7 +1010,7 @@ static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2);
- netif_rx_schedule(&mac->napi);
+ napi_schedule(&mac->napi);
if (reg)
write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg);
@@ -1639,7 +1639,7 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget)
pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
if (pkts < budget) {
/* all done, no more packets present */
- netif_rx_complete(napi);
+ napi_complete(napi);
pasemi_mac_restart_rx_intr(mac);
pasemi_mac_restart_tx_intr(mac);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 665a4286da3..80124fac65f 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1397,7 +1397,7 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
spin_lock_irqsave(&lp->lock, flags);
- __netif_rx_complete(napi);
+ __napi_complete(napi);
/* clear interrupt masks */
val = lp->a.read_csr(ioaddr, CSR3);
@@ -2592,14 +2592,14 @@ pcnet32_interrupt(int irq, void *dev_id)
dev->name, csr0);
/* unlike for the lance, there is no restart needed */
}
- if (netif_rx_schedule_prep(&lp->napi)) {
+ if (napi_schedule_prep(&lp->napi)) {
u16 val;
/* set interrupt masks */
val = lp->a.read_csr(ioaddr, CSR3);
val |= 0x5f00;
lp->a.write_csr(ioaddr, CSR3, val);
mmiowb();
- __netif_rx_schedule(&lp->napi);
+ __napi_schedule(&lp->napi);
break;
}
csr0 = lp->a.read_csr(ioaddr, CSR0);
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index a439ebeb431..3f460c56492 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -200,16 +200,21 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
{
struct device_node *np = NULL;
struct mdio_gpio_platform_data *pdata;
+ int ret;
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- pdata->mdc = of_get_gpio(ofdev->node, 0);
- pdata->mdio = of_get_gpio(ofdev->node, 1);
-
- if (pdata->mdc < 0 || pdata->mdio < 0)
+ ret = of_get_gpio(ofdev->node, 0);
+ if (ret < 0)
goto out_free;
+ pdata->mdc = ret;
+
+ ret = of_get_gpio(ofdev->node, 1);
+ if (ret < 0)
+ goto out_free;
+ pdata->mdio = ret;
while ((np = of_get_next_child(ofdev->node, np)))
if (!strcmp(np->type, "ethernet-phy"))
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 7b2728b8f1b..4405a76ed3d 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -49,6 +49,10 @@
#include <net/slhc_vj.h>
#include <asm/atomic.h>
+#include <linux/nsproxy.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
#define PPP_VERSION "2.4.2"
/*
@@ -131,6 +135,7 @@ struct ppp {
struct sock_filter *active_filter;/* filter for pkts to reset idle */
unsigned pass_len, active_len;
#endif /* CONFIG_PPP_FILTER */
+ struct net *ppp_net; /* the net we belong to */
};
/*
@@ -155,6 +160,7 @@ struct channel {
struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */
spinlock_t downl; /* protects `chan', file.xq dequeue */
struct ppp *ppp; /* ppp unit we're connected to */
+ struct net *chan_net; /* the net channel belongs to */
struct list_head clist; /* link in list of channels per unit */
rwlock_t upl; /* protects `ppp' */
#ifdef CONFIG_PPP_MULTILINK
@@ -173,26 +179,35 @@ struct channel {
* channel.downl.
*/
-/*
- * all_ppp_mutex protects the all_ppp_units mapping.
- * It also ensures that finding a ppp unit in the all_ppp_units map
- * and updating its file.refcnt field is atomic.
- */
-static DEFINE_MUTEX(all_ppp_mutex);
static atomic_t ppp_unit_count = ATOMIC_INIT(0);
-static DEFINE_IDR(ppp_units_idr);
-
-/*
- * all_channels_lock protects all_channels and last_channel_index,
- * and the atomicity of find a channel and updating its file.refcnt
- * field.
- */
-static DEFINE_SPINLOCK(all_channels_lock);
-static LIST_HEAD(all_channels);
-static LIST_HEAD(new_channels);
-static int last_channel_index;
static atomic_t channel_count = ATOMIC_INIT(0);
+/* per-net private data for this module */
+static unsigned int ppp_net_id;
+struct ppp_net {
+ /* units to ppp mapping */
+ struct idr units_idr;
+
+ /*
+ * all_ppp_mutex protects the units_idr mapping.
+ * It also ensures that finding a ppp unit in the units_idr
+ * map and updating its file.refcnt field is atomic.
+ */
+ struct mutex all_ppp_mutex;
+
+ /* channels */
+ struct list_head all_channels;
+ struct list_head new_channels;
+ int last_channel_index;
+
+ /*
+ * all_channels_lock protects all_channels and
+ * last_channel_index, and the atomicity of find
+ * a channel and updating its file.refcnt field.
+ */
+ spinlock_t all_channels_lock;
+};
+
/* Get the PPP protocol number from a skb */
#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1])
@@ -216,8 +231,8 @@ static atomic_t channel_count = ATOMIC_INIT(0);
#define seq_after(a, b) ((s32)((a) - (b)) > 0)
/* Prototypes. */
-static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
- unsigned int cmd, unsigned long arg);
+static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
+ struct file *file, unsigned int cmd, unsigned long arg);
static void ppp_xmit_process(struct ppp *ppp);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
@@ -240,12 +255,12 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(int unit, int *retp);
+static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
static void init_ppp_file(struct ppp_file *pf, int kind);
static void ppp_shutdown_interface(struct ppp *ppp);
static void ppp_destroy_interface(struct ppp *ppp);
-static struct ppp *ppp_find_unit(int unit);
-static struct channel *ppp_find_channel(int unit);
+static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
+static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
static int ppp_connect_channel(struct channel *pch, int unit);
static int ppp_disconnect_channel(struct channel *pch);
static void ppp_destroy_channel(struct channel *pch);
@@ -256,6 +271,14 @@ static void *unit_find(struct idr *p, int n);
static struct class *ppp_class;
+/* per net-namespace data */
+static inline struct ppp_net *ppp_pernet(struct net *net)
+{
+ BUG_ON(!net);
+
+ return net_generic(net, ppp_net_id);
+}
+
/* Translates a PPP protocol number to a NP index (NP == network protocol) */
static inline int proto_to_npindex(int proto)
{
@@ -544,7 +567,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
int __user *p = argp;
if (!pf)
- return ppp_unattached_ioctl(pf, file, cmd, arg);
+ return ppp_unattached_ioctl(current->nsproxy->net_ns,
+ pf, file, cmd, arg);
if (cmd == PPPIOCDETACH) {
/*
@@ -763,12 +787,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return err;
}
-static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
+ struct file *file, unsigned int cmd, unsigned long arg)
{
int unit, err = -EFAULT;
struct ppp *ppp;
struct channel *chan;
+ struct ppp_net *pn;
int __user *p = (int __user *)arg;
lock_kernel();
@@ -777,7 +802,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
/* Create a new ppp unit */
if (get_user(unit, p))
break;
- ppp = ppp_create_interface(unit, &err);
+ ppp = ppp_create_interface(net, unit, &err);
if (!ppp)
break;
file->private_data = &ppp->file;
@@ -792,29 +817,31 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
/* Attach to an existing ppp unit */
if (get_user(unit, p))
break;
- mutex_lock(&all_ppp_mutex);
err = -ENXIO;
- ppp = ppp_find_unit(unit);
+ pn = ppp_pernet(net);
+ mutex_lock(&pn->all_ppp_mutex);
+ ppp = ppp_find_unit(pn, unit);
if (ppp) {
atomic_inc(&ppp->file.refcnt);
file->private_data = &ppp->file;
err = 0;
}
- mutex_unlock(&all_ppp_mutex);
+ mutex_unlock(&pn->all_ppp_mutex);
break;
case PPPIOCATTCHAN:
if (get_user(unit, p))
break;
- spin_lock_bh(&all_channels_lock);
err = -ENXIO;
- chan = ppp_find_channel(unit);
+ pn = ppp_pernet(net);
+ spin_lock_bh(&pn->all_channels_lock);
+ chan = ppp_find_channel(pn, unit);
if (chan) {
atomic_inc(&chan->file.refcnt);
file->private_data = &chan->file;
err = 0;
}
- spin_unlock_bh(&all_channels_lock);
+ spin_unlock_bh(&pn->all_channels_lock);
break;
default:
@@ -834,6 +861,51 @@ static const struct file_operations ppp_device_fops = {
.release = ppp_release
};
+static __net_init int ppp_init_net(struct net *net)
+{
+ struct ppp_net *pn;
+ int err;
+
+ pn = kzalloc(sizeof(*pn), GFP_KERNEL);
+ if (!pn)
+ return -ENOMEM;
+
+ idr_init(&pn->units_idr);
+ mutex_init(&pn->all_ppp_mutex);
+
+ INIT_LIST_HEAD(&pn->all_channels);
+ INIT_LIST_HEAD(&pn->new_channels);
+
+ spin_lock_init(&pn->all_channels_lock);
+
+ err = net_assign_generic(net, ppp_net_id, pn);
+ if (err) {
+ kfree(pn);
+ return err;
+ }
+
+ return 0;
+}
+
+static __net_exit void ppp_exit_net(struct net *net)
+{
+ struct ppp_net *pn;
+
+ pn = net_generic(net, ppp_net_id);
+ idr_destroy(&pn->units_idr);
+ /*
+ * if someone has cached our net, then any
+ * further net_generic() call will return NULL
+ */
+ net_assign_generic(net, ppp_net_id, NULL);
+ kfree(pn);
+}
+
+static __net_initdata struct pernet_operations ppp_net_ops = {
+ .init = ppp_init_net,
+ .exit = ppp_exit_net,
+};
+
#define PPP_MAJOR 108
/* Called at boot time if ppp is compiled into the kernel,
@@ -843,25 +915,36 @@ static int __init ppp_init(void)
int err;
printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
- err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
- if (!err) {
- ppp_class = class_create(THIS_MODULE, "ppp");
- if (IS_ERR(ppp_class)) {
- err = PTR_ERR(ppp_class);
- goto out_chrdev;
- }
- device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL,
- "ppp");
+
+ err = register_pernet_gen_device(&ppp_net_id, &ppp_net_ops);
+ if (err) {
+ printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err);
+ goto out;
}
-out:
- if (err)
+ err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
+ if (err) {
printk(KERN_ERR "failed to register PPP device (%d)\n", err);
- return err;
+ goto out_net;
+ }
+
+ ppp_class = class_create(THIS_MODULE, "ppp");
+ if (IS_ERR(ppp_class)) {
+ err = PTR_ERR(ppp_class);
+ goto out_chrdev;
+ }
+
+ /* not a big deal if we fail here :-) */
+ device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
+
+ return 0;
out_chrdev:
unregister_chrdev(PPP_MAJOR, "ppp");
- goto out;
+out_net:
+ unregister_pernet_gen_device(ppp_net_id, &ppp_net_ops);
+out:
+ return err;
}
/*
@@ -969,6 +1052,7 @@ static void ppp_setup(struct net_device *dev)
dev->tx_queue_len = 3;
dev->type = ARPHRD_PPP;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->features |= NETIF_F_NETNS_LOCAL;
}
/*
@@ -1986,19 +2070,27 @@ ppp_mp_reconstruct(struct ppp *ppp)
* Channel interface.
*/
-/*
- * Create a new, unattached ppp channel.
- */
-int
-ppp_register_channel(struct ppp_channel *chan)
+/* Create a new, unattached ppp channel. */
+int ppp_register_channel(struct ppp_channel *chan)
+{
+ return ppp_register_net_channel(current->nsproxy->net_ns, chan);
+}
+
+/* Create a new, unattached ppp channel for specified net. */
+int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
{
struct channel *pch;
+ struct ppp_net *pn;
pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
if (!pch)
return -ENOMEM;
+
+ pn = ppp_pernet(net);
+
pch->ppp = NULL;
pch->chan = chan;
+ pch->chan_net = net;
chan->ppp = pch;
init_ppp_file(&pch->file, CHANNEL);
pch->file.hdrlen = chan->hdrlen;
@@ -2008,11 +2100,13 @@ ppp_register_channel(struct ppp_channel *chan)
init_rwsem(&pch->chan_sem);
spin_lock_init(&pch->downl);
rwlock_init(&pch->upl);
- spin_lock_bh(&all_channels_lock);
- pch->file.index = ++last_channel_index;
- list_add(&pch->list, &new_channels);
+
+ spin_lock_bh(&pn->all_channels_lock);
+ pch->file.index = ++pn->last_channel_index;
+ list_add(&pch->list, &pn->new_channels);
atomic_inc(&channel_count);
- spin_unlock_bh(&all_channels_lock);
+ spin_unlock_bh(&pn->all_channels_lock);
+
return 0;
}
@@ -2053,9 +2147,11 @@ void
ppp_unregister_channel(struct ppp_channel *chan)
{
struct channel *pch = chan->ppp;
+ struct ppp_net *pn;
if (!pch)
return; /* should never happen */
+
chan->ppp = NULL;
/*
@@ -2068,9 +2164,12 @@ ppp_unregister_channel(struct ppp_channel *chan)
spin_unlock_bh(&pch->downl);
up_write(&pch->chan_sem);
ppp_disconnect_channel(pch);
- spin_lock_bh(&all_channels_lock);
+
+ pn = ppp_pernet(pch->chan_net);
+ spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
- spin_unlock_bh(&all_channels_lock);
+ spin_unlock_bh(&pn->all_channels_lock);
+
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
if (atomic_dec_and_test(&pch->file.refcnt))
@@ -2395,9 +2494,10 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
* unit == -1 means allocate a new number.
*/
static struct ppp *
-ppp_create_interface(int unit, int *retp)
+ppp_create_interface(struct net *net, int unit, int *retp)
{
struct ppp *ppp;
+ struct ppp_net *pn;
struct net_device *dev = NULL;
int ret = -ENOMEM;
int i;
@@ -2406,6 +2506,8 @@ ppp_create_interface(int unit, int *retp)
if (!dev)
goto out1;
+ pn = ppp_pernet(net);
+
ppp = netdev_priv(dev);
ppp->dev = dev;
ppp->mru = PPP_MRU;
@@ -2421,17 +2523,23 @@ ppp_create_interface(int unit, int *retp)
skb_queue_head_init(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
+ /*
+ * drum roll: don't forget to set
+ * the net this device belongs to
+ */
+ dev_net_set(dev, net);
+
ret = -EEXIST;
- mutex_lock(&all_ppp_mutex);
+ mutex_lock(&pn->all_ppp_mutex);
if (unit < 0) {
- unit = unit_get(&ppp_units_idr, ppp);
+ unit = unit_get(&pn->units_idr, ppp);
if (unit < 0) {
*retp = unit;
goto out2;
}
} else {
- if (unit_find(&ppp_units_idr, unit))
+ if (unit_find(&pn->units_idr, unit))
goto out2; /* unit already exists */
/*
* if caller need a specified unit number
@@ -2442,7 +2550,7 @@ ppp_create_interface(int unit, int *retp)
* fair but at least pppd will ask us to allocate
* new unit in this case so user is happy :)
*/
- unit = unit_set(&ppp_units_idr, ppp, unit);
+ unit = unit_set(&pn->units_idr, ppp, unit);
if (unit < 0)
goto out2;
}
@@ -2453,20 +2561,22 @@ ppp_create_interface(int unit, int *retp)
ret = register_netdev(dev);
if (ret != 0) {
- unit_put(&ppp_units_idr, unit);
+ unit_put(&pn->units_idr, unit);
printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
dev->name, ret);
goto out2;
}
+ ppp->ppp_net = net;
+
atomic_inc(&ppp_unit_count);
- mutex_unlock(&all_ppp_mutex);
+ mutex_unlock(&pn->all_ppp_mutex);
*retp = 0;
return ppp;
out2:
- mutex_unlock(&all_ppp_mutex);
+ mutex_unlock(&pn->all_ppp_mutex);
free_netdev(dev);
out1:
*retp = ret;
@@ -2492,7 +2602,11 @@ init_ppp_file(struct ppp_file *pf, int kind)
*/
static void ppp_shutdown_interface(struct ppp *ppp)
{
- mutex_lock(&all_ppp_mutex);
+ struct ppp_net *pn;
+
+ pn = ppp_pernet(ppp->ppp_net);
+ mutex_lock(&pn->all_ppp_mutex);
+
/* This will call dev_close() for us. */
ppp_lock(ppp);
if (!ppp->closing) {
@@ -2502,11 +2616,12 @@ static void ppp_shutdown_interface(struct ppp *ppp)
} else
ppp_unlock(ppp);
- unit_put(&ppp_units_idr, ppp->file.index);
+ unit_put(&pn->units_idr, ppp->file.index);
ppp->file.dead = 1;
ppp->owner = NULL;
wake_up_interruptible(&ppp->file.rwait);
- mutex_unlock(&all_ppp_mutex);
+
+ mutex_unlock(&pn->all_ppp_mutex);
}
/*
@@ -2554,9 +2669,9 @@ static void ppp_destroy_interface(struct ppp *ppp)
* The caller should have locked the all_ppp_mutex.
*/
static struct ppp *
-ppp_find_unit(int unit)
+ppp_find_unit(struct ppp_net *pn, int unit)
{
- return unit_find(&ppp_units_idr, unit);
+ return unit_find(&pn->units_idr, unit);
}
/*
@@ -2568,20 +2683,22 @@ ppp_find_unit(int unit)
* when we have a lot of channels in use.
*/
static struct channel *
-ppp_find_channel(int unit)
+ppp_find_channel(struct ppp_net *pn, int unit)
{
struct channel *pch;
- list_for_each_entry(pch, &new_channels, list) {
+ list_for_each_entry(pch, &pn->new_channels, list) {
if (pch->file.index == unit) {
- list_move(&pch->list, &all_channels);
+ list_move(&pch->list, &pn->all_channels);
return pch;
}
}
- list_for_each_entry(pch, &all_channels, list) {
+
+ list_for_each_entry(pch, &pn->all_channels, list) {
if (pch->file.index == unit)
return pch;
}
+
return NULL;
}
@@ -2592,11 +2709,14 @@ static int
ppp_connect_channel(struct channel *pch, int unit)
{
struct ppp *ppp;
+ struct ppp_net *pn;
int ret = -ENXIO;
int hdrlen;
- mutex_lock(&all_ppp_mutex);
- ppp = ppp_find_unit(unit);
+ pn = ppp_pernet(pch->chan_net);
+
+ mutex_lock(&pn->all_ppp_mutex);
+ ppp = ppp_find_unit(pn, unit);
if (!ppp)
goto out;
write_lock_bh(&pch->upl);
@@ -2620,7 +2740,7 @@ ppp_connect_channel(struct channel *pch, int unit)
outl:
write_unlock_bh(&pch->upl);
out:
- mutex_unlock(&all_ppp_mutex);
+ mutex_unlock(&pn->all_ppp_mutex);
return ret;
}
@@ -2677,7 +2797,7 @@ static void __exit ppp_cleanup(void)
unregister_chrdev(PPP_MAJOR, "ppp");
device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
class_destroy(ppp_class);
- idr_destroy(&ppp_units_idr);
+ unregister_pernet_gen_device(ppp_net_id, &ppp_net_ops);
}
/*
@@ -2743,6 +2863,7 @@ static void *unit_find(struct idr *p, int n)
module_init(ppp_init);
module_exit(ppp_cleanup);
+EXPORT_SYMBOL(ppp_register_net_channel);
EXPORT_SYMBOL(ppp_register_channel);
EXPORT_SYMBOL(ppp_unregister_channel);
EXPORT_SYMBOL(ppp_channel_index);
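The ppp_generic changes above replace the single global unit and channel tables with a per-namespace struct ppp_net reached through net_generic(), and export ppp_register_net_channel() so a channel driver can name the namespace explicitly; ppp_register_channel() keeps working by forwarding to current->nsproxy->net_ns. Below is a hedged sketch of how a channel driver might use the new entry point, keyed to the namespace of its underlying device (essentially what pppoe and pppol2tp do further down); struct my_session, my_xmit and MY_OVERHEAD are hypothetical names, not real symbols.

static int my_xmit(struct ppp_channel *chan, struct sk_buff *skb);	/* hypothetical */

static struct ppp_channel_ops my_chan_ops = {
	.start_xmit = my_xmit,
};

struct my_session {
	struct ppp_channel chan;
	/* ... protocol state (hypothetical) ... */
};

static int my_bind_channel(struct my_session *ses, struct net_device *dev)
{
	ses->chan.private = ses;
	ses->chan.ops     = &my_chan_ops;
	ses->chan.mtu     = dev->mtu - MY_OVERHEAD;	/* hypothetical header cost */

	/* ppp_register_channel(&ses->chan) would implicitly bind to
	 * current->nsproxy->net_ns; pin the channel to the device's
	 * namespace instead. */
	return ppp_register_net_channel(dev_net(dev), &ses->chan);
}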
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index c22b30533a1..798b8cf5f9a 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -78,38 +78,73 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/nsproxy.h>
#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#define PPPOE_HASH_BITS 4
-#define PPPOE_HASH_SIZE (1<<PPPOE_HASH_BITS)
-
-static struct ppp_channel_ops pppoe_chan_ops;
+#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
+#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1)
static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
static const struct proto_ops pppoe_ops;
-static DEFINE_RWLOCK(pppoe_hash_lock);
-
static struct ppp_channel_ops pppoe_chan_ops;
+/* per-net private data for this module */
+static unsigned int pppoe_net_id;
+struct pppoe_net {
+ /*
+ * we could use a single hash table for all
+ * nets by folding the net id into the hash,
+ * but that would lengthen the hash chains and
+ * add a few extra comparisons; keeping one
+ * table per net also means less lock
+ * contention on SMP
+ */
+ struct pppox_sock *hash_table[PPPOE_HASH_SIZE];
+ rwlock_t hash_lock;
+};
+
+/* to eliminate a race between pppoe_flush_dev and pppoe_release */
+static DEFINE_SPINLOCK(flush_lock);
+
+/*
+ * A PPPoE connection can be in one of the following stages:
+ * 1) Discovery stage (to obtain remote MAC and Session ID)
+ * 2) Session stage (MAC and SID are known)
+ *
+ * Ethernet frames have a special tag for this but
+ * we use a simpler approach based on the session id
+ */
+static inline bool stage_session(__be16 sid)
+{
+ return sid != 0;
+}
+
+static inline struct pppoe_net *pppoe_pernet(struct net *net)
+{
+ BUG_ON(!net);
+
+ return net_generic(net, pppoe_net_id);
+}
+
static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b)
{
- return (a->sid == b->sid &&
- (memcmp(a->remote, b->remote, ETH_ALEN) == 0));
+ return a->sid == b->sid && !memcmp(a->remote, b->remote, ETH_ALEN);
}
static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr)
{
- return (a->sid == sid &&
- (memcmp(a->remote,addr,ETH_ALEN) == 0));
+ return a->sid == sid && !memcmp(a->remote, addr, ETH_ALEN);
}
-#if 8%PPPOE_HASH_BITS
+#if 8 % PPPOE_HASH_BITS
#error 8 must be a multiple of PPPOE_HASH_BITS
#endif
@@ -118,69 +153,71 @@ static int hash_item(__be16 sid, unsigned char *addr)
unsigned char hash = 0;
unsigned int i;
- for (i = 0 ; i < ETH_ALEN ; i++) {
+ for (i = 0; i < ETH_ALEN; i++)
hash ^= addr[i];
- }
- for (i = 0 ; i < sizeof(sid_t)*8 ; i += 8 ){
- hash ^= (__force __u32)sid>>i;
- }
- for (i = 8 ; (i>>=1) >= PPPOE_HASH_BITS ; ) {
- hash ^= hash>>i;
- }
+ for (i = 0; i < sizeof(sid_t) * 8; i += 8)
+ hash ^= (__force __u32)sid >> i;
+ for (i = 8; (i >>= 1) >= PPPOE_HASH_BITS;)
+ hash ^= hash >> i;
- return hash & ( PPPOE_HASH_SIZE - 1 );
+ return hash & PPPOE_HASH_MASK;
}
-/* zeroed because its in .bss */
-static struct pppox_sock *item_hash_table[PPPOE_HASH_SIZE];
-
/**********************************************************************
*
* Set/get/delete/rehash items (internal versions)
*
**********************************************************************/
-static struct pppox_sock *__get_item(__be16 sid, unsigned char *addr, int ifindex)
+static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid,
+ unsigned char *addr, int ifindex)
{
int hash = hash_item(sid, addr);
struct pppox_sock *ret;
- ret = item_hash_table[hash];
+ ret = pn->hash_table[hash];
+ while (ret) {
+ if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
+ ret->pppoe_ifindex == ifindex)
+ return ret;
- while (ret && !(cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex))
ret = ret->next;
+ }
- return ret;
+ return NULL;
}
-static int __set_item(struct pppox_sock *po)
+static int __set_item(struct pppoe_net *pn, struct pppox_sock *po)
{
int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
struct pppox_sock *ret;
- ret = item_hash_table[hash];
+ ret = pn->hash_table[hash];
while (ret) {
- if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_ifindex == po->pppoe_ifindex)
+ if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) &&
+ ret->pppoe_ifindex == po->pppoe_ifindex)
return -EALREADY;
ret = ret->next;
}
- po->next = item_hash_table[hash];
- item_hash_table[hash] = po;
+ po->next = pn->hash_table[hash];
+ pn->hash_table[hash] = po;
return 0;
}
-static struct pppox_sock *__delete_item(__be16 sid, char *addr, int ifindex)
+static struct pppox_sock *__delete_item(struct pppoe_net *pn, __be16 sid,
+ char *addr, int ifindex)
{
int hash = hash_item(sid, addr);
struct pppox_sock *ret, **src;
- ret = item_hash_table[hash];
- src = &item_hash_table[hash];
+ ret = pn->hash_table[hash];
+ src = &pn->hash_table[hash];
while (ret) {
- if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex) {
+ if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
+ ret->pppoe_ifindex == ifindex) {
*src = ret->next;
break;
}
@@ -197,46 +234,54 @@ static struct pppox_sock *__delete_item(__be16 sid, char *addr, int ifindex)
* Set/get/delete/rehash items
*
**********************************************************************/
-static inline struct pppox_sock *get_item(__be16 sid,
- unsigned char *addr, int ifindex)
+static inline struct pppox_sock *get_item(struct pppoe_net *pn, __be16 sid,
+ unsigned char *addr, int ifindex)
{
struct pppox_sock *po;
- read_lock_bh(&pppoe_hash_lock);
- po = __get_item(sid, addr, ifindex);
+ read_lock_bh(&pn->hash_lock);
+ po = __get_item(pn, sid, addr, ifindex);
if (po)
sock_hold(sk_pppox(po));
- read_unlock_bh(&pppoe_hash_lock);
+ read_unlock_bh(&pn->hash_lock);
return po;
}
-static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp)
+static inline struct pppox_sock *get_item_by_addr(struct net *net,
+ struct sockaddr_pppox *sp)
{
struct net_device *dev;
+ struct pppoe_net *pn;
+ struct pppox_sock *pppox_sock;
+
int ifindex;
- dev = dev_get_by_name(&init_net, sp->sa_addr.pppoe.dev);
- if(!dev)
+ dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev);
+ if (!dev)
return NULL;
+
ifindex = dev->ifindex;
+ pn = net_generic(net, pppoe_net_id);
+ pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
+ sp->sa_addr.pppoe.remote, ifindex);
dev_put(dev);
- return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex);
+
+ return pppox_sock;
}
-static inline struct pppox_sock *delete_item(__be16 sid, char *addr, int ifindex)
+static inline struct pppox_sock *delete_item(struct pppoe_net *pn, __be16 sid,
+ char *addr, int ifindex)
{
struct pppox_sock *ret;
- write_lock_bh(&pppoe_hash_lock);
- ret = __delete_item(sid, addr, ifindex);
- write_unlock_bh(&pppoe_hash_lock);
+ write_lock_bh(&pn->hash_lock);
+ ret = __delete_item(pn, sid, addr, ifindex);
+ write_unlock_bh(&pn->hash_lock);
return ret;
}
-
-
/***************************************************************************
*
* Handler for device events.
@@ -246,25 +291,33 @@ static inline struct pppox_sock *delete_item(__be16 sid, char *addr, int ifindex
static void pppoe_flush_dev(struct net_device *dev)
{
- int hash;
+ struct pppoe_net *pn;
+ int i;
+
BUG_ON(dev == NULL);
- write_lock_bh(&pppoe_hash_lock);
- for (hash = 0; hash < PPPOE_HASH_SIZE; hash++) {
- struct pppox_sock *po = item_hash_table[hash];
+ pn = pppoe_pernet(dev_net(dev));
+ if (!pn) /* already freed */
+ return;
+
+ write_lock_bh(&pn->hash_lock);
+ for (i = 0; i < PPPOE_HASH_SIZE; i++) {
+ struct pppox_sock *po = pn->hash_table[i];
while (po != NULL) {
- struct sock *sk = sk_pppox(po);
+ struct sock *sk;
if (po->pppoe_dev != dev) {
po = po->next;
continue;
}
+ sk = sk_pppox(po);
+ spin_lock(&flush_lock);
po->pppoe_dev = NULL;
+ spin_unlock(&flush_lock);
dev_put(dev);
-
/* We always grab the socket lock, followed by the
- * pppoe_hash_lock, in that order. Since we should
+ * hash_lock, in that order. Since we should
* hold the sock lock while doing any unbinding,
* we need to release the lock we're holding.
* Hold a reference to the sock so it doesn't disappear
@@ -273,7 +326,7 @@ static void pppoe_flush_dev(struct net_device *dev)
sock_hold(sk);
- write_unlock_bh(&pppoe_hash_lock);
+ write_unlock_bh(&pn->hash_lock);
lock_sock(sk);
if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
@@ -289,20 +342,17 @@ static void pppoe_flush_dev(struct net_device *dev)
* While the lock was dropped the chain contents may
* have changed.
*/
- write_lock_bh(&pppoe_hash_lock);
- po = item_hash_table[hash];
+ write_lock_bh(&pn->hash_lock);
+ po = pn->hash_table[i];
}
}
- write_unlock_bh(&pppoe_hash_lock);
+ write_unlock_bh(&pn->hash_lock);
}
static int pppoe_device_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
- struct net_device *dev = (struct net_device *) ptr;
-
- if (dev_net(dev) != &init_net)
- return NOTIFY_DONE;
+ struct net_device *dev = (struct net_device *)ptr;
/* Only look at sockets that are using this specific device. */
switch (event) {
@@ -324,12 +374,10 @@ static int pppoe_device_event(struct notifier_block *this,
return NOTIFY_DONE;
}
-
static struct notifier_block pppoe_notifier = {
.notifier_call = pppoe_device_event,
};
-
/************************************************************************
*
* Do the real work of receiving a PPPoE Session frame.
@@ -343,8 +391,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state & PPPOX_BOUND) {
ppp_input(&po->chan, skb);
} else if (sk->sk_state & PPPOX_RELAY) {
- relay_po = get_item_by_addr(&po->pppoe_relay);
-
+ relay_po = get_item_by_addr(dev_net(po->pppoe_dev),
+ &po->pppoe_relay);
if (relay_po == NULL)
goto abort_kfree;
@@ -373,22 +421,18 @@ abort_kfree:
* Receive wrapper called in BH context.
*
***********************************************************************/
-static int pppoe_rcv(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *pt,
- struct net_device *orig_dev)
-
+static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
{
struct pppoe_hdr *ph;
struct pppox_sock *po;
+ struct pppoe_net *pn;
int len;
- if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
goto out;
- if (dev_net(dev) != &init_net)
- goto drop;
-
if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
goto drop;
@@ -402,7 +446,8 @@ static int pppoe_rcv(struct sk_buff *skb,
if (pskb_trim_rcsum(skb, len))
goto drop;
- po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
+ pn = pppoe_pernet(dev_net(dev));
+ po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
if (!po)
goto drop;
@@ -420,19 +465,16 @@ out:
* This is solely for detection of PADT frames
*
***********************************************************************/
-static int pppoe_disc_rcv(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *pt,
- struct net_device *orig_dev)
+static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
{
struct pppoe_hdr *ph;
struct pppox_sock *po;
+ struct pppoe_net *pn;
- if (dev_net(dev) != &init_net)
- goto abort;
-
- if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
goto out;
if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
@@ -442,7 +484,8 @@ static int pppoe_disc_rcv(struct sk_buff *skb,
if (ph->code != PADT_CODE)
goto abort;
- po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
+ pn = pppoe_pernet(dev_net(dev));
+ po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
if (po) {
struct sock *sk = sk_pppox(po);
@@ -493,38 +536,37 @@ static struct proto pppoe_sk_proto = {
**********************************************************************/
static int pppoe_create(struct net *net, struct socket *sock)
{
- int error = -ENOMEM;
struct sock *sk;
sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto);
if (!sk)
- goto out;
+ return -ENOMEM;
sock_init_data(sock, sk);
- sock->state = SS_UNCONNECTED;
- sock->ops = &pppoe_ops;
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &pppoe_ops;
- sk->sk_backlog_rcv = pppoe_rcv_core;
- sk->sk_state = PPPOX_NONE;
- sk->sk_type = SOCK_STREAM;
- sk->sk_family = PF_PPPOX;
- sk->sk_protocol = PX_PROTO_OE;
+ sk->sk_backlog_rcv = pppoe_rcv_core;
+ sk->sk_state = PPPOX_NONE;
+ sk->sk_type = SOCK_STREAM;
+ sk->sk_family = PF_PPPOX;
+ sk->sk_protocol = PX_PROTO_OE;
- error = 0;
-out: return error;
+ return 0;
}
static int pppoe_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct pppox_sock *po;
+ struct pppoe_net *pn;
if (!sk)
return 0;
lock_sock(sk);
- if (sock_flag(sk, SOCK_DEAD)){
+ if (sock_flag(sk, SOCK_DEAD)) {
release_sock(sk);
return -EBADF;
}
@@ -534,26 +576,39 @@ static int pppoe_release(struct socket *sock)
/* Signal the death of the socket. */
sk->sk_state = PPPOX_DEAD;
+ /*
+ * pppoe_flush_dev could race with this routine,
+ * so take flush_lock here; we only need it long
+ * enough to reach the per-net data safely
+ */
+ spin_lock(&flush_lock);
+ po = pppox_sk(sk);
+ if (!po->pppoe_dev) {
+ spin_unlock(&flush_lock);
+ goto out;
+ }
+ pn = pppoe_pernet(dev_net(po->pppoe_dev));
+ spin_unlock(&flush_lock);
- /* Write lock on hash lock protects the entire "po" struct from
- * concurrent updates via pppoe_flush_dev. The "po" struct should
- * be considered part of the hash table contents, thus protected
- * by the hash table lock */
- write_lock_bh(&pppoe_hash_lock);
+ /*
+ * protect "po" from concurrent updates
+ * on pppoe_flush_dev
+ */
+ write_lock_bh(&pn->hash_lock);
po = pppox_sk(sk);
- if (po->pppoe_pa.sid) {
- __delete_item(po->pppoe_pa.sid,
- po->pppoe_pa.remote, po->pppoe_ifindex);
- }
+ if (stage_session(po->pppoe_pa.sid))
+ __delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
+ po->pppoe_ifindex);
if (po->pppoe_dev) {
dev_put(po->pppoe_dev);
po->pppoe_dev = NULL;
}
- write_unlock_bh(&pppoe_hash_lock);
+ write_unlock_bh(&pn->hash_lock);
+out:
sock_orphan(sk);
sock->sk = NULL;
@@ -564,14 +619,14 @@ static int pppoe_release(struct socket *sock)
return 0;
}
-
static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
int sockaddr_len, int flags)
{
struct sock *sk = sock->sk;
- struct net_device *dev;
- struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+ struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;
struct pppox_sock *po = pppox_sk(sk);
+ struct net_device *dev;
+ struct pppoe_net *pn;
int error;
lock_sock(sk);
@@ -582,44 +637,45 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
/* Check for already bound sockets */
error = -EBUSY;
- if ((sk->sk_state & PPPOX_CONNECTED) && sp->sa_addr.pppoe.sid)
+ if ((sk->sk_state & PPPOX_CONNECTED) &&
+ stage_session(sp->sa_addr.pppoe.sid))
goto end;
/* Check for already disconnected sockets, on attempts to disconnect */
error = -EALREADY;
- if ((sk->sk_state & PPPOX_DEAD) && !sp->sa_addr.pppoe.sid )
+ if ((sk->sk_state & PPPOX_DEAD) &&
+ !stage_session(sp->sa_addr.pppoe.sid))
goto end;
error = 0;
- if (po->pppoe_pa.sid) {
- pppox_unbind_sock(sk);
- /* Delete the old binding */
- delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote,po->pppoe_ifindex);
-
- if(po->pppoe_dev)
+ /* Delete the old binding */
+ if (stage_session(po->pppoe_pa.sid)) {
+ pppox_unbind_sock(sk);
+ if (po->pppoe_dev) {
+ pn = pppoe_pernet(dev_net(po->pppoe_dev));
+ delete_item(pn, po->pppoe_pa.sid,
+ po->pppoe_pa.remote, po->pppoe_ifindex);
dev_put(po->pppoe_dev);
-
+ }
memset(sk_pppox(po) + 1, 0,
sizeof(struct pppox_sock) - sizeof(struct sock));
-
sk->sk_state = PPPOX_NONE;
}
- /* Don't re-bind if sid==0 */
- if (sp->sa_addr.pppoe.sid != 0) {
- dev = dev_get_by_name(&init_net, sp->sa_addr.pppoe.dev);
-
+ /* Re-bind in session stage only */
+ if (stage_session(sp->sa_addr.pppoe.sid)) {
error = -ENODEV;
+ dev = dev_get_by_name(sock_net(sk), sp->sa_addr.pppoe.dev);
if (!dev)
goto end;
po->pppoe_dev = dev;
po->pppoe_ifindex = dev->ifindex;
-
- write_lock_bh(&pppoe_hash_lock);
- if (!(dev->flags & IFF_UP)){
- write_unlock_bh(&pppoe_hash_lock);
+ pn = pppoe_pernet(dev_net(dev));
+ write_lock_bh(&pn->hash_lock);
+ if (!(dev->flags & IFF_UP)) {
+ write_unlock_bh(&pn->hash_lock);
goto err_put;
}
@@ -627,8 +683,8 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
&sp->sa_addr.pppoe,
sizeof(struct pppoe_addr));
- error = __set_item(po);
- write_unlock_bh(&pppoe_hash_lock);
+ error = __set_item(pn, po);
+ write_unlock_bh(&pn->hash_lock);
if (error < 0)
goto err_put;
@@ -639,7 +695,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.private = sk;
po->chan.ops = &pppoe_chan_ops;
- error = ppp_register_channel(&po->chan);
+ error = ppp_register_net_channel(dev_net(dev), &po->chan);
if (error)
goto err_put;
@@ -648,7 +704,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
po->num = sp->sa_addr.pppoe.sid;
- end:
+end:
release_sock(sk);
return error;
err_put:
@@ -659,7 +715,6 @@ err_put:
goto end;
}
-
static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr,
int *usockaddr_len, int peer)
{
@@ -678,7 +733,6 @@ static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr,
return 0;
}
-
static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
@@ -690,7 +744,6 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
switch (cmd) {
case PPPIOCGMRU:
err = -ENXIO;
-
if (!(sk->sk_state & PPPOX_CONNECTED))
break;
@@ -698,7 +751,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
if (put_user(po->pppoe_dev->mtu -
sizeof(struct pppoe_hdr) -
PPP_HDRLEN,
- (int __user *) arg))
+ (int __user *)arg))
break;
err = 0;
break;
@@ -709,7 +762,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
break;
err = -EFAULT;
- if (get_user(val,(int __user *) arg))
+ if (get_user(val, (int __user *)arg))
break;
if (val < (po->pppoe_dev->mtu
@@ -722,7 +775,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
case PPPIOCSFLAGS:
err = -EFAULT;
- if (get_user(val, (int __user *) arg))
+ if (get_user(val, (int __user *)arg))
break;
err = 0;
break;
@@ -749,13 +802,12 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
err = -EINVAL;
if (po->pppoe_relay.sa_family != AF_PPPOX ||
- po->pppoe_relay.sa_protocol!= PX_PROTO_OE)
+ po->pppoe_relay.sa_protocol != PX_PROTO_OE)
break;
/* Check that the socket referenced by the address
actually exists. */
- relay_po = get_item_by_addr(&po->pppoe_relay);
-
+ relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay);
if (!relay_po)
break;
@@ -781,7 +833,6 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
return err;
}
-
static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
@@ -808,7 +859,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
dev = po->pppoe_dev;
error = -EMSGSIZE;
- if (total_len > (dev->mtu + dev->hard_header_len))
+ if (total_len > (dev->mtu + dev->hard_header_len))
goto end;
@@ -828,11 +879,10 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
skb->priority = sk->sk_priority;
skb->protocol = __constant_htons(ETH_P_PPP_SES);
- ph = (struct pppoe_hdr *) skb_put(skb, total_len + sizeof(struct pppoe_hdr));
- start = (char *) &ph->tag[0];
+ ph = (struct pppoe_hdr *)skb_put(skb, total_len + sizeof(struct pppoe_hdr));
+ start = (char *)&ph->tag[0];
error = memcpy_fromiovec(start, m->msg_iov, total_len);
-
if (error < 0) {
kfree_skb(skb);
goto end;
@@ -853,7 +903,6 @@ end:
return error;
}
-
/************************************************************************
*
* xmit function for internal use.
@@ -903,7 +952,6 @@ abort:
return 1;
}
-
/************************************************************************
*
* xmit function called by generic PPP driver
@@ -912,11 +960,10 @@ abort:
***********************************************************************/
static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
- struct sock *sk = (struct sock *) chan->private;
+ struct sock *sk = (struct sock *)chan->private;
return __pppoe_xmit(sk, skb);
}
-
static struct ppp_channel_ops pppoe_chan_ops = {
.start_xmit = pppoe_xmit,
};
@@ -935,7 +982,6 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &error);
-
if (error < 0)
goto end;
@@ -968,44 +1014,47 @@ static int pppoe_seq_show(struct seq_file *seq, void *v)
dev_name = po->pppoe_pa.dev;
seq_printf(seq, "%08X %pM %8s\n",
- po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name);
+ po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name);
out:
return 0;
}
-static __inline__ struct pppox_sock *pppoe_get_idx(loff_t pos)
+static inline struct pppox_sock *pppoe_get_idx(struct pppoe_net *pn, loff_t pos)
{
struct pppox_sock *po;
- int i = 0;
+ int i;
- for (; i < PPPOE_HASH_SIZE; i++) {
- po = item_hash_table[i];
+ for (i = 0; i < PPPOE_HASH_SIZE; i++) {
+ po = pn->hash_table[i];
while (po) {
if (!pos--)
goto out;
po = po->next;
}
}
+
out:
return po;
}
static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(pppoe_hash_lock)
+ __acquires(pn->hash_lock)
{
+ struct pppoe_net *pn = pppoe_pernet(seq->private);
loff_t l = *pos;
- read_lock_bh(&pppoe_hash_lock);
- return l ? pppoe_get_idx(--l) : SEQ_START_TOKEN;
+ read_lock_bh(&pn->hash_lock);
+ return l ? pppoe_get_idx(pn, --l) : SEQ_START_TOKEN;
}
static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
+ struct pppoe_net *pn = pppoe_pernet(seq->private);
struct pppox_sock *po;
++*pos;
if (v == SEQ_START_TOKEN) {
- po = pppoe_get_idx(0);
+ po = pppoe_get_idx(pn, 0);
goto out;
}
po = v;
@@ -1015,22 +1064,24 @@ static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
while (++hash < PPPOE_HASH_SIZE) {
- po = item_hash_table[hash];
+ po = pn->hash_table[hash];
if (po)
break;
}
}
+
out:
return po;
}
static void pppoe_seq_stop(struct seq_file *seq, void *v)
- __releases(pppoe_hash_lock)
+ __releases(pn->hash_lock)
{
- read_unlock_bh(&pppoe_hash_lock);
+ struct pppoe_net *pn = pppoe_pernet(seq->private);
+ read_unlock_bh(&pn->hash_lock);
}
-static struct seq_operations pppoe_seq_ops = {
+static const struct seq_operations pppoe_seq_ops = {
.start = pppoe_seq_start,
.next = pppoe_seq_next,
.stop = pppoe_seq_stop,
@@ -1039,7 +1090,30 @@ static struct seq_operations pppoe_seq_ops = {
static int pppoe_seq_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &pppoe_seq_ops);
+ struct seq_file *m;
+ struct net *net;
+ int err;
+
+ err = seq_open(file, &pppoe_seq_ops);
+ if (err)
+ return err;
+
+ m = file->private_data;
+ net = maybe_get_net(PDE_NET(PDE(inode)));
+ BUG_ON(!net);
+ m->private = net;
+
+ return err;
+}
+
+static int pppoe_seq_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m;
+
+ m = file->private_data;
+ put_net((struct net*)m->private);
+
+ return seq_release(inode, file);
}
static const struct file_operations pppoe_seq_fops = {
@@ -1047,74 +1121,115 @@ static const struct file_operations pppoe_seq_fops = {
.open = pppoe_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = pppoe_seq_release,
};
-static int __init pppoe_proc_init(void)
-{
- struct proc_dir_entry *p;
-
- p = proc_net_fops_create(&init_net, "pppoe", S_IRUGO, &pppoe_seq_fops);
- if (!p)
- return -ENOMEM;
- return 0;
-}
-#else /* CONFIG_PROC_FS */
-static inline int pppoe_proc_init(void) { return 0; }
#endif /* CONFIG_PROC_FS */
static const struct proto_ops pppoe_ops = {
- .family = AF_PPPOX,
- .owner = THIS_MODULE,
- .release = pppoe_release,
- .bind = sock_no_bind,
- .connect = pppoe_connect,
- .socketpair = sock_no_socketpair,
- .accept = sock_no_accept,
- .getname = pppoe_getname,
- .poll = datagram_poll,
- .listen = sock_no_listen,
- .shutdown = sock_no_shutdown,
- .setsockopt = sock_no_setsockopt,
- .getsockopt = sock_no_getsockopt,
- .sendmsg = pppoe_sendmsg,
- .recvmsg = pppoe_recvmsg,
- .mmap = sock_no_mmap,
- .ioctl = pppox_ioctl,
+ .family = AF_PPPOX,
+ .owner = THIS_MODULE,
+ .release = pppoe_release,
+ .bind = sock_no_bind,
+ .connect = pppoe_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = pppoe_getname,
+ .poll = datagram_poll,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = pppoe_sendmsg,
+ .recvmsg = pppoe_recvmsg,
+ .mmap = sock_no_mmap,
+ .ioctl = pppox_ioctl,
};
static struct pppox_proto pppoe_proto = {
- .create = pppoe_create,
- .ioctl = pppoe_ioctl,
- .owner = THIS_MODULE,
+ .create = pppoe_create,
+ .ioctl = pppoe_ioctl,
+ .owner = THIS_MODULE,
};
+static __net_init int pppoe_init_net(struct net *net)
+{
+ struct pppoe_net *pn;
+ struct proc_dir_entry *pde;
+ int err;
+
+ pn = kzalloc(sizeof(*pn), GFP_KERNEL);
+ if (!pn)
+ return -ENOMEM;
+
+ rwlock_init(&pn->hash_lock);
+
+ err = net_assign_generic(net, pppoe_net_id, pn);
+ if (err)
+ goto out;
+
+ pde = proc_net_fops_create(net, "pppoe", S_IRUGO, &pppoe_seq_fops);
+#ifdef CONFIG_PROC_FS
+ if (!pde) {
+ err = -ENOMEM;
+ goto out;
+ }
+#endif
+
+ return 0;
+
+out:
+ kfree(pn);
+ return err;
+}
+
+static __net_exit void pppoe_exit_net(struct net *net)
+{
+ struct pppoe_net *pn;
+
+ proc_net_remove(net, "pppoe");
+ pn = net_generic(net, pppoe_net_id);
+ /*
+ * if someone has cached our net, then any
+ * further net_generic() call will return NULL
+ */
+ net_assign_generic(net, pppoe_net_id, NULL);
+ kfree(pn);
+}
+
+static __net_initdata struct pernet_operations pppoe_net_ops = {
+ .init = pppoe_init_net,
+ .exit = pppoe_exit_net,
+};
static int __init pppoe_init(void)
{
- int err = proto_register(&pppoe_sk_proto, 0);
+ int err;
+ err = proto_register(&pppoe_sk_proto, 0);
if (err)
goto out;
- err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto);
+ err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto);
if (err)
goto out_unregister_pppoe_proto;
- err = pppoe_proc_init();
+ err = register_pernet_gen_device(&pppoe_net_id, &pppoe_net_ops);
if (err)
goto out_unregister_pppox_proto;
dev_add_pack(&pppoes_ptype);
dev_add_pack(&pppoed_ptype);
register_netdevice_notifier(&pppoe_notifier);
-out:
- return err;
+
+ return 0;
+
out_unregister_pppox_proto:
unregister_pppox_proto(PX_PROTO_OE);
out_unregister_pppoe_proto:
proto_unregister(&pppoe_sk_proto);
- goto out;
+out:
+ return err;
}
static void __exit pppoe_exit(void)
@@ -1123,7 +1238,7 @@ static void __exit pppoe_exit(void)
dev_remove_pack(&pppoes_ptype);
dev_remove_pack(&pppoed_ptype);
unregister_netdevice_notifier(&pppoe_notifier);
- remove_proc_entry("pppoe", init_net.proc_net);
+ unregister_pernet_gen_device(pppoe_net_id, &pppoe_net_ops);
proto_unregister(&pppoe_sk_proto);
}
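pppoe above and pppol2tp below adopt the same per-net boilerplate as ppp_generic: allocate module state in the pernet ->init hook, publish it with net_assign_generic(), and fetch it wherever a struct net is at hand with net_generic(), using the id handed back by register_pernet_gen_device(). Condensed into one hedged skeleton for reference; the foo_* names are placeholders, not real symbols.

static unsigned int foo_net_id;

struct foo_net {
	/* per-namespace state goes here */
};

static __net_init int foo_init_net(struct net *net)
{
	struct foo_net *fn = kzalloc(sizeof(*fn), GFP_KERNEL);

	if (!fn)
		return -ENOMEM;
	/* publish so net_generic(net, foo_net_id) finds it */
	return net_assign_generic(net, foo_net_id, fn);
}

static __net_exit void foo_exit_net(struct net *net)
{
	struct foo_net *fn = net_generic(net, foo_net_id);

	/* anyone still holding the net now gets NULL back */
	net_assign_generic(net, foo_net_id, NULL);
	kfree(fn);
}

static __net_initdata struct pernet_operations foo_net_ops = {
	.init = foo_init_net,
	.exit = foo_exit_net,
};

/* module init/exit pair up as:
 *   register_pernet_gen_device(&foo_net_id, &foo_net_ops);
 *   ...
 *   unregister_pernet_gen_device(foo_net_id, &foo_net_ops);
 */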
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index f1a946785c6..056e22a784b 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -90,7 +90,9 @@
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/proc_fs.h>
+#include <linux/nsproxy.h>
#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/udp.h>
@@ -204,6 +206,7 @@ struct pppol2tp_tunnel
struct sock *sock; /* Parent socket */
struct list_head list; /* Keep a list of all open
* prepared sockets */
+ struct net *pppol2tp_net; /* the net we belong to */
atomic_t ref_count;
};
@@ -227,8 +230,20 @@ static atomic_t pppol2tp_tunnel_count;
static atomic_t pppol2tp_session_count;
static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL };
static struct proto_ops pppol2tp_ops;
-static LIST_HEAD(pppol2tp_tunnel_list);
-static DEFINE_RWLOCK(pppol2tp_tunnel_list_lock);
+
+/* per-net private data for this module */
+static unsigned int pppol2tp_net_id;
+struct pppol2tp_net {
+ struct list_head pppol2tp_tunnel_list;
+ rwlock_t pppol2tp_tunnel_list_lock;
+};
+
+static inline struct pppol2tp_net *pppol2tp_pernet(struct net *net)
+{
+ BUG_ON(!net);
+
+ return net_generic(net, pppol2tp_net_id);
+}
/* Helpers to obtain tunnel/session contexts from sockets.
*/
@@ -321,18 +336,19 @@ pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id)
/* Lookup a tunnel by id
*/
-static struct pppol2tp_tunnel *pppol2tp_tunnel_find(u16 tunnel_id)
+static struct pppol2tp_tunnel *pppol2tp_tunnel_find(struct net *net, u16 tunnel_id)
{
- struct pppol2tp_tunnel *tunnel = NULL;
+ struct pppol2tp_tunnel *tunnel;
+ struct pppol2tp_net *pn = pppol2tp_pernet(net);
- read_lock_bh(&pppol2tp_tunnel_list_lock);
- list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) {
+ read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
+ list_for_each_entry(tunnel, &pn->pppol2tp_tunnel_list, list) {
if (tunnel->stats.tunnel_id == tunnel_id) {
- read_unlock_bh(&pppol2tp_tunnel_list_lock);
+ read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
return tunnel;
}
}
- read_unlock_bh(&pppol2tp_tunnel_list_lock);
+ read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
return NULL;
}
@@ -1287,10 +1303,12 @@ again:
*/
static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
{
+ struct pppol2tp_net *pn = pppol2tp_pernet(tunnel->pppol2tp_net);
+
/* Remove from socket list */
- write_lock_bh(&pppol2tp_tunnel_list_lock);
+ write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
list_del_init(&tunnel->list);
- write_unlock_bh(&pppol2tp_tunnel_list_lock);
+ write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
atomic_dec(&pppol2tp_tunnel_count);
kfree(tunnel);
@@ -1444,13 +1462,14 @@ error:
/* Internal function to prepare a tunnel (UDP) socket to have PPPoX
* sockets attached to it.
*/
-static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id,
- int *error)
+static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net,
+ int fd, u16 tunnel_id, int *error)
{
int err;
struct socket *sock = NULL;
struct sock *sk;
struct pppol2tp_tunnel *tunnel;
+ struct pppol2tp_net *pn;
struct sock *ret = NULL;
/* Get the tunnel UDP socket from the fd, which was opened by
@@ -1524,11 +1543,15 @@ static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id,
/* Misc init */
rwlock_init(&tunnel->hlist_lock);
+ /* The net we belong to */
+ tunnel->pppol2tp_net = net;
+ pn = pppol2tp_pernet(net);
+
/* Add tunnel to our list */
INIT_LIST_HEAD(&tunnel->list);
- write_lock_bh(&pppol2tp_tunnel_list_lock);
- list_add(&tunnel->list, &pppol2tp_tunnel_list);
- write_unlock_bh(&pppol2tp_tunnel_list_lock);
+ write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
+ list_add(&tunnel->list, &pn->pppol2tp_tunnel_list);
+ write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
atomic_inc(&pppol2tp_tunnel_count);
/* Bump the reference count. The tunnel context is deleted
@@ -1629,7 +1652,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
* tunnel id.
*/
if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) {
- tunnel_sock = pppol2tp_prepare_tunnel_socket(sp->pppol2tp.fd,
+ tunnel_sock = pppol2tp_prepare_tunnel_socket(sock_net(sk),
+ sp->pppol2tp.fd,
sp->pppol2tp.s_tunnel,
&error);
if (tunnel_sock == NULL)
@@ -1637,7 +1661,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
tunnel = tunnel_sock->sk_user_data;
} else {
- tunnel = pppol2tp_tunnel_find(sp->pppol2tp.s_tunnel);
+ tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel);
/* Error if we can't find the tunnel */
error = -ENOENT;
@@ -1725,7 +1749,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.ops = &pppol2tp_chan_ops;
po->chan.mtu = session->mtu;
- error = ppp_register_channel(&po->chan);
+ error = ppp_register_net_channel(sock_net(sk), &po->chan);
if (error)
goto end_put_tun;
@@ -2347,8 +2371,9 @@ end:
#include <linux/seq_file.h>
struct pppol2tp_seq_data {
- struct pppol2tp_tunnel *tunnel; /* current tunnel */
- struct pppol2tp_session *session; /* NULL means get first session in tunnel */
+ struct net *seq_net; /* net of inode */
+ struct pppol2tp_tunnel *tunnel; /* current tunnel */
+ struct pppol2tp_session *session; /* NULL means get first session in tunnel */
};
static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr)
@@ -2384,17 +2409,18 @@ out:
return session;
}
-static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_tunnel *curr)
+static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_net *pn,
+ struct pppol2tp_tunnel *curr)
{
struct pppol2tp_tunnel *tunnel = NULL;
- read_lock_bh(&pppol2tp_tunnel_list_lock);
- if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) {
+ read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
+ if (list_is_last(&curr->list, &pn->pppol2tp_tunnel_list)) {
goto out;
}
tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
out:
- read_unlock_bh(&pppol2tp_tunnel_list_lock);
+ read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
return tunnel;
}
@@ -2402,6 +2428,7 @@ out:
static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
{
struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
+ struct pppol2tp_net *pn;
loff_t pos = *offs;
if (!pos)
@@ -2409,14 +2436,15 @@ static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
BUG_ON(m->private == NULL);
pd = m->private;
+ pn = pppol2tp_pernet(pd->seq_net);
if (pd->tunnel == NULL) {
- if (!list_empty(&pppol2tp_tunnel_list))
- pd->tunnel = list_entry(pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list);
+ if (!list_empty(&pn->pppol2tp_tunnel_list))
+ pd->tunnel = list_entry(pn->pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list);
} else {
pd->session = next_session(pd->tunnel, pd->session);
if (pd->session == NULL) {
- pd->tunnel = next_tunnel(pd->tunnel);
+ pd->tunnel = next_tunnel(pn, pd->tunnel);
}
}
@@ -2517,7 +2545,7 @@ out:
return 0;
}
-static struct seq_operations pppol2tp_seq_ops = {
+static const struct seq_operations pppol2tp_seq_ops = {
.start = pppol2tp_seq_start,
.next = pppol2tp_seq_next,
.stop = pppol2tp_seq_stop,
@@ -2532,6 +2560,7 @@ static int pppol2tp_proc_open(struct inode *inode, struct file *file)
{
struct seq_file *m;
struct pppol2tp_seq_data *pd;
+ struct net *net;
int ret = 0;
ret = seq_open(file, &pppol2tp_seq_ops);
@@ -2542,12 +2571,15 @@ static int pppol2tp_proc_open(struct inode *inode, struct file *file)
/* Allocate and fill our proc_data for access later */
ret = -ENOMEM;
- m->private = kzalloc(sizeof(struct pppol2tp_seq_data), GFP_KERNEL);
+ m->private = kzalloc(sizeof(*pd), GFP_KERNEL);
if (m->private == NULL)
goto out;
pd = m->private;
- ret = 0;
+ net = maybe_get_net(PDE_NET(PDE(inode)));
+ BUG_ON(!net);
+ pd->seq_net = net;
+ return 0;
out:
return ret;
@@ -2558,6 +2590,9 @@ out:
static int pppol2tp_proc_release(struct inode *inode, struct file *file)
{
struct seq_file *m = (struct seq_file *)file->private_data;
+ struct pppol2tp_seq_data *pd = m->private;
+
+ put_net(pd->seq_net);
kfree(m->private);
m->private = NULL;
@@ -2565,7 +2600,7 @@ static int pppol2tp_proc_release(struct inode *inode, struct file *file)
return seq_release(inode, file);
}
-static struct file_operations pppol2tp_proc_fops = {
+static const struct file_operations pppol2tp_proc_fops = {
.owner = THIS_MODULE,
.open = pppol2tp_proc_open,
.read = seq_read,
@@ -2573,8 +2608,6 @@ static struct file_operations pppol2tp_proc_fops = {
.release = pppol2tp_proc_release,
};
-static struct proc_dir_entry *pppol2tp_proc;
-
#endif /* CONFIG_PROC_FS */
/*****************************************************************************
@@ -2606,6 +2639,57 @@ static struct pppox_proto pppol2tp_proto = {
.ioctl = pppol2tp_ioctl
};
+static __net_init int pppol2tp_init_net(struct net *net)
+{
+ struct pppol2tp_net *pn;
+ struct proc_dir_entry *pde;
+ int err;
+
+ pn = kzalloc(sizeof(*pn), GFP_KERNEL);
+ if (!pn)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pn->pppol2tp_tunnel_list);
+ rwlock_init(&pn->pppol2tp_tunnel_list_lock);
+
+ err = net_assign_generic(net, pppol2tp_net_id, pn);
+ if (err)
+ goto out;
+
+ pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
+#ifdef CONFIG_PROC_FS
+ if (!pde) {
+ err = -ENOMEM;
+ goto out;
+ }
+#endif
+
+ return 0;
+
+out:
+ kfree(pn);
+ return err;
+}
+
+static __net_exit void pppol2tp_exit_net(struct net *net)
+{
+ struct pppol2tp_net *pn;
+
+ proc_net_remove(net, "pppol2tp");
+ pn = net_generic(net, pppol2tp_net_id);
+ /*
+ * If someone has cached our net, a further
+ * net_generic() call will return NULL.
+ */
+ net_assign_generic(net, pppol2tp_net_id, NULL);
+ kfree(pn);
+}
+
+static __net_initdata struct pernet_operations pppol2tp_net_ops = {
+ .init = pppol2tp_init_net,
+ .exit = pppol2tp_exit_net,
+};
+
static int __init pppol2tp_init(void)
{
int err;
@@ -2617,23 +2701,17 @@ static int __init pppol2tp_init(void)
if (err)
goto out_unregister_pppol2tp_proto;
-#ifdef CONFIG_PROC_FS
- pppol2tp_proc = proc_net_fops_create(&init_net, "pppol2tp", 0,
- &pppol2tp_proc_fops);
- if (!pppol2tp_proc) {
- err = -ENOMEM;
+ err = register_pernet_gen_device(&pppol2tp_net_id, &pppol2tp_net_ops);
+ if (err)
goto out_unregister_pppox_proto;
- }
-#endif /* CONFIG_PROC_FS */
+
printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
PPPOL2TP_DRV_VERSION);
out:
return err;
-#ifdef CONFIG_PROC_FS
out_unregister_pppox_proto:
unregister_pppox_proto(PX_PROTO_OL2TP);
-#endif
out_unregister_pppol2tp_proto:
proto_unregister(&pppol2tp_sk_proto);
goto out;
@@ -2642,10 +2720,6 @@ out_unregister_pppol2tp_proto:
static void __exit pppol2tp_exit(void)
{
unregister_pppox_proto(PX_PROTO_OL2TP);
-
-#ifdef CONFIG_PROC_FS
- remove_proc_entry("pppol2tp", init_net.proc_net);
-#endif
proto_unregister(&pppol2tp_sk_proto);
}
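
For reference, the per-namespace pattern introduced above can be condensed into a minimal sketch; the foo_* names below are illustrative only and not part of this patch, while the generic-pointer API (net_assign_generic, register_pernet_gen_device) is the same one the patch itself uses:

static int foo_net_id;

struct foo_net {
	struct list_head	sessions;
	rwlock_t		lock;
};

static __net_init int foo_init_net(struct net *net)
{
	struct foo_net *fn = kzalloc(sizeof(*fn), GFP_KERNEL);
	int err;

	if (!fn)
		return -ENOMEM;
	INIT_LIST_HEAD(&fn->sessions);
	rwlock_init(&fn->lock);

	err = net_assign_generic(net, foo_net_id, fn);
	if (err)
		kfree(fn);	/* nothing else references it yet */
	return err;
}

static __net_exit void foo_exit_net(struct net *net)
{
	kfree(net_generic(net, foo_net_id));
}

static struct pernet_operations foo_net_ops = {
	.init	= foo_init_net,
	.exit	= foo_exit_net,
};

Module init then calls register_pernet_gen_device(&foo_net_id, &foo_net_ops), so the per-net state is created and torn down with each namespace rather than once globally, which is exactly what replaces the old global tunnel list and init_net-only /proc entry above.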
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
index 03aecc97fb4..4f6d33fbc67 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/pppox.c
@@ -108,9 +108,6 @@ static int pppox_create(struct net *net, struct socket *sock, int protocol)
{
int rc = -EPROTOTYPE;
- if (net != &init_net)
- return -EAFNOSUPPORT;
-
if (protocol < 0 || protocol > PX_MAX_PROTO)
goto out;
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 4b564eda5bd..06649d0c209 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -1403,6 +1403,19 @@ void gelic_net_tx_timeout(struct net_device *netdev)
atomic_dec(&card->tx_timeout_task_counter);
}
+static const struct net_device_ops gelic_netdevice_ops = {
+ .ndo_open = gelic_net_open,
+ .ndo_stop = gelic_net_stop,
+ .ndo_start_xmit = gelic_net_xmit,
+ .ndo_set_multicast_list = gelic_net_set_multi,
+ .ndo_change_mtu = gelic_net_change_mtu,
+ .ndo_tx_timeout = gelic_net_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = gelic_net_poll_controller,
+#endif
+};
+
/**
* gelic_ether_setup_netdev_ops - initialization of net_device operations
* @netdev: net_device structure
@@ -1412,21 +1425,12 @@ void gelic_net_tx_timeout(struct net_device *netdev)
static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
struct napi_struct *napi)
{
- netdev->open = &gelic_net_open;
- netdev->stop = &gelic_net_stop;
- netdev->hard_start_xmit = &gelic_net_xmit;
- netdev->set_multicast_list = &gelic_net_set_multi;
- netdev->change_mtu = &gelic_net_change_mtu;
- /* tx watchdog */
- netdev->tx_timeout = &gelic_net_tx_timeout;
netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
/* NAPI */
netif_napi_add(netdev, napi,
gelic_net_poll, GELIC_NET_NAPI_WEIGHT);
netdev->ethtool_ops = &gelic_ether_ethtool_ops;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- netdev->poll_controller = gelic_net_poll_controller;
-#endif
+ netdev->netdev_ops = &gelic_netdevice_ops;
}
/**
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index ec231424668..708ae067c33 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -2697,6 +2697,19 @@ static int gelic_wl_stop(struct net_device *netdev)
/* -- */
+static const struct net_device_ops gelic_wl_netdevice_ops = {
+ .ndo_open = gelic_wl_open,
+ .ndo_stop = gelic_wl_stop,
+ .ndo_start_xmit = gelic_net_xmit,
+ .ndo_set_multicast_list = gelic_net_set_multi,
+ .ndo_change_mtu = gelic_net_change_mtu,
+ .ndo_tx_timeout = gelic_net_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = gelic_net_poll_controller,
+#endif
+};
+
static struct ethtool_ops gelic_wl_ethtool_ops = {
.get_drvinfo = gelic_net_get_drvinfo,
.get_link = gelic_wl_get_link,
@@ -2711,21 +2724,12 @@ static void gelic_wl_setup_netdev_ops(struct net_device *netdev)
struct gelic_wl_info *wl;
wl = port_wl(netdev_priv(netdev));
BUG_ON(!wl);
- netdev->open = &gelic_wl_open;
- netdev->stop = &gelic_wl_stop;
- netdev->hard_start_xmit = &gelic_net_xmit;
- netdev->set_multicast_list = &gelic_net_set_multi;
- netdev->change_mtu = &gelic_net_change_mtu;
- netdev->wireless_data = &wl->wireless_data;
- netdev->wireless_handlers = &gelic_wl_wext_handler_def;
- /* tx watchdog */
- netdev->tx_timeout = &gelic_net_tx_timeout;
netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
netdev->ethtool_ops = &gelic_wl_ethtool_ops;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- netdev->poll_controller = gelic_net_poll_controller;
-#endif
+ netdev->netdev_ops = &gelic_wl_netdevice_ops;
+ netdev->wireless_data = &wl->wireless_data;
+ netdev->wireless_handlers = &gelic_wl_wext_handler_def;
}
/*
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 189ec29ac7a..8b2823c8dcc 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2292,7 +2292,7 @@ static int ql_poll(struct napi_struct *napi, int budget)
if (tx_cleaned + rx_cleaned != budget) {
spin_lock_irqsave(&qdev->hw_lock, hw_flags);
- __netif_rx_complete(napi);
+ __napi_complete(napi);
ql_update_small_bufq_prod_index(qdev);
ql_update_lrg_bufq_prod_index(qdev);
writel(qdev->rsp_consumer_index,
@@ -2351,8 +2351,8 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
spin_unlock(&qdev->adapter_lock);
} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
ql_disable_interrupts(qdev);
- if (likely(netif_rx_schedule_prep(&qdev->napi))) {
- __netif_rx_schedule(&qdev->napi);
+ if (likely(napi_schedule_prep(&qdev->napi))) {
+ __napi_schedule(&qdev->napi);
}
} else {
return IRQ_NONE;
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 45421c8b601..16eb9dd8528 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1642,7 +1642,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
rx_ring->cq_id);
if (work_done < budget) {
- __netif_rx_complete(napi);
+ __napi_complete(napi);
ql_enable_completion_interrupt(qdev, rx_ring->irq);
}
return work_done;
@@ -1727,7 +1727,7 @@ static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
struct rx_ring *rx_ring = dev_id;
- netif_rx_schedule(&rx_ring->napi);
+ napi_schedule(&rx_ring->napi);
return IRQ_HANDLED;
}
@@ -1813,7 +1813,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
&rx_ring->rx_work,
0);
else
- netif_rx_schedule(&rx_ring->napi);
+ napi_schedule(&rx_ring->napi);
work_done++;
}
}
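
All of the NAPI conversions in this series follow the same two-sided pattern; a minimal sketch is given below, with hypothetical foo_* helpers standing in for the driver-specific interrupt masking and RX processing:

/* ISR side: defer RX work to NAPI
 * (was netif_rx_schedule_prep()/__netif_rx_schedule()). */
static irqreturn_t foo_isr(int irq, void *dev_id)
{
	struct foo_priv *fp = dev_id;

	if (napi_schedule_prep(&fp->napi)) {
		foo_disable_rx_irq(fp);		/* device-specific, illustrative */
		__napi_schedule(&fp->napi);
	}
	return IRQ_HANDLED;
}

/* Poll side: finish when under budget, then re-enable interrupts
 * (was netif_rx_complete()). */
static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
	int work_done = foo_rx(fp, budget);

	if (work_done < budget) {
		napi_complete(napi);
		foo_enable_rx_irq(fp);		/* device-specific, illustrative */
	}
	return work_done;
}

Only the names change relative to the old netif_rx_* API; the napi_struct argument and the schedule/complete ordering stay the same, which is why the conversions above are mechanical.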
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 72fd9e97c19..cc0f886b0c2 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -677,7 +677,7 @@ static int r6040_poll(struct napi_struct *napi, int budget)
work_done = r6040_rx(dev, budget);
if (work_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
/* Enable RX interrupt */
iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER);
}
@@ -714,7 +714,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
/* Mask off RX interrupt */
misr &= ~RX_INTS;
- netif_rx_schedule(&lp->napi);
+ napi_schedule(&lp->napi);
}
/* TX interrupt request */
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 2c73ca606b3..1c4a980253f 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3581,8 +3581,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
tp->intr_mask = ~tp->napi_event;
- if (likely(netif_rx_schedule_prep(&tp->napi)))
- __netif_rx_schedule(&tp->napi);
+ if (likely(napi_schedule_prep(&tp->napi)))
+ __napi_schedule(&tp->napi);
else if (netif_msg_intr(tp)) {
printk(KERN_INFO "%s: interrupt %04x in poll\n",
dev->name, status);
@@ -3603,7 +3603,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
rtl8169_tx_interrupt(dev, tp, ioaddr);
if (work_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
tp->intr_mask = 0xffff;
/*
* 20040426: the barrier is not strictly required but the
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index f5c57c059bc..2a96a10fd0c 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2852,7 +2852,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
s2io_chk_rx_buffers(nic, ring);
if (pkts_processed < budget_org) {
- netif_rx_complete(napi);
+ napi_complete(napi);
/*Re Enable MSI-Rx Vector*/
addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
addr += 7 - ring->ring_no;
@@ -2889,7 +2889,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
break;
}
if (pkts_processed < budget_org) {
- netif_rx_complete(napi);
+ napi_complete(napi);
/* Re enable the Rx interrupts for the ring */
writeq(0, &bar0->rx_traffic_mask);
readl(&bar0->rx_traffic_mask);
@@ -4342,7 +4342,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
writeb(val8, addr);
val8 = readb(addr);
- netif_rx_schedule(&ring->napi);
+ napi_schedule(&ring->napi);
} else {
rx_intr_handler(ring, 0);
s2io_chk_rx_buffers(sp, ring);
@@ -4789,7 +4789,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
if (config->napi) {
if (reason & GEN_INTR_RXTRAFFIC) {
- netif_rx_schedule(&sp->napi);
+ napi_schedule(&sp->napi);
writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
readl(&bar0->rx_traffic_int);
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 31e38fae017..3e11c1d6d79 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2039,9 +2039,9 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance)
sbdma_tx_process(sc,&(sc->sbm_txdma), 0);
if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
- if (netif_rx_schedule_prep(&sc->napi)) {
+ if (napi_schedule_prep(&sc->napi)) {
__raw_writeq(0, sc->sbm_imr);
- __netif_rx_schedule(&sc->napi);
+ __napi_schedule(&sc->napi);
/* Depend on the exit from poll to reenable intr */
}
else {
@@ -2667,7 +2667,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget)
sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
if (work_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
#ifdef CONFIG_SBMAC_COALESCE
__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 8b75bef4a84..c13cbf099b8 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -13,6 +13,9 @@
* Both are almost identical and seem to be based on pci-skeleton.c
*
* Rewritten for 2.6 by Cesar Eduardo Barros
+ *
+ * A datasheet for this chip can be found at
+ * http://www.silan.com.cn/english/products/pdf/SC92031AY.pdf
*/
/* Note about set_mac_address: I don't know how to change the hardware
@@ -31,13 +34,7 @@
#include <asm/irq.h>
-#define PCI_VENDOR_ID_SILAN 0x1904
-#define PCI_DEVICE_ID_SILAN_SC92031 0x2031
-#define PCI_DEVICE_ID_SILAN_8139D 0x8139
-
#define SC92031_NAME "sc92031"
-#define SC92031_DESCRIPTION "Silan SC92031 PCI Fast Ethernet Adapter driver"
-#define SC92031_VERSION "2.0c"
/* BAR 0 is MMIO, BAR 1 is PIO */
#ifndef SC92031_USE_BAR
@@ -1264,7 +1261,6 @@ static void sc92031_ethtool_get_drvinfo(struct net_device *dev,
struct pci_dev *pdev = priv->pdev;
strcpy(drvinfo->driver, SC92031_NAME);
- strcpy(drvinfo->version, SC92031_VERSION);
strcpy(drvinfo->bus_info, pci_name(pdev));
}
@@ -1423,6 +1419,7 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
struct net_device *dev;
struct sc92031_priv *priv;
u32 mac0, mac1;
+ unsigned long base_addr;
err = pci_enable_device(pdev);
if (unlikely(err < 0))
@@ -1497,6 +1494,14 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
if (err < 0)
goto out_register_netdev;
+#if SC92031_USE_BAR == 0
+ base_addr = dev->mem_start;
+#elif SC92031_USE_BAR == 1
+ base_addr = dev->base_addr;
+#endif
+ printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name,
+ base_addr, dev->dev_addr, dev->irq);
+
return 0;
out_register_netdev:
@@ -1586,8 +1591,8 @@ out:
}
static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = {
- { PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_SC92031) },
- { PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_8139D) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);
@@ -1603,7 +1608,6 @@ static struct pci_driver sc92031_pci_driver = {
static int __init sc92031_init(void)
{
- printk(KERN_INFO SC92031_DESCRIPTION " " SC92031_VERSION "\n");
return pci_register_driver(&sc92031_pci_driver);
}
@@ -1617,5 +1621,4 @@ module_exit(sc92031_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>");
-MODULE_DESCRIPTION(SC92031_DESCRIPTION);
-MODULE_VERSION(SC92031_VERSION);
+MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver");
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
index c535408ad6b..12a82966b57 100644
--- a/drivers/net/sfc/Kconfig
+++ b/drivers/net/sfc/Kconfig
@@ -2,7 +2,6 @@ config SFC
tristate "Solarflare Solarstorm SFC4000 support"
depends on PCI && INET
select MII
- select INET_LRO
select CRC32
select I2C
select I2C_ALGOBIT
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 7673fd92eaf..3ee2a4548cb 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -182,7 +182,6 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
channel->rx_pkt = NULL;
}
- efx_flush_lro(channel);
efx_rx_strategy(channel);
efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
@@ -225,11 +224,11 @@ static int efx_poll(struct napi_struct *napi, int budget)
if (rx_packets < budget) {
/* There is no race here; although napi_disable() will
- * only wait for netif_rx_complete(), this isn't a problem
+ * only wait for napi_complete(), this isn't a problem
* since efx_channel_processed() will have no effect if
* interrupts have already been disabled.
*/
- netif_rx_complete(napi);
+ napi_complete(napi);
efx_channel_processed(channel);
}
@@ -1269,18 +1268,11 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
static int efx_init_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
- int rc;
efx_for_each_channel(channel, efx) {
channel->napi_dev = efx->net_dev;
- rc = efx_lro_init(&channel->lro_mgr, efx);
- if (rc)
- goto err;
}
return 0;
- err:
- efx_fini_napi(efx);
- return rc;
}
static void efx_fini_napi(struct efx_nic *efx)
@@ -1288,7 +1280,6 @@ static void efx_fini_napi(struct efx_nic *efx)
struct efx_channel *channel;
efx_for_each_channel(channel, efx) {
- efx_lro_fini(&channel->lro_mgr);
channel->napi_dev = NULL;
}
}
@@ -2097,7 +2088,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_TSO);
if (lro)
- net_dev->features |= NETIF_F_LRO;
+ net_dev->features |= NETIF_F_GRO;
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_TSO);
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 0dd7a532c78..fb1ac0e63c0 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -77,7 +77,7 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
channel->channel, raw_smp_processor_id());
channel->work_pending = true;
- netif_rx_schedule(&channel->napi_str);
+ napi_schedule(&channel->napi_str);
}
#endif /* EFX_EFX_H */
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 5f255f75754..8643505788c 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -25,15 +25,11 @@
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
-#include <linux/inet_lro.h>
#include <linux/i2c.h>
#include "enum.h"
#include "bitfield.h"
-#define EFX_MAX_LRO_DESCRIPTORS 8
-#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS
-
/**************************************************************************
*
* Build definitions
@@ -340,13 +336,10 @@ enum efx_rx_alloc_method {
* @eventq_read_ptr: Event queue read pointer
* @last_eventq_read_ptr: Last event queue read pointer value.
* @eventq_magic: Event queue magic value for driver-generated test events
- * @lro_mgr: LRO state
* @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
* and diagnostic counters
* @rx_alloc_push_pages: RX allocation method currently in use for pushing
* descriptors
- * @rx_alloc_pop_pages: RX allocation method currently in use for popping
- * descriptors
* @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
* @n_rx_ip_frag_err: Count of RX IP fragment errors
* @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
@@ -371,10 +364,8 @@ struct efx_channel {
unsigned int last_eventq_read_ptr;
unsigned int eventq_magic;
- struct net_lro_mgr lro_mgr;
int rx_alloc_level;
int rx_alloc_push_pages;
- int rx_alloc_pop_pages;
unsigned n_rx_tobe_disc;
unsigned n_rx_ip_frag_err;
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index b8ba4bbad88..a0345b38097 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -99,109 +99,6 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
}
-/**************************************************************************
- *
- * Linux generic LRO handling
- *
- **************************************************************************
- */
-
-static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
- void **tcpudp_hdr, u64 *hdr_flags, void *priv)
-{
- struct efx_channel *channel = priv;
- struct iphdr *iph;
- struct tcphdr *th;
-
- iph = (struct iphdr *)skb->data;
- if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP)
- goto fail;
-
- th = (struct tcphdr *)(skb->data + iph->ihl * 4);
-
- *tcpudp_hdr = th;
- *ip_hdr = iph;
- *hdr_flags = LRO_IPV4 | LRO_TCP;
-
- channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
- return 0;
-fail:
- channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
- return -1;
-}
-
-static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
- void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
- void *priv)
-{
- struct efx_channel *channel = priv;
- struct ethhdr *eh;
- struct iphdr *iph;
-
- /* We support EtherII and VLAN encapsulated IPv4 */
- eh = page_address(frag->page) + frag->page_offset;
- *mac_hdr = eh;
-
- if (eh->h_proto == htons(ETH_P_IP)) {
- iph = (struct iphdr *)(eh + 1);
- } else {
- struct vlan_ethhdr *veh = (struct vlan_ethhdr *)eh;
- if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
- goto fail;
-
- iph = (struct iphdr *)(veh + 1);
- }
- *ip_hdr = iph;
-
- /* We can only do LRO over TCP */
- if (iph->protocol != IPPROTO_TCP)
- goto fail;
-
- *hdr_flags = LRO_IPV4 | LRO_TCP;
- *tcpudp_hdr = (struct tcphdr *)((u8 *) iph + iph->ihl * 4);
-
- channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
- return 0;
- fail:
- channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
- return -1;
-}
-
-int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx)
-{
- size_t s = sizeof(struct net_lro_desc) * EFX_MAX_LRO_DESCRIPTORS;
- struct net_lro_desc *lro_arr;
-
- /* Allocate the LRO descriptors structure */
- lro_arr = kzalloc(s, GFP_KERNEL);
- if (lro_arr == NULL)
- return -ENOMEM;
-
- lro_mgr->lro_arr = lro_arr;
- lro_mgr->max_desc = EFX_MAX_LRO_DESCRIPTORS;
- lro_mgr->max_aggr = EFX_MAX_LRO_AGGR;
- lro_mgr->frag_align_pad = EFX_PAGE_SKB_ALIGN;
-
- lro_mgr->get_skb_header = efx_lro_get_skb_hdr;
- lro_mgr->get_frag_header = efx_get_frag_hdr;
- lro_mgr->dev = efx->net_dev;
-
- lro_mgr->features = LRO_F_NAPI;
-
- /* We can pass packets up with the checksum intact */
- lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
-
- lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
-
- return 0;
-}
-
-void efx_lro_fini(struct net_lro_mgr *lro_mgr)
-{
- kfree(lro_mgr->lro_arr);
- lro_mgr->lro_arr = NULL;
-}
-
/**
* efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
*
@@ -549,77 +446,31 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
static void efx_rx_packet_lro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf)
{
- struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
- void *priv = channel;
+ struct napi_struct *napi = &channel->napi_str;
/* Pass the skb/page into the LRO engine */
if (rx_buf->page) {
- struct skb_frag_struct frags;
+ struct napi_gro_fraginfo info;
- frags.page = rx_buf->page;
- frags.page_offset = efx_rx_buf_offset(rx_buf);
- frags.size = rx_buf->len;
+ info.frags[0].page = rx_buf->page;
+ info.frags[0].page_offset = efx_rx_buf_offset(rx_buf);
+ info.frags[0].size = rx_buf->len;
+ info.nr_frags = 1;
+ info.ip_summed = CHECKSUM_UNNECESSARY;
+ info.len = rx_buf->len;
- lro_receive_frags(lro_mgr, &frags, rx_buf->len,
- rx_buf->len, priv, 0);
+ napi_gro_frags(napi, &info);
EFX_BUG_ON_PARANOID(rx_buf->skb);
rx_buf->page = NULL;
} else {
EFX_BUG_ON_PARANOID(!rx_buf->skb);
- lro_receive_skb(lro_mgr, rx_buf->skb, priv);
+ napi_gro_receive(napi, rx_buf->skb);
rx_buf->skb = NULL;
}
}
-/* Allocate and construct an SKB around a struct page.*/
-static struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
- struct efx_nic *efx,
- int hdr_len)
-{
- struct sk_buff *skb;
-
- /* Allocate an SKB to store the headers */
- skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
- if (unlikely(skb == NULL)) {
- EFX_ERR_RL(efx, "RX out of memory for skb\n");
- return NULL;
- }
-
- EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags);
- EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
-
- skb->len = rx_buf->len;
- skb->truesize = rx_buf->len + sizeof(struct sk_buff);
- memcpy(skb->data, rx_buf->data, hdr_len);
- skb->tail += hdr_len;
-
- /* Append the remaining page onto the frag list */
- if (unlikely(rx_buf->len > hdr_len)) {
- struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
- frag->page = rx_buf->page;
- frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
- frag->size = skb->len - hdr_len;
- skb_shinfo(skb)->nr_frags = 1;
- skb->data_len = frag->size;
- } else {
- __free_pages(rx_buf->page, efx->rx_buffer_order);
- skb->data_len = 0;
- }
-
- /* Ownership has transferred from the rx_buf to skb */
- rx_buf->page = NULL;
-
- /* Move past the ethernet header */
- skb->protocol = eth_type_trans(skb, efx->net_dev);
-
- return skb;
-}
-
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int len, bool checksummed, bool discard)
{
@@ -687,7 +538,6 @@ void __efx_rx_packet(struct efx_channel *channel,
{
struct efx_nic *efx = channel->efx;
struct sk_buff *skb;
- bool lro = !!(efx->net_dev->features & NETIF_F_LRO);
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
@@ -709,41 +559,21 @@ void __efx_rx_packet(struct efx_channel *channel,
efx->net_dev);
}
- /* Both our generic-LRO and SFC-SSR support skb and page based
- * allocation, but neither support switching from one to the
- * other on the fly. If we spot that the allocation mode has
- * changed, then flush the LRO state.
- */
- if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
- efx_flush_lro(channel);
- channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
- }
- if (likely(checksummed && lro)) {
+ if (likely(checksummed || rx_buf->page)) {
efx_rx_packet_lro(channel, rx_buf);
goto done;
}
- /* Form an skb if required */
- if (rx_buf->page) {
- int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS);
- skb = efx_rx_mk_skb(rx_buf, efx, hdr_len);
- if (unlikely(skb == NULL)) {
- efx_free_rx_buffer(efx, rx_buf);
- goto done;
- }
- } else {
- /* We now own the SKB */
- skb = rx_buf->skb;
- rx_buf->skb = NULL;
- }
+ /* We now own the SKB */
+ skb = rx_buf->skb;
+ rx_buf->skb = NULL;
EFX_BUG_ON_PARANOID(rx_buf->page);
EFX_BUG_ON_PARANOID(rx_buf->skb);
EFX_BUG_ON_PARANOID(!skb);
/* Set the SKB flags */
- if (unlikely(!checksummed || !efx->rx_checksum_enabled))
- skb->ip_summed = CHECKSUM_NONE;
+ skb->ip_summed = CHECKSUM_NONE;
/* Pass the packet up */
netif_receive_skb(skb);
@@ -760,7 +590,7 @@ void efx_rx_strategy(struct efx_channel *channel)
enum efx_rx_alloc_method method = rx_alloc_method;
/* Only makes sense to use page based allocation if LRO is enabled */
- if (!(channel->efx->net_dev->features & NETIF_F_LRO)) {
+ if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
method = RX_ALLOC_METHOD_SKB;
} else if (method == RX_ALLOC_METHOD_AUTO) {
/* Constrain the rx_alloc_level */
@@ -865,11 +695,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->buffer = NULL;
}
-void efx_flush_lro(struct efx_channel *channel)
-{
- lro_flush_all(&channel->lro_mgr);
-}
-
module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
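
The sfc changes above drop the driver-private inet_lro state in favour of the stack's GRO entry points; a minimal sketch of the skb-based receive flavour, with efx specifics elided and the foo_* name purely illustrative:

/* skb-based RX buffer: hand the skb straight to GRO
 * (replaces lro_receive_skb()). */
static void foo_rx_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* hardware already verified it */
	napi_gro_receive(napi, skb);
}

Page-based buffers go through napi_gro_frags() with a napi_gro_fraginfo describing the fragment, as efx_rx_packet_lro() now does; in both cases the aggregation policy, descriptor pools and flushing that inet_lro required live in the core, which is what allows efx_lro_init(), efx_lro_fini() and efx_flush_lro() to be deleted.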
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h
index 0e88a9ddc1c..42ee7555a80 100644
--- a/drivers/net/sfc/rx.h
+++ b/drivers/net/sfc/rx.h
@@ -17,9 +17,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx);
-void efx_lro_fini(struct net_lro_mgr *lro_mgr);
-void efx_flush_lro(struct efx_channel *channel);
void efx_rx_strategy(struct efx_channel *channel);
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
void efx_rx_work(struct work_struct *data);
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index 16b80acb999..d21d014bf0c 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -24,6 +24,7 @@
*/
#include <linux/delay.h>
+#include <linux/rtnetlink.h>
#include "net_driver.h"
#include "efx.h"
#include "phy.h"
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 9ecb77da954..f1365097b4f 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -8,6 +8,7 @@
*/
#include <linux/delay.h>
+#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include "efx.h"
#include "mdio_10g.h"
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index c9dbb06f8c9..952d37ffee5 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3214,7 +3214,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
unsigned long flags;
spin_lock_irqsave(&hw->hw_lock, flags);
- __netif_rx_complete(napi);
+ __napi_complete(napi);
hw->intr_mask |= napimask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
skge_read32(hw, B0_IMSK);
@@ -3377,7 +3377,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
if (status & (IS_XA1_F|IS_R1_F)) {
struct skge_port *skge = netdev_priv(hw->dev[0]);
hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
- netif_rx_schedule(&skge->napi);
+ napi_schedule(&skge->napi);
}
if (status & IS_PA_TO_TX1)
@@ -3397,7 +3397,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
if (status & (IS_XA2_F|IS_R2_F)) {
hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
- netif_rx_schedule(&skge->napi);
+ napi_schedule(&skge->napi);
}
if (status & IS_PA_TO_RX2) {
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index b215a8d85e6..508e8da2f65 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1643,6 +1643,117 @@ static void smc_ethtool_setmsglevel(struct net_device *dev, u32 level)
lp->msg_enable = level;
}
+static int smc_write_eeprom_word(struct net_device *dev, u16 addr, u16 word)
+{
+ u16 ctl;
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+
+ spin_lock_irq(&lp->lock);
+ /* load word into GP register */
+ SMC_SELECT_BANK(lp, 1);
+ SMC_SET_GP(lp, word);
+ /* set the address to put the data in EEPROM */
+ SMC_SELECT_BANK(lp, 2);
+ SMC_SET_PTR(lp, addr);
+ /* tell it to write */
+ SMC_SELECT_BANK(lp, 1);
+ ctl = SMC_GET_CTL(lp);
+ SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_STORE));
+ /* wait for it to finish */
+ do {
+ udelay(1);
+ } while (SMC_GET_CTL(lp) & CTL_STORE);
+ /* clean up */
+ SMC_SET_CTL(lp, ctl);
+ SMC_SELECT_BANK(lp, 2);
+ spin_unlock_irq(&lp->lock);
+ return 0;
+}
+
+static int smc_read_eeprom_word(struct net_device *dev, u16 addr, u16 *word)
+{
+ u16 ctl;
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+
+ spin_lock_irq(&lp->lock);
+ /* set the EEPROM address to get the data from */
+ SMC_SELECT_BANK(lp, 2);
+ SMC_SET_PTR(lp, addr | PTR_READ);
+ /* tell it to load */
+ SMC_SELECT_BANK(lp, 1);
+ SMC_SET_GP(lp, 0xffff); /* init to known */
+ ctl = SMC_GET_CTL(lp);
+ SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_RELOAD));
+ /* wait for it to finish */
+ do {
+ udelay(1);
+ } while (SMC_GET_CTL(lp) & CTL_RELOAD);
+ /* read word from GP register */
+ *word = SMC_GET_GP(lp);
+ /* clean up */
+ SMC_SET_CTL(lp, ctl);
+ SMC_SELECT_BANK(lp, 2);
+ spin_unlock_irq(&lp->lock);
+ return 0;
+}
+
+static int smc_ethtool_geteeprom_len(struct net_device *dev)
+{
+ return 0x23 * 2;
+}
+
+static int smc_ethtool_geteeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ int i;
+ int imax;
+
+ DBG(1, "Reading %d bytes at %d(0x%x)\n",
+ eeprom->len, eeprom->offset, eeprom->offset);
+ imax = smc_ethtool_geteeprom_len(dev);
+ for (i = 0; i < eeprom->len; i += 2) {
+ int ret;
+ u16 wbuf;
+ int offset = i + eeprom->offset;
+ if (offset > imax)
+ break;
+ ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf);
+ if (ret != 0)
+ return ret;
+ DBG(2, "Read 0x%x from 0x%x\n", wbuf, offset >> 1);
+ data[i] = (wbuf >> 8) & 0xff;
+ data[i+1] = wbuf & 0xff;
+ }
+ return 0;
+}
+
+static int smc_ethtool_seteeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ int i;
+ int imax;
+
+ DBG(1, "Writing %d bytes to %d(0x%x)\n",
+ eeprom->len, eeprom->offset, eeprom->offset);
+ imax = smc_ethtool_geteeprom_len(dev);
+ for (i = 0; i < eeprom->len; i += 2) {
+ int ret;
+ u16 wbuf;
+ int offset = i + eeprom->offset;
+ if (offset > imax)
+ break;
+ wbuf = (data[i] << 8) | data[i + 1];
+ DBG(2, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1);
+ ret = smc_write_eeprom_word(dev, offset >> 1, wbuf);
+ if (ret != 0)
+ return ret;
+ }
+ return 0;
+}
+
static const struct ethtool_ops smc_ethtool_ops = {
.get_settings = smc_ethtool_getsettings,
.set_settings = smc_ethtool_setsettings,
@@ -1652,8 +1763,9 @@ static const struct ethtool_ops smc_ethtool_ops = {
.set_msglevel = smc_ethtool_setmsglevel,
.nway_reset = smc_ethtool_nwayreset,
.get_link = ethtool_op_get_link,
-// .get_eeprom = smc_ethtool_geteeprom,
-// .set_eeprom = smc_ethtool_seteeprom,
+ .get_eeprom_len = smc_ethtool_geteeprom_len,
+ .get_eeprom = smc_ethtool_geteeprom,
+ .set_eeprom = smc_ethtool_seteeprom,
};
/*
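
With the get_eeprom/set_eeprom hooks wired into smc_ethtool_ops above, the 0x46-byte EEPROM becomes reachable from userspace through the standard SIOCETHTOOL ioctl (or the ethtool utility's -e/-E options). A rough userspace sketch of a dump follows; the device name and error handling are purely illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	unsigned int i, len = 0x23 * 2;	/* matches smc_ethtool_geteeprom_len() */
	struct ethtool_eeprom *ee = calloc(1, sizeof(*ee) + len);
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ifreq ifr;

	if (!ee || fd < 0) {
		perror("setup");
		return 1;
	}

	ee->cmd = ETHTOOL_GEEPROM;
	ee->offset = 0;
	ee->len = len;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* illustrative device name */
	ifr.ifr_data = (char *)ee;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GEEPROM");
		return 1;
	}

	for (i = 0; i < ee->len; i++)
		printf("%02x%c", ee->data[i], (i % 16 == 15) ? '\n' : ' ');
	printf("\n");
	return 0;
}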
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index c4ccd121bc9..ed9ae43523a 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -1141,6 +1141,16 @@ static const char * chip_ids[ 16 ] = {
#define SMC_GET_MII(lp) SMC_inw(ioaddr, MII_REG(lp))
+#define SMC_GET_GP(lp) SMC_inw(ioaddr, GP_REG(lp))
+
+#define SMC_SET_GP(lp, x) \
+ do { \
+ if (SMC_MUST_ALIGN_WRITE(lp)) \
+ SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 1)); \
+ else \
+ SMC_outw(x, ioaddr, GP_REG(lp)); \
+ } while (0)
+
#define SMC_SET_MII(lp, x) SMC_outw(x, ioaddr, MII_REG(lp))
#define SMC_GET_MIR(lp) SMC_inw(ioaddr, MIR_REG(lp))
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index f513bdf1c88..d271ae39c6f 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -984,7 +984,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
/* We processed all packets available. Tell NAPI it can
* stop polling then re-enable rx interrupts */
smsc911x_reg_write(pdata, INT_STS, INT_STS_RSFL_);
- netif_rx_complete(napi);
+ napi_complete(napi);
temp = smsc911x_reg_read(pdata, INT_EN);
temp |= INT_EN_RSFL_EN_;
smsc911x_reg_write(pdata, INT_EN, temp);
@@ -1485,16 +1485,16 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
}
if (likely(intsts & inten & INT_STS_RSFL_)) {
- if (likely(netif_rx_schedule_prep(&pdata->napi))) {
+ if (likely(napi_schedule_prep(&pdata->napi))) {
/* Disable Rx interrupts */
temp = smsc911x_reg_read(pdata, INT_EN);
temp &= (~INT_EN_RSFL_EN_);
smsc911x_reg_write(pdata, INT_EN, temp);
/* Schedule a NAPI poll */
- __netif_rx_schedule(&pdata->napi);
+ __napi_schedule(&pdata->napi);
} else {
SMSC_WARNING(RX_ERR,
- "netif_rx_schedule_prep failed");
+ "napi_schedule_prep failed");
}
serviced = IRQ_HANDLED;
}
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index c14a4c6452c..79f4c228b03 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -666,7 +666,7 @@ static irqreturn_t smsc9420_isr(int irq, void *dev_id)
smsc9420_pci_flush_write(pd);
ints_to_clear |= (DMAC_STS_RX_ | DMAC_STS_NIS_);
- netif_rx_schedule(&pd->napi);
+ napi_schedule(&pd->napi);
}
if (ints_to_clear)
@@ -889,7 +889,7 @@ static int smsc9420_rx_poll(struct napi_struct *napi, int budget)
smsc9420_pci_flush_write(pd);
if (work_done < budget) {
- netif_rx_complete(&pd->napi);
+ napi_complete(&pd->napi);
/* re-enable RX DMA interrupts */
dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 88d2c67788d..7f6b4a4052e 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1301,7 +1301,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
/* if all packets are in the stack, enable interrupts and return 0 */
/* if not, return 1 */
if (packets_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
spider_net_rx_irq_on(card);
card->ignore_rx_ramfull = 0;
}
@@ -1528,7 +1528,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
card->num_rx_ints ++;
- netif_rx_schedule(&card->napi);
+ napi_schedule(&card->napi);
}
show_error = 0;
break;
@@ -1548,7 +1548,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
card->num_rx_ints ++;
- netif_rx_schedule(&card->napi);
+ napi_schedule(&card->napi);
show_error = 0;
break;
@@ -1562,7 +1562,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
card->num_rx_ints ++;
- netif_rx_schedule(&card->napi);
+ napi_schedule(&card->napi);
show_error = 0;
break;
@@ -1656,11 +1656,11 @@ spider_net_interrupt(int irq, void *ptr)
if (status_reg & SPIDER_NET_RXINT ) {
spider_net_rx_irq_off(card);
- netif_rx_schedule(&card->napi);
+ napi_schedule(&card->napi);
card->num_rx_ints ++;
}
if (status_reg & SPIDER_NET_TXINT)
- netif_rx_schedule(&card->napi);
+ napi_schedule(&card->napi);
if (status_reg & SPIDER_NET_LINKINT)
spider_net_link_reset(netdev);
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index da3a76b18ef..98fe79515ba 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1342,8 +1342,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
if (intr_status & (IntrRxDone | IntrRxEmpty)) {
u32 enable;
- if (likely(netif_rx_schedule_prep(&np->napi))) {
- __netif_rx_schedule(&np->napi);
+ if (likely(napi_schedule_prep(&np->napi))) {
+ __napi_schedule(&np->napi);
enable = readl(ioaddr + IntrEnable);
enable &= ~(IntrRxDone | IntrRxEmpty);
writel(enable, ioaddr + IntrEnable);
@@ -1587,7 +1587,7 @@ static int netdev_poll(struct napi_struct *napi, int budget)
intr_status = readl(ioaddr + IntrStatus);
} while (intr_status & (IntrRxDone | IntrRxEmpty));
- netif_rx_complete(napi);
+ napi_complete(napi);
intr_status = readl(ioaddr + IntrEnable);
intr_status |= IntrRxDone | IntrRxEmpty;
writel(intr_status, ioaddr + IntrEnable);
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 86c765d83de..4942059109f 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -921,7 +921,7 @@ static int gem_poll(struct napi_struct *napi, int budget)
gp->status = readl(gp->regs + GREG_STAT);
} while (gp->status & GREG_STAT_NAPI);
- __netif_rx_complete(napi);
+ __napi_complete(napi);
gem_enable_ints(gp);
spin_unlock_irqrestore(&gp->lock, flags);
@@ -944,7 +944,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
spin_lock_irqsave(&gp->lock, flags);
- if (netif_rx_schedule_prep(&gp->napi)) {
+ if (napi_schedule_prep(&gp->napi)) {
u32 gem_status = readl(gp->regs + GREG_STAT);
if (gem_status == 0) {
@@ -954,7 +954,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
}
gp->status = gem_status;
gem_disable_ints(gp);
- __netif_rx_schedule(&gp->napi);
+ __napi_schedule(&gp->napi);
}
spin_unlock_irqrestore(&gp->lock, flags);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index bcd0e60cbda..f42c67e93bf 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1609,8 +1609,8 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
if (!(dmactl & DMA_IntMask)) {
/* disable interrupts */
tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl);
- if (netif_rx_schedule_prep(&lp->napi))
- __netif_rx_schedule(&lp->napi);
+ if (napi_schedule_prep(&lp->napi))
+ __napi_schedule(&lp->napi);
else {
printk(KERN_ERR "%s: interrupt taken in poll\n",
dev->name);
@@ -1919,7 +1919,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
spin_unlock(&lp->lock);
if (received < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
/* enable interrupts */
tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
}
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index a7a4dc4d631..be9f38f8f0b 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -265,8 +265,8 @@ static irqreturn_t bdx_isr_napi(int irq, void *dev)
bdx_isr_extra(priv, isr);
if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) {
- if (likely(netif_rx_schedule_prep(&priv->napi))) {
- __netif_rx_schedule(&priv->napi);
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ __napi_schedule(&priv->napi);
RET(IRQ_HANDLED);
} else {
/* NOTE: we get here if intr has slipped into window
@@ -302,7 +302,7 @@ static int bdx_poll(struct napi_struct *napi, int budget)
* device lock and allow waiting tasks (eg rmmod) to advance) */
priv->napi_stop = 0;
- netif_rx_complete(napi);
+ napi_complete(napi);
bdx_enable_interrupts(priv);
}
return work_done;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 8b3f8468538..5b3d60568d5 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -860,7 +860,7 @@ static int tg3_bmcr_reset(struct tg3 *tp)
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
- struct tg3 *tp = (struct tg3 *)bp->priv;
+ struct tg3 *tp = bp->priv;
u32 val;
if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
@@ -874,7 +874,7 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
- struct tg3 *tp = (struct tg3 *)bp->priv;
+ struct tg3 *tp = bp->priv;
if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
return -EAGAIN;
@@ -4460,7 +4460,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
sblk->status &= ~SD_STATUS_UPDATED;
if (likely(!tg3_has_work(tp))) {
- netif_rx_complete(napi);
+ napi_complete(napi);
tg3_restart_ints(tp);
break;
}
@@ -4470,7 +4470,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
tx_recovery:
/* work_done is guaranteed to be less than budget. */
- netif_rx_complete(napi);
+ napi_complete(napi);
schedule_work(&tp->reset_task);
return work_done;
}
@@ -4519,7 +4519,7 @@ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
if (likely(!tg3_irq_sync(tp)))
- netif_rx_schedule(&tp->napi);
+ napi_schedule(&tp->napi);
return IRQ_HANDLED;
}
@@ -4544,7 +4544,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id)
*/
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
if (likely(!tg3_irq_sync(tp)))
- netif_rx_schedule(&tp->napi);
+ napi_schedule(&tp->napi);
return IRQ_RETVAL(1);
}
@@ -4586,7 +4586,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id)
sblk->status &= ~SD_STATUS_UPDATED;
if (likely(tg3_has_work(tp))) {
prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
- netif_rx_schedule(&tp->napi);
+ napi_schedule(&tp->napi);
} else {
/* No work, shared interrupt perhaps? re-enable
* interrupts, and flush that PCI write
@@ -4632,7 +4632,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
if (tg3_irq_sync(tp))
goto out;
- if (netif_rx_schedule_prep(&tp->napi)) {
+ if (napi_schedule_prep(&tp->napi)) {
prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
/* Update last_tag to mark that this status has been
* seen. Because interrupt may be shared, we may be
@@ -4640,7 +4640,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
* if tg3_poll() is not scheduled.
*/
tp->last_tag = sblk->status_tag;
- __netif_rx_schedule(&tp->napi);
+ __napi_schedule(&tp->napi);
}
out:
return IRQ_RETVAL(handled);
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 43853e3b210..4a65fc2dd92 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -274,6 +274,15 @@ static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value)
return ;
}
+
+static const struct net_device_ops xl_netdev_ops = {
+ .ndo_open = xl_open,
+ .ndo_stop = xl_close,
+ .ndo_start_xmit = xl_xmit,
+ .ndo_change_mtu = xl_change_mtu,
+ .ndo_set_multicast_list = xl_set_rx_mode,
+ .ndo_set_mac_address = xl_set_mac_address,
+};
static int __devinit xl_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
@@ -337,13 +346,7 @@ static int __devinit xl_probe(struct pci_dev *pdev,
return i ;
}
- dev->open=&xl_open;
- dev->hard_start_xmit=&xl_xmit;
- dev->change_mtu=&xl_change_mtu;
- dev->stop=&xl_close;
- dev->do_ioctl=NULL;
- dev->set_multicast_list=&xl_set_rx_mode;
- dev->set_mac_address=&xl_set_mac_address ;
+ dev->netdev_ops = &xl_netdev_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
pci_set_drvdata(pdev,dev) ;
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index b566d6d79ec..b9db1b5a58a 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -92,6 +92,8 @@ static void abyss_sifwritew(struct net_device *dev, unsigned short val, unsigned
outw(val, dev->base_addr + reg);
}
+static struct net_device_ops abyss_netdev_ops;
+
static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int versionprinted;
@@ -157,8 +159,7 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_
memcpy(tp->ProductID, "Madge PCI 16/4 Mk2", PROD_ID_SIZE + 1);
- dev->open = abyss_open;
- dev->stop = abyss_close;
+ dev->netdev_ops = &abyss_netdev_ops;
pci_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -450,6 +451,11 @@ static struct pci_driver abyss_driver = {
static int __init abyss_init (void)
{
+ abyss_netdev_ops = tms380tr_netdev_ops;
+
+ abyss_netdev_ops.ndo_open = abyss_open;
+ abyss_netdev_ops.ndo_stop = abyss_close;
+
return pci_register_driver(&abyss_driver);
}
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index fa7bce6e0c6..9d896116cf7 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -200,7 +200,6 @@ static void tr_rx(struct net_device *dev);
static void ibmtr_reset_timer(struct timer_list*tmr,struct net_device *dev);
static void tok_rerun(unsigned long dev_addr);
static void ibmtr_readlog(struct net_device *dev);
-static struct net_device_stats *tok_get_stats(struct net_device *dev);
static int ibmtr_change_mtu(struct net_device *dev, int mtu);
static void find_turbo_adapters(int *iolist);
@@ -816,18 +815,21 @@ static unsigned char __devinit get_sram_size(struct tok_info *adapt_info)
/*****************************************************************************/
+static const struct net_device_ops trdev_netdev_ops = {
+ .ndo_open = tok_open,
+ .ndo_stop = tok_close,
+ .ndo_start_xmit = tok_send_packet,
+ .ndo_set_multicast_list = tok_set_multicast_list,
+ .ndo_change_mtu = ibmtr_change_mtu,
+};
+
static int __devinit trdev_init(struct net_device *dev)
{
struct tok_info *ti = netdev_priv(dev);
SET_PAGE(ti->srb_page);
ti->open_failure = NO ;
- dev->open = tok_open;
- dev->stop = tok_close;
- dev->hard_start_xmit = tok_send_packet;
- dev->get_stats = tok_get_stats;
- dev->set_multicast_list = tok_set_multicast_list;
- dev->change_mtu = ibmtr_change_mtu;
+ dev->netdev_ops = &trdev_netdev_ops;
return 0;
}
@@ -1460,7 +1462,7 @@ static irqreturn_t tok_interrupt(int irq, void *dev_id)
"%02X\n",
(int)retcode, (int)readb(ti->ssb + 6));
else
- ti->tr_stats.tx_packets++;
+ dev->stats.tx_packets++;
break;
case XMIT_XID_CMD:
DPRINTK("xmit xid ret_code: %02X\n",
@@ -1646,7 +1648,7 @@ static void tr_tx(struct net_device *dev)
break;
}
writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
- ti->tr_stats.tx_bytes += ti->current_skb->len;
+ dev->stats.tx_bytes += ti->current_skb->len;
dev_kfree_skb_irq(ti->current_skb);
ti->current_skb = NULL;
netif_wake_queue(dev);
@@ -1722,7 +1724,7 @@ static void tr_rx(struct net_device *dev)
if (readb(llc + offsetof(struct trllc, llc)) != UI_CMD) {
SET_PAGE(ti->asb_page);
writeb(DATA_LOST, ti->asb + RETCODE_OFST);
- ti->tr_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
return;
}
@@ -1757,7 +1759,7 @@ static void tr_rx(struct net_device *dev)
if (!(skb = dev_alloc_skb(skb_size))) {
DPRINTK("out of memory. frame dropped.\n");
- ti->tr_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
SET_PAGE(ti->asb_page);
writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
@@ -1813,8 +1815,8 @@ static void tr_rx(struct net_device *dev)
writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
- ti->tr_stats.rx_bytes += skb->len;
- ti->tr_stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
skb->protocol = tr_type_trans(skb, dev);
if (IPv4_p) {
@@ -1876,21 +1878,6 @@ static void ibmtr_readlog(struct net_device *dev)
/*****************************************************************************/
-/* tok_get_stats(): Basically a scaffold routine which will return
- the address of the tr_statistics structure associated with
- this device -- the tr.... structure is an ethnet look-alike
- so at least for this iteration may suffice. */
-
-static struct net_device_stats *tok_get_stats(struct net_device *dev)
-{
-
- struct tok_info *toki;
- toki = netdev_priv(dev);
- return (struct net_device_stats *) &toki->tr_stats;
-}
-
-/*****************************************************************************/
-
static int ibmtr_change_mtu(struct net_device *dev, int mtu)
{
struct tok_info *ti = netdev_priv(dev);
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 239c75217b1..0b2b7925da2 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -207,7 +207,6 @@ static int streamer_xmit(struct sk_buff *skb, struct net_device *dev);
static int streamer_close(struct net_device *dev);
static void streamer_set_rx_mode(struct net_device *dev);
static irqreturn_t streamer_interrupt(int irq, void *dev_id);
-static struct net_device_stats *streamer_get_stats(struct net_device *dev);
static int streamer_set_mac_address(struct net_device *dev, void *addr);
static void streamer_arb_cmd(struct net_device *dev);
static int streamer_change_mtu(struct net_device *dev, int mtu);
@@ -222,6 +221,18 @@ struct streamer_private *dev_streamer=NULL;
#endif
#endif
+static const struct net_device_ops streamer_netdev_ops = {
+ .ndo_open = streamer_open,
+ .ndo_stop = streamer_close,
+ .ndo_start_xmit = streamer_xmit,
+ .ndo_change_mtu = streamer_change_mtu,
+#if STREAMER_IOCTL
+ .ndo_do_ioctl = streamer_ioctl,
+#endif
+ .ndo_set_multicast_list = streamer_set_rx_mode,
+ .ndo_set_mac_address = streamer_set_mac_address,
+};
+
static int __devinit streamer_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -321,18 +332,7 @@ static int __devinit streamer_init_one(struct pci_dev *pdev,
init_waitqueue_head(&streamer_priv->srb_wait);
init_waitqueue_head(&streamer_priv->trb_wait);
- dev->open = &streamer_open;
- dev->hard_start_xmit = &streamer_xmit;
- dev->change_mtu = &streamer_change_mtu;
- dev->stop = &streamer_close;
-#if STREAMER_IOCTL
- dev->do_ioctl = &streamer_ioctl;
-#else
- dev->do_ioctl = NULL;
-#endif
- dev->set_multicast_list = &streamer_set_rx_mode;
- dev->get_stats = &streamer_get_stats;
- dev->set_mac_address = &streamer_set_mac_address;
+ dev->netdev_ops = &streamer_netdev_ops;
dev->irq = pdev->irq;
dev->base_addr=pio_start;
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -937,7 +937,7 @@ static void streamer_rx(struct net_device *dev)
if (skb == NULL)
{
printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name);
- streamer_priv->streamer_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
} else { /* we allocated an skb OK */
if (buffer_cnt == 1) {
/* release the DMA mapping */
@@ -1009,8 +1009,8 @@ static void streamer_rx(struct net_device *dev)
/* send up to the protocol */
netif_rx(skb);
}
- streamer_priv->streamer_stats.rx_packets++;
- streamer_priv->streamer_stats.rx_bytes += length;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += length;
} /* if skb == null */
} /* end received without errors */
@@ -1053,8 +1053,8 @@ static irqreturn_t streamer_interrupt(int irq, void *dev_id)
while(streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) {
streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1);
streamer_priv->free_tx_ring_entries++;
- streamer_priv->streamer_stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len;
- streamer_priv->streamer_stats.tx_packets++;
+ dev->stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len;
+ dev->stats.tx_packets++;
dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]);
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef;
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0;
@@ -1484,13 +1484,6 @@ static void streamer_srb_bh(struct net_device *dev)
} /* switch srb[0] */
}
-static struct net_device_stats *streamer_get_stats(struct net_device *dev)
-{
- struct streamer_private *streamer_priv;
- streamer_priv = netdev_priv(dev);
- return (struct net_device_stats *) &streamer_priv->streamer_stats;
-}
-
static int streamer_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *saddr = addr;
diff --git a/drivers/net/tokenring/lanstreamer.h b/drivers/net/tokenring/lanstreamer.h
index 13ccee6449c..3c58d6a3fbc 100644
--- a/drivers/net/tokenring/lanstreamer.h
+++ b/drivers/net/tokenring/lanstreamer.h
@@ -299,7 +299,6 @@ struct streamer_private {
int tx_ring_free, tx_ring_last_status, rx_ring_last_received,
free_tx_ring_entries;
- struct net_device_stats streamer_stats;
__u16 streamer_lan_status;
__u8 streamer_ring_speed;
__u16 pkt_buf_sz;
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index ecb5c7c9691..77dc9da4c0b 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -187,7 +187,6 @@ static int olympic_close(struct net_device *dev);
static void olympic_set_rx_mode(struct net_device *dev);
static void olympic_freemem(struct net_device *dev) ;
static irqreturn_t olympic_interrupt(int irq, void *dev_id);
-static struct net_device_stats * olympic_get_stats(struct net_device *dev);
static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
static void olympic_arb_cmd(struct net_device *dev);
static int olympic_change_mtu(struct net_device *dev, int mtu);
@@ -195,6 +194,15 @@ static void olympic_srb_bh(struct net_device *dev) ;
static void olympic_asb_bh(struct net_device *dev) ;
static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ;
+static const struct net_device_ops olympic_netdev_ops = {
+ .ndo_open = olympic_open,
+ .ndo_stop = olympic_close,
+ .ndo_start_xmit = olympic_xmit,
+ .ndo_change_mtu = olympic_change_mtu,
+ .ndo_set_multicast_list = olympic_set_rx_mode,
+ .ndo_set_mac_address = olympic_set_mac_address,
+};
+
static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev ;
@@ -253,14 +261,7 @@ static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device
goto op_free_iomap;
}
- dev->open=&olympic_open;
- dev->hard_start_xmit=&olympic_xmit;
- dev->change_mtu=&olympic_change_mtu;
- dev->stop=&olympic_close;
- dev->do_ioctl=NULL;
- dev->set_multicast_list=&olympic_set_rx_mode;
- dev->get_stats=&olympic_get_stats ;
- dev->set_mac_address=&olympic_set_mac_address ;
+ dev->netdev_ops = &olympic_netdev_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
pci_set_drvdata(pdev,dev) ;
@@ -785,7 +786,7 @@ static void olympic_rx(struct net_device *dev)
}
olympic_priv->rx_ring_last_received += i ;
olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
- olympic_priv->olympic_stats.rx_errors++;
+ dev->stats.rx_errors++;
} else {
if (buffer_cnt == 1) {
@@ -796,7 +797,7 @@ static void olympic_rx(struct net_device *dev)
if (skb == NULL) {
printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
- olympic_priv->olympic_stats.rx_dropped++ ;
+ dev->stats.rx_dropped++;
/* Update counters even though we don't transfer the frame */
olympic_priv->rx_ring_last_received += i ;
olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
@@ -862,8 +863,8 @@ static void olympic_rx(struct net_device *dev)
skb->protocol = tr_type_trans(skb,dev);
netif_rx(skb) ;
}
- olympic_priv->olympic_stats.rx_packets++ ;
- olympic_priv->olympic_stats.rx_bytes += length ;
+ dev->stats.rx_packets++ ;
+ dev->stats.rx_bytes += length ;
} /* if skb == null */
} /* If status & 0x3b */
@@ -971,8 +972,8 @@ static irqreturn_t olympic_interrupt(int irq, void *dev_id)
olympic_priv->tx_ring_last_status++;
olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
olympic_priv->free_tx_ring_entries++;
- olympic_priv->olympic_stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
- olympic_priv->olympic_stats.tx_packets++ ;
+ dev->stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
+ dev->stats.tx_packets++ ;
pci_unmap_single(olympic_priv->pdev,
le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
@@ -1344,13 +1345,6 @@ static void olympic_srb_bh(struct net_device *dev)
}
-static struct net_device_stats * olympic_get_stats(struct net_device *dev)
-{
- struct olympic_private *olympic_priv ;
- olympic_priv=netdev_priv(dev);
- return (struct net_device_stats *) &olympic_priv->olympic_stats;
-}
-
static int olympic_set_mac_address (struct net_device *dev, void *addr)
{
struct sockaddr *saddr = addr ;
diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h
index 10fbba08978..30631bae4c9 100644
--- a/drivers/net/tokenring/olympic.h
+++ b/drivers/net/tokenring/olympic.h
@@ -275,7 +275,6 @@ struct olympic_private {
struct sk_buff *tx_ring_skb[OLYMPIC_TX_RING_SIZE], *rx_ring_skb[OLYMPIC_RX_RING_SIZE];
int tx_ring_free, tx_ring_last_status, rx_ring_last_received,rx_status_last_received, free_tx_ring_entries;
- struct net_device_stats olympic_stats ;
u16 olympic_lan_status ;
u8 olympic_ring_speed ;
u16 pkt_buf_sz ;
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 5be34c2fd48..b11bb72dc7a 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -2330,6 +2330,17 @@ void tmsdev_term(struct net_device *dev)
DMA_BIDIRECTIONAL);
}
+const struct net_device_ops tms380tr_netdev_ops = {
+ .ndo_open = tms380tr_open,
+ .ndo_stop = tms380tr_close,
+ .ndo_start_xmit = tms380tr_send_packet,
+ .ndo_tx_timeout = tms380tr_timeout,
+ .ndo_get_stats = tms380tr_get_stats,
+ .ndo_set_multicast_list = tms380tr_set_multicast_list,
+ .ndo_set_mac_address = tms380tr_set_mac_address,
+};
+EXPORT_SYMBOL(tms380tr_netdev_ops);
+
int tmsdev_init(struct net_device *dev, struct device *pdev)
{
struct net_local *tms_local;
@@ -2353,16 +2364,8 @@ int tmsdev_init(struct net_device *dev, struct device *pdev)
return -ENOMEM;
}
- /* These can be overridden by the card driver if needed */
- dev->open = tms380tr_open;
- dev->stop = tms380tr_close;
- dev->do_ioctl = NULL;
- dev->hard_start_xmit = tms380tr_send_packet;
- dev->tx_timeout = tms380tr_timeout;
+ dev->netdev_ops = &tms380tr_netdev_ops;
dev->watchdog_timeo = HZ;
- dev->get_stats = tms380tr_get_stats;
- dev->set_multicast_list = &tms380tr_set_multicast_list;
- dev->set_mac_address = tms380tr_set_mac_address;
return 0;
}
diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h
index 7af76d70884..60b30ee38dc 100644
--- a/drivers/net/tokenring/tms380tr.h
+++ b/drivers/net/tokenring/tms380tr.h
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
/* module prototypes */
+extern const struct net_device_ops tms380tr_netdev_ops;
int tms380tr_open(struct net_device *dev);
int tms380tr_close(struct net_device *dev);
irqreturn_t tms380tr_interrupt(int irq, void *dev_id);
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index 5f601773c26..b397e8785d6 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -157,8 +157,8 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
tp->tmspriv = cardinfo;
- dev->open = tms380tr_open;
- dev->stop = tms380tr_close;
+ dev->netdev_ops = &tms380tr_netdev_ops;
+
pci_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
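The tokenring hunks above all follow the same conversion pattern: the per-device function pointers (dev->open, dev->stop, dev->hard_start_xmit, ...) are collected into a const struct net_device_ops, the probe path assigns dev->netdev_ops once, and private statistics move to the shared dev->stats so ndo_get_stats can be dropped entirely. A minimal sketch of that pattern for a hypothetical driver; the foo_* names are illustrative and not part of the patch:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static int foo_open(struct net_device *dev)  { return 0; }
	static int foo_close(struct net_device *dev) { return 0; }

	static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* account against the shared stats block instead of a private
		 * struct net_device_stats, as the olympic change does */
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_open	= foo_open,
		.ndo_stop	= foo_close,
		.ndo_start_xmit	= foo_xmit,
	};

	static void foo_setup(struct net_device *dev)
	{
		/* replaces dev->open, dev->stop, dev->hard_start_xmit, ... */
		dev->netdev_ops = &foo_netdev_ops;
	}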
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 75461dbd487..1138782e561 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -888,7 +888,7 @@ static int tsi108_poll(struct napi_struct *napi, int budget)
if (num_received < budget) {
data->rxpending = 0;
- netif_rx_complete(napi);
+ napi_complete(napi);
TSI_WRITE(TSI108_EC_INTMASK,
TSI_READ(TSI108_EC_INTMASK)
@@ -915,11 +915,11 @@ static void tsi108_rx_int(struct net_device *dev)
*
* This can happen if this code races with tsi108_poll(), which masks
* the interrupts after tsi108_irq_one() read the mask, but before
- * netif_rx_schedule is called. It could also happen due to calls
+ * napi_schedule is called. It could also happen due to calls
* from tsi108_check_rxring().
*/
- if (netif_rx_schedule_prep(&data->napi)) {
+ if (napi_schedule_prep(&data->napi)) {
/* Mask, rather than ack, the receive interrupts. The ack
* will happen in tsi108_poll().
*/
@@ -930,7 +930,7 @@ static void tsi108_rx_int(struct net_device *dev)
| TSI108_INT_RXTHRESH |
TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
TSI108_INT_RXWAIT);
- __netif_rx_schedule(&data->napi);
+ __napi_schedule(&data->napi);
} else {
if (!netif_running(dev)) {
/* This can happen if an interrupt occurs while the
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 6c3428a37c0..9f946d42108 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -103,7 +103,7 @@ void oom_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct tulip_private *tp = netdev_priv(dev);
- netif_rx_schedule(&tp->napi);
+ napi_schedule(&tp->napi);
}
int tulip_poll(struct napi_struct *napi, int budget)
@@ -300,7 +300,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
/* Remove us from polling list and enable RX intr. */
- netif_rx_complete(napi);
+ napi_complete(napi);
iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
/* The last op happens after poll completion. Which means the following:
@@ -333,10 +333,10 @@ int tulip_poll(struct napi_struct *napi, int budget)
/* Think: timer_pending() was an explicit signature of bug.
* Timer can be pending now but fired and completed
- * before we did netif_rx_complete(). See? We would lose it. */
+ * before we did napi_complete(). See? We would lose it. */
/* remove ourselves from the polling list */
- netif_rx_complete(napi);
+ napi_complete(napi);
return work_done;
}
@@ -519,7 +519,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
rxd++;
/* Mask RX intrs and add the device to poll list. */
iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
- netif_rx_schedule(&tp->napi);
+ napi_schedule(&tp->napi);
if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
break;
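The tsi108 and tulip hunks are mechanical renames: netif_rx_schedule(), netif_rx_schedule_prep(), __netif_rx_schedule() and netif_rx_complete() become napi_schedule(), napi_schedule_prep(), __napi_schedule() and napi_complete(), all taking only the struct napi_struct. A minimal sketch of the resulting interrupt/poll pair; foo_priv and foo_rx() are placeholders for the driver's own state and RX routine, and register accesses are elided:

	static irqreturn_t foo_interrupt(int irq, void *dev_id)
	{
		struct foo_priv *priv = dev_id;

		if (napi_schedule_prep(&priv->napi)) {
			/* mask RX interrupts here, then hand off to the poll loop */
			__napi_schedule(&priv->napi);
		}
		return IRQ_HANDLED;
	}

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
		int work_done = foo_rx(priv, budget);	/* hardware-specific RX */

		if (work_done < budget) {
			napi_complete(napi);
			/* re-enable RX interrupts here */
		}
		return work_done;
	}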
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index d7b81e4fdd5..e9bcbdfe015 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -63,6 +63,7 @@
#include <linux/virtio_net.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/rtnetlink.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -87,14 +88,19 @@ struct tap_filter {
unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
};
+struct tun_file {
+ atomic_t count;
+ struct tun_struct *tun;
+ struct net *net;
+ wait_queue_head_t read_wait;
+};
+
struct tun_struct {
- struct list_head list;
+ struct tun_file *tfile;
unsigned int flags;
- int attached;
uid_t owner;
gid_t group;
- wait_queue_head_t read_wait;
struct sk_buff_head readq;
struct net_device *dev;
@@ -107,6 +113,88 @@ struct tun_struct {
#endif
};
+static int tun_attach(struct tun_struct *tun, struct file *file)
+{
+ struct tun_file *tfile = file->private_data;
+ const struct cred *cred = current_cred();
+ int err;
+
+ ASSERT_RTNL();
+
+ /* Check permissions */
+ if (((tun->owner != -1 && cred->euid != tun->owner) ||
+ (tun->group != -1 && cred->egid != tun->group)) &&
+ !capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ netif_tx_lock_bh(tun->dev);
+
+ err = -EINVAL;
+ if (tfile->tun)
+ goto out;
+
+ err = -EBUSY;
+ if (tun->tfile)
+ goto out;
+
+ err = 0;
+ tfile->tun = tun;
+ tun->tfile = tfile;
+ dev_hold(tun->dev);
+ atomic_inc(&tfile->count);
+
+out:
+ netif_tx_unlock_bh(tun->dev);
+ return err;
+}
+
+static void __tun_detach(struct tun_struct *tun)
+{
+ struct tun_file *tfile = tun->tfile;
+
+ /* Detach from net device */
+ netif_tx_lock_bh(tun->dev);
+ tfile->tun = NULL;
+ tun->tfile = NULL;
+ netif_tx_unlock_bh(tun->dev);
+
+ /* Drop read queue */
+ skb_queue_purge(&tun->readq);
+
+ /* Drop the extra count on the net device */
+ dev_put(tun->dev);
+}
+
+static void tun_detach(struct tun_struct *tun)
+{
+ rtnl_lock();
+ __tun_detach(tun);
+ rtnl_unlock();
+}
+
+static struct tun_struct *__tun_get(struct tun_file *tfile)
+{
+ struct tun_struct *tun = NULL;
+
+ if (atomic_inc_not_zero(&tfile->count))
+ tun = tfile->tun;
+
+ return tun;
+}
+
+static struct tun_struct *tun_get(struct file *file)
+{
+ return __tun_get(file->private_data);
+}
+
+static void tun_put(struct tun_struct *tun)
+{
+ struct tun_file *tfile = tun->tfile;
+
+ if (atomic_dec_and_test(&tfile->count))
+ tun_detach(tfile->tun);
+}
+
/* TAP filtering */

static void addr_hash_set(u32 *mask, const u8 *addr)
{
@@ -213,13 +301,23 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
/* Network device part of the driver */
-static int tun_net_id;
-struct tun_net {
- struct list_head dev_list;
-};
-
static const struct ethtool_ops tun_ethtool_ops;
+/* Net device detach from fd. */
+static void tun_net_uninit(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct tun_file *tfile = tun->tfile;
+
+	/* Inform the methods that they need to stop using the dev.
+ */
+ if (tfile) {
+ wake_up_all(&tfile->read_wait);
+ if (atomic_dec_and_test(&tfile->count))
+ __tun_detach(tun);
+ }
+}
+
/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
@@ -242,7 +340,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len);
/* Drop packet if interface is not attached */
- if (!tun->attached)
+ if (!tun->tfile)
goto drop;
/* Drop if the filter does not like it.
@@ -274,7 +372,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Notify and wake up reader process */
if (tun->flags & TUN_FASYNC)
kill_fasync(&tun->fasync, SIGIO, POLL_IN);
- wake_up_interruptible(&tun->read_wait);
+ wake_up_interruptible(&tun->tfile->read_wait);
return 0;
drop:
@@ -306,6 +404,7 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu)
}
static const struct net_device_ops tun_netdev_ops = {
+ .ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
.ndo_stop = tun_net_close,
.ndo_start_xmit = tun_net_xmit,
@@ -313,6 +412,7 @@ static const struct net_device_ops tun_netdev_ops = {
};
static const struct net_device_ops tap_netdev_ops = {
+ .ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
.ndo_stop = tun_net_close,
.ndo_start_xmit = tun_net_xmit,
@@ -359,19 +459,24 @@ static void tun_net_init(struct net_device *dev)
/* Poll */
static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
{
- struct tun_struct *tun = file->private_data;
+ struct tun_file *tfile = file->private_data;
+ struct tun_struct *tun = __tun_get(tfile);
unsigned int mask = POLLOUT | POLLWRNORM;
if (!tun)
- return -EBADFD;
+ return POLLERR;
DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
- poll_wait(file, &tun->read_wait, wait);
+ poll_wait(file, &tfile->read_wait, wait);
if (!skb_queue_empty(&tun->readq))
mask |= POLLIN | POLLRDNORM;
+ if (tun->dev->reg_state != NETREG_REGISTERED)
+ mask = POLLERR;
+
+ tun_put(tun);
return mask;
}
@@ -556,14 +661,18 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv,
static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
unsigned long count, loff_t pos)
{
- struct tun_struct *tun = iocb->ki_filp->private_data;
+ struct tun_struct *tun = tun_get(iocb->ki_filp);
+ ssize_t result;
if (!tun)
return -EBADFD;
DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count);
- return tun_get_user(tun, (struct iovec *) iv, iov_length(iv, count));
+ result = tun_get_user(tun, (struct iovec *) iv, iov_length(iv, count));
+
+ tun_put(tun);
+ return result;
}
/* Put packet to the user space buffer */
@@ -636,7 +745,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
unsigned long count, loff_t pos)
{
struct file *file = iocb->ki_filp;
- struct tun_struct *tun = file->private_data;
+ struct tun_file *tfile = file->private_data;
+ struct tun_struct *tun = __tun_get(tfile);
DECLARE_WAITQUEUE(wait, current);
struct sk_buff *skb;
ssize_t len, ret = 0;
@@ -647,10 +757,12 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
len = iov_length(iv, count);
- if (len < 0)
- return -EINVAL;
+ if (len < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
- add_wait_queue(&tun->read_wait, &wait);
+ add_wait_queue(&tfile->read_wait, &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
@@ -664,6 +776,10 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
ret = -ERESTARTSYS;
break;
}
+ if (tun->dev->reg_state != NETREG_REGISTERED) {
+ ret = -EIO;
+ break;
+ }
/* Nothing to read, let's sleep */
schedule();
@@ -677,8 +793,10 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
}
current->state = TASK_RUNNING;
- remove_wait_queue(&tun->read_wait, &wait);
+ remove_wait_queue(&tfile->read_wait, &wait);
+out:
+ tun_put(tun);
return ret;
}
@@ -687,54 +805,49 @@ static void tun_setup(struct net_device *dev)
struct tun_struct *tun = netdev_priv(dev);
skb_queue_head_init(&tun->readq);
- init_waitqueue_head(&tun->read_wait);
tun->owner = -1;
tun->group = -1;
dev->ethtool_ops = &tun_ethtool_ops;
dev->destructor = free_netdev;
- dev->features |= NETIF_F_NETNS_LOCAL;
}
-static struct tun_struct *tun_get_by_name(struct tun_net *tn, const char *name)
+/* Trivial set of netlink ops to allow deleting a tun or tap
+ * device with netlink.
+ */
+static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
{
- struct tun_struct *tun;
+ return -EINVAL;
+}
- ASSERT_RTNL();
- list_for_each_entry(tun, &tn->dev_list, list) {
- if (!strncmp(tun->dev->name, name, IFNAMSIZ))
- return tun;
- }
+static struct rtnl_link_ops tun_link_ops __read_mostly = {
+ .kind = DRV_NAME,
+ .priv_size = sizeof(struct tun_struct),
+ .setup = tun_setup,
+ .validate = tun_validate,
+};
- return NULL;
-}
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
- struct tun_net *tn;
struct tun_struct *tun;
struct net_device *dev;
- const struct cred *cred = current_cred();
int err;
- tn = net_generic(net, tun_net_id);
- tun = tun_get_by_name(tn, ifr->ifr_name);
- if (tun) {
- if (tun->attached)
- return -EBUSY;
-
- /* Check permissions */
- if (((tun->owner != -1 &&
- cred->euid != tun->owner) ||
- (tun->group != -1 &&
- cred->egid != tun->group)) &&
- !capable(CAP_NET_ADMIN)) {
- return -EPERM;
- }
+ dev = __dev_get_by_name(net, ifr->ifr_name);
+ if (dev) {
+ if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
+ tun = netdev_priv(dev);
+ else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
+ tun = netdev_priv(dev);
+ else
+ return -EINVAL;
+
+ err = tun_attach(tun, file);
+ if (err < 0)
+ return err;
}
- else if (__dev_get_by_name(net, ifr->ifr_name))
- return -EINVAL;
else {
char *name;
unsigned long flags = 0;
@@ -765,6 +878,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
return -ENOMEM;
dev_net_set(dev, net);
+ dev->rtnl_link_ops = &tun_link_ops;
tun = netdev_priv(dev);
tun->dev = dev;
@@ -783,7 +897,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (err < 0)
goto err_free_dev;
- list_add(&tun->list, &tn->dev_list);
+ err = tun_attach(tun, file);
+ if (err < 0)
+ goto err_free_dev;
}
DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name);
@@ -803,10 +919,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
else
tun->flags &= ~TUN_VNET_HDR;
- file->private_data = tun;
- tun->attached = 1;
- get_net(dev_net(tun->dev));
-
/* Make sure persistent devices do not get stuck in
* xoff state.
*/
@@ -824,7 +936,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
- struct tun_struct *tun = file->private_data;
+ struct tun_struct *tun = tun_get(file);
if (!tun)
return -EBADFD;
@@ -849,6 +961,7 @@ static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (tun->flags & TUN_VNET_HDR)
ifr->ifr_flags |= IFF_VNET_HDR;
+ tun_put(tun);
return 0;
}
@@ -895,7 +1008,8 @@ static int set_offload(struct net_device *dev, unsigned long arg)
static int tun_chr_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct tun_struct *tun = file->private_data;
+ struct tun_file *tfile = file->private_data;
+ struct tun_struct *tun;
void __user* argp = (void __user*)arg;
struct ifreq ifr;
int ret;
@@ -904,13 +1018,23 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
if (copy_from_user(&ifr, argp, sizeof ifr))
return -EFAULT;
+ if (cmd == TUNGETFEATURES) {
+ /* Currently this just means: "what IFF flags are valid?".
+ * This is needed because we never checked for invalid flags on
+ * TUNSETIFF. */
+ return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
+ IFF_VNET_HDR,
+ (unsigned int __user*)argp);
+ }
+
+ tun = __tun_get(tfile);
if (cmd == TUNSETIFF && !tun) {
int err;
ifr.ifr_name[IFNAMSIZ-1] = '\0';
rtnl_lock();
- err = tun_set_iff(current->nsproxy->net_ns, file, &ifr);
+ err = tun_set_iff(tfile->net, file, &ifr);
rtnl_unlock();
if (err)
@@ -921,28 +1045,21 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
return 0;
}
- if (cmd == TUNGETFEATURES) {
- /* Currently this just means: "what IFF flags are valid?".
- * This is needed because we never checked for invalid flags on
- * TUNSETIFF. */
- return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
- IFF_VNET_HDR,
- (unsigned int __user*)argp);
- }
if (!tun)
return -EBADFD;
DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);
+ ret = 0;
switch (cmd) {
case TUNGETIFF:
ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr);
if (ret)
- return ret;
+ break;
if (copy_to_user(argp, &ifr, sizeof(ifr)))
- return -EFAULT;
+ ret = -EFAULT;
break;
case TUNSETNOCSUM:
@@ -994,7 +1111,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
ret = 0;
}
rtnl_unlock();
- return ret;
+ break;
#ifdef TUN_DEBUG
case TUNSETDEBUG:
@@ -1005,24 +1122,25 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
rtnl_lock();
ret = set_offload(tun->dev, arg);
rtnl_unlock();
- return ret;
+ break;
case TUNSETTXFILTER:
/* Can be set only for TAPs */
+ ret = -EINVAL;
if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
- return -EINVAL;
+ break;
rtnl_lock();
ret = update_filter(&tun->txflt, (void __user *)arg);
rtnl_unlock();
- return ret;
+ break;
case SIOCGIFHWADDR:
/* Get hw address */
memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
ifr.ifr_hwaddr.sa_family = tun->dev->type;
if (copy_to_user(argp, &ifr, sizeof ifr))
- return -EFAULT;
- return 0;
+ ret = -EFAULT;
+ break;
case SIOCSIFHWADDR:
/* Set hw address */
@@ -1032,18 +1150,19 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
rtnl_lock();
ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
rtnl_unlock();
- return ret;
-
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
};
- return 0;
+ tun_put(tun);
+ return ret;
}
static int tun_chr_fasync(int fd, struct file *file, int on)
{
- struct tun_struct *tun = file->private_data;
+ struct tun_struct *tun = tun_get(file);
int ret;
if (!tun)
@@ -1065,42 +1184,48 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
ret = 0;
out:
unlock_kernel();
+ tun_put(tun);
return ret;
}
static int tun_chr_open(struct inode *inode, struct file * file)
{
+ struct tun_file *tfile;
cycle_kernel_lock();
DBG1(KERN_INFO "tunX: tun_chr_open\n");
- file->private_data = NULL;
+
+ tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
+ if (!tfile)
+ return -ENOMEM;
+ atomic_set(&tfile->count, 0);
+ tfile->tun = NULL;
+ tfile->net = get_net(current->nsproxy->net_ns);
+ init_waitqueue_head(&tfile->read_wait);
+ file->private_data = tfile;
return 0;
}
static int tun_chr_close(struct inode *inode, struct file *file)
{
- struct tun_struct *tun = file->private_data;
+ struct tun_file *tfile = file->private_data;
+ struct tun_struct *tun = __tun_get(tfile);
- if (!tun)
- return 0;
-
- DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
- rtnl_lock();
+ if (tun) {
+ DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
- /* Detach from net device */
- file->private_data = NULL;
- tun->attached = 0;
- put_net(dev_net(tun->dev));
+ rtnl_lock();
+ __tun_detach(tun);
- /* Drop read queue */
- skb_queue_purge(&tun->readq);
+	/* If desirable, unregister the netdevice. */
+ if (!(tun->flags & TUN_PERSIST))
+ unregister_netdevice(tun->dev);
- if (!(tun->flags & TUN_PERSIST)) {
- list_del(&tun->list);
- unregister_netdevice(tun->dev);
+ rtnl_unlock();
}
- rtnl_unlock();
+ put_net(tfile->net);
+ kfree(tfile);
return 0;
}
@@ -1181,7 +1306,7 @@ static void tun_set_msglevel(struct net_device *dev, u32 value)
static u32 tun_get_link(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
- return tun->attached;
+ return !!tun->tfile;
}
static u32 tun_get_rx_csum(struct net_device *dev)
@@ -1210,45 +1335,6 @@ static const struct ethtool_ops tun_ethtool_ops = {
.set_rx_csum = tun_set_rx_csum
};
-static int tun_init_net(struct net *net)
-{
- struct tun_net *tn;
-
- tn = kmalloc(sizeof(*tn), GFP_KERNEL);
- if (tn == NULL)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&tn->dev_list);
-
- if (net_assign_generic(net, tun_net_id, tn)) {
- kfree(tn);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void tun_exit_net(struct net *net)
-{
- struct tun_net *tn;
- struct tun_struct *tun, *nxt;
-
- tn = net_generic(net, tun_net_id);
-
- rtnl_lock();
- list_for_each_entry_safe(tun, nxt, &tn->dev_list, list) {
- DBG(KERN_INFO "%s cleaned up\n", tun->dev->name);
- unregister_netdevice(tun->dev);
- }
- rtnl_unlock();
-
- kfree(tn);
-}
-
-static struct pernet_operations tun_net_ops = {
- .init = tun_init_net,
- .exit = tun_exit_net,
-};
static int __init tun_init(void)
{
@@ -1257,10 +1343,10 @@ static int __init tun_init(void)
printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT);
- ret = register_pernet_gen_device(&tun_net_id, &tun_net_ops);
+ ret = rtnl_link_register(&tun_link_ops);
if (ret) {
- printk(KERN_ERR "tun: Can't register pernet ops\n");
- goto err_pernet;
+ printk(KERN_ERR "tun: Can't register link_ops\n");
+ goto err_linkops;
}
ret = misc_register(&tun_miscdev);
@@ -1268,18 +1354,17 @@ static int __init tun_init(void)
printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR);
goto err_misc;
}
- return 0;
-
+ return 0;
err_misc:
- unregister_pernet_gen_device(tun_net_id, &tun_net_ops);
-err_pernet:
+ rtnl_link_unregister(&tun_link_ops);
+err_linkops:
return ret;
}
static void tun_cleanup(void)
{
misc_deregister(&tun_miscdev);
- unregister_pernet_gen_device(tun_net_id, &tun_net_ops);
+ rtnl_link_unregister(&tun_link_ops);
}
module_init(tun_init);
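The tun rework replaces the single attached flag with a per-open struct tun_file whose refcount pins the attached device: every file operation brackets its work with tun_get()/tun_put(), so the netdevice can be unregistered (including via the new rtnl_link_ops) while an fd is still open. A condensed sketch of that access pattern, mirroring what tun_chr_aio_read()/write()/ioctl() do above; foo_fop is a hypothetical file operation:

	static long foo_fop(struct file *file)
	{
		struct tun_struct *tun = tun_get(file);	/* bumps tfile->count */
		long ret = 0;

		if (!tun)
			return -EBADFD;		/* no device attached to this fd */

		/* tun and tun->dev are pinned here and cannot be detached */

		tun_put(tun);	/* dropping the last reference runs tun_detach() */
		return ret;
	}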
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 3af9a9516cc..a8e5651f316 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -1783,7 +1783,7 @@ typhoon_poll(struct napi_struct *napi, int budget)
}
if (work_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
iowrite32(TYPHOON_INTR_NONE,
tp->ioaddr + TYPHOON_REG_INTR_MASK);
typhoon_post_pci_writes(tp->ioaddr);
@@ -1806,10 +1806,10 @@ typhoon_interrupt(int irq, void *dev_instance)
iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
- if (netif_rx_schedule_prep(&tp->napi)) {
+ if (napi_schedule_prep(&tp->napi)) {
iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
typhoon_post_pci_writes(ioaddr);
- __netif_rx_schedule(&tp->napi);
+ __napi_schedule(&tp->napi);
} else {
printk(KERN_ERR "%s: Error, poll already scheduled\n",
dev->name);
@@ -1944,7 +1944,7 @@ typhoon_start_runtime(struct typhoon *tp)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
- xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q);
+ xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0)
goto error_out;
diff --git a/drivers/net/typhoon.h b/drivers/net/typhoon.h
index dd7022ca735..673fd512591 100644
--- a/drivers/net/typhoon.h
+++ b/drivers/net/typhoon.h
@@ -174,18 +174,18 @@ struct tx_desc {
u64 tx_addr; /* opaque for hardware, for TX_DESC */
};
__le32 processFlags;
-#define TYPHOON_TX_PF_NO_CRC __constant_cpu_to_le32(0x00000001)
-#define TYPHOON_TX_PF_IP_CHKSUM __constant_cpu_to_le32(0x00000002)
-#define TYPHOON_TX_PF_TCP_CHKSUM __constant_cpu_to_le32(0x00000004)
-#define TYPHOON_TX_PF_TCP_SEGMENT __constant_cpu_to_le32(0x00000008)
-#define TYPHOON_TX_PF_INSERT_VLAN __constant_cpu_to_le32(0x00000010)
-#define TYPHOON_TX_PF_IPSEC __constant_cpu_to_le32(0x00000020)
-#define TYPHOON_TX_PF_VLAN_PRIORITY __constant_cpu_to_le32(0x00000040)
-#define TYPHOON_TX_PF_UDP_CHKSUM __constant_cpu_to_le32(0x00000080)
-#define TYPHOON_TX_PF_PAD_FRAME __constant_cpu_to_le32(0x00000100)
-#define TYPHOON_TX_PF_RESERVED __constant_cpu_to_le32(0x00000e00)
-#define TYPHOON_TX_PF_VLAN_MASK __constant_cpu_to_le32(0x0ffff000)
-#define TYPHOON_TX_PF_INTERNAL __constant_cpu_to_le32(0xf0000000)
+#define TYPHOON_TX_PF_NO_CRC cpu_to_le32(0x00000001)
+#define TYPHOON_TX_PF_IP_CHKSUM cpu_to_le32(0x00000002)
+#define TYPHOON_TX_PF_TCP_CHKSUM cpu_to_le32(0x00000004)
+#define TYPHOON_TX_PF_TCP_SEGMENT cpu_to_le32(0x00000008)
+#define TYPHOON_TX_PF_INSERT_VLAN cpu_to_le32(0x00000010)
+#define TYPHOON_TX_PF_IPSEC cpu_to_le32(0x00000020)
+#define TYPHOON_TX_PF_VLAN_PRIORITY cpu_to_le32(0x00000040)
+#define TYPHOON_TX_PF_UDP_CHKSUM cpu_to_le32(0x00000080)
+#define TYPHOON_TX_PF_PAD_FRAME cpu_to_le32(0x00000100)
+#define TYPHOON_TX_PF_RESERVED cpu_to_le32(0x00000e00)
+#define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000)
+#define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000)
#define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12
} __attribute__ ((packed));
@@ -203,8 +203,8 @@ struct tcpopt_desc {
u8 flags;
u8 numDesc;
__le16 mss_flags;
-#define TYPHOON_TSO_FIRST __constant_cpu_to_le16(0x1000)
-#define TYPHOON_TSO_LAST __constant_cpu_to_le16(0x2000)
+#define TYPHOON_TSO_FIRST cpu_to_le16(0x1000)
+#define TYPHOON_TSO_LAST cpu_to_le16(0x2000)
__le32 respAddrLo;
__le32 bytesTx;
__le32 status;
@@ -222,8 +222,8 @@ struct ipsec_desc {
u8 flags;
u8 numDesc;
__le16 ipsecFlags;
-#define TYPHOON_IPSEC_GEN_IV __constant_cpu_to_le16(0x0000)
-#define TYPHOON_IPSEC_USE_IV __constant_cpu_to_le16(0x0001)
+#define TYPHOON_IPSEC_GEN_IV cpu_to_le16(0x0000)
+#define TYPHOON_IPSEC_USE_IV cpu_to_le16(0x0001)
__le32 sa1;
__le32 sa2;
__le32 reserved;
@@ -248,41 +248,41 @@ struct rx_desc {
u32 addr; /* opaque, comes from virtAddr */
u32 addrHi; /* opaque, comes from virtAddrHi */
__le32 rxStatus;
-#define TYPHOON_RX_ERR_INTERNAL __constant_cpu_to_le32(0x00000000)
-#define TYPHOON_RX_ERR_FIFO_UNDERRUN __constant_cpu_to_le32(0x00000001)
-#define TYPHOON_RX_ERR_BAD_SSD __constant_cpu_to_le32(0x00000002)
-#define TYPHOON_RX_ERR_RUNT __constant_cpu_to_le32(0x00000003)
-#define TYPHOON_RX_ERR_CRC __constant_cpu_to_le32(0x00000004)
-#define TYPHOON_RX_ERR_OVERSIZE __constant_cpu_to_le32(0x00000005)
-#define TYPHOON_RX_ERR_ALIGN __constant_cpu_to_le32(0x00000006)
-#define TYPHOON_RX_ERR_DRIBBLE __constant_cpu_to_le32(0x00000007)
-#define TYPHOON_RX_PROTO_MASK __constant_cpu_to_le32(0x00000003)
-#define TYPHOON_RX_PROTO_UNKNOWN __constant_cpu_to_le32(0x00000000)
-#define TYPHOON_RX_PROTO_IP __constant_cpu_to_le32(0x00000001)
-#define TYPHOON_RX_PROTO_IPX __constant_cpu_to_le32(0x00000002)
-#define TYPHOON_RX_VLAN __constant_cpu_to_le32(0x00000004)
-#define TYPHOON_RX_IP_FRAG __constant_cpu_to_le32(0x00000008)
-#define TYPHOON_RX_IPSEC __constant_cpu_to_le32(0x00000010)
-#define TYPHOON_RX_IP_CHK_FAIL __constant_cpu_to_le32(0x00000020)
-#define TYPHOON_RX_TCP_CHK_FAIL __constant_cpu_to_le32(0x00000040)
-#define TYPHOON_RX_UDP_CHK_FAIL __constant_cpu_to_le32(0x00000080)
-#define TYPHOON_RX_IP_CHK_GOOD __constant_cpu_to_le32(0x00000100)
-#define TYPHOON_RX_TCP_CHK_GOOD __constant_cpu_to_le32(0x00000200)
-#define TYPHOON_RX_UDP_CHK_GOOD __constant_cpu_to_le32(0x00000400)
+#define TYPHOON_RX_ERR_INTERNAL cpu_to_le32(0x00000000)
+#define TYPHOON_RX_ERR_FIFO_UNDERRUN cpu_to_le32(0x00000001)
+#define TYPHOON_RX_ERR_BAD_SSD cpu_to_le32(0x00000002)
+#define TYPHOON_RX_ERR_RUNT cpu_to_le32(0x00000003)
+#define TYPHOON_RX_ERR_CRC cpu_to_le32(0x00000004)
+#define TYPHOON_RX_ERR_OVERSIZE cpu_to_le32(0x00000005)
+#define TYPHOON_RX_ERR_ALIGN cpu_to_le32(0x00000006)
+#define TYPHOON_RX_ERR_DRIBBLE cpu_to_le32(0x00000007)
+#define TYPHOON_RX_PROTO_MASK cpu_to_le32(0x00000003)
+#define TYPHOON_RX_PROTO_UNKNOWN cpu_to_le32(0x00000000)
+#define TYPHOON_RX_PROTO_IP cpu_to_le32(0x00000001)
+#define TYPHOON_RX_PROTO_IPX cpu_to_le32(0x00000002)
+#define TYPHOON_RX_VLAN cpu_to_le32(0x00000004)
+#define TYPHOON_RX_IP_FRAG cpu_to_le32(0x00000008)
+#define TYPHOON_RX_IPSEC cpu_to_le32(0x00000010)
+#define TYPHOON_RX_IP_CHK_FAIL cpu_to_le32(0x00000020)
+#define TYPHOON_RX_TCP_CHK_FAIL cpu_to_le32(0x00000040)
+#define TYPHOON_RX_UDP_CHK_FAIL cpu_to_le32(0x00000080)
+#define TYPHOON_RX_IP_CHK_GOOD cpu_to_le32(0x00000100)
+#define TYPHOON_RX_TCP_CHK_GOOD cpu_to_le32(0x00000200)
+#define TYPHOON_RX_UDP_CHK_GOOD cpu_to_le32(0x00000400)
__le16 filterResults;
-#define TYPHOON_RX_FILTER_MASK __constant_cpu_to_le16(0x7fff)
-#define TYPHOON_RX_FILTERED __constant_cpu_to_le16(0x8000)
+#define TYPHOON_RX_FILTER_MASK cpu_to_le16(0x7fff)
+#define TYPHOON_RX_FILTERED cpu_to_le16(0x8000)
__le16 ipsecResults;
-#define TYPHOON_RX_OUTER_AH_GOOD __constant_cpu_to_le16(0x0001)
-#define TYPHOON_RX_OUTER_ESP_GOOD __constant_cpu_to_le16(0x0002)
-#define TYPHOON_RX_INNER_AH_GOOD __constant_cpu_to_le16(0x0004)
-#define TYPHOON_RX_INNER_ESP_GOOD __constant_cpu_to_le16(0x0008)
-#define TYPHOON_RX_OUTER_AH_FAIL __constant_cpu_to_le16(0x0010)
-#define TYPHOON_RX_OUTER_ESP_FAIL __constant_cpu_to_le16(0x0020)
-#define TYPHOON_RX_INNER_AH_FAIL __constant_cpu_to_le16(0x0040)
-#define TYPHOON_RX_INNER_ESP_FAIL __constant_cpu_to_le16(0x0080)
-#define TYPHOON_RX_UNKNOWN_SA __constant_cpu_to_le16(0x0100)
-#define TYPHOON_RX_ESP_FORMAT_ERR __constant_cpu_to_le16(0x0200)
+#define TYPHOON_RX_OUTER_AH_GOOD cpu_to_le16(0x0001)
+#define TYPHOON_RX_OUTER_ESP_GOOD cpu_to_le16(0x0002)
+#define TYPHOON_RX_INNER_AH_GOOD cpu_to_le16(0x0004)
+#define TYPHOON_RX_INNER_ESP_GOOD cpu_to_le16(0x0008)
+#define TYPHOON_RX_OUTER_AH_FAIL cpu_to_le16(0x0010)
+#define TYPHOON_RX_OUTER_ESP_FAIL cpu_to_le16(0x0020)
+#define TYPHOON_RX_INNER_AH_FAIL cpu_to_le16(0x0040)
+#define TYPHOON_RX_INNER_ESP_FAIL cpu_to_le16(0x0080)
+#define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100)
+#define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200)
__be32 vlanTag;
} __attribute__ ((packed));
@@ -318,31 +318,31 @@ struct cmd_desc {
u8 flags;
u8 numDesc;
__le16 cmd;
-#define TYPHOON_CMD_TX_ENABLE __constant_cpu_to_le16(0x0001)
-#define TYPHOON_CMD_TX_DISABLE __constant_cpu_to_le16(0x0002)
-#define TYPHOON_CMD_RX_ENABLE __constant_cpu_to_le16(0x0003)
-#define TYPHOON_CMD_RX_DISABLE __constant_cpu_to_le16(0x0004)
-#define TYPHOON_CMD_SET_RX_FILTER __constant_cpu_to_le16(0x0005)
-#define TYPHOON_CMD_READ_STATS __constant_cpu_to_le16(0x0007)
-#define TYPHOON_CMD_XCVR_SELECT __constant_cpu_to_le16(0x0013)
-#define TYPHOON_CMD_SET_MAX_PKT_SIZE __constant_cpu_to_le16(0x001a)
-#define TYPHOON_CMD_READ_MEDIA_STATUS __constant_cpu_to_le16(0x001b)
-#define TYPHOON_CMD_GOTO_SLEEP __constant_cpu_to_le16(0x0023)
-#define TYPHOON_CMD_SET_MULTICAST_HASH __constant_cpu_to_le16(0x0025)
-#define TYPHOON_CMD_SET_MAC_ADDRESS __constant_cpu_to_le16(0x0026)
-#define TYPHOON_CMD_READ_MAC_ADDRESS __constant_cpu_to_le16(0x0027)
-#define TYPHOON_CMD_VLAN_TYPE_WRITE __constant_cpu_to_le16(0x002b)
-#define TYPHOON_CMD_CREATE_SA __constant_cpu_to_le16(0x0034)
-#define TYPHOON_CMD_DELETE_SA __constant_cpu_to_le16(0x0035)
-#define TYPHOON_CMD_READ_VERSIONS __constant_cpu_to_le16(0x0043)
-#define TYPHOON_CMD_IRQ_COALESCE_CTRL __constant_cpu_to_le16(0x0045)
-#define TYPHOON_CMD_ENABLE_WAKE_EVENTS __constant_cpu_to_le16(0x0049)
-#define TYPHOON_CMD_SET_OFFLOAD_TASKS __constant_cpu_to_le16(0x004f)
-#define TYPHOON_CMD_HELLO_RESP __constant_cpu_to_le16(0x0057)
-#define TYPHOON_CMD_HALT __constant_cpu_to_le16(0x005d)
-#define TYPHOON_CMD_READ_IPSEC_INFO __constant_cpu_to_le16(0x005e)
-#define TYPHOON_CMD_GET_IPSEC_ENABLE __constant_cpu_to_le16(0x0067)
-#define TYPHOON_CMD_GET_CMD_LVL __constant_cpu_to_le16(0x0069)
+#define TYPHOON_CMD_TX_ENABLE cpu_to_le16(0x0001)
+#define TYPHOON_CMD_TX_DISABLE cpu_to_le16(0x0002)
+#define TYPHOON_CMD_RX_ENABLE cpu_to_le16(0x0003)
+#define TYPHOON_CMD_RX_DISABLE cpu_to_le16(0x0004)
+#define TYPHOON_CMD_SET_RX_FILTER cpu_to_le16(0x0005)
+#define TYPHOON_CMD_READ_STATS cpu_to_le16(0x0007)
+#define TYPHOON_CMD_XCVR_SELECT cpu_to_le16(0x0013)
+#define TYPHOON_CMD_SET_MAX_PKT_SIZE cpu_to_le16(0x001a)
+#define TYPHOON_CMD_READ_MEDIA_STATUS cpu_to_le16(0x001b)
+#define TYPHOON_CMD_GOTO_SLEEP cpu_to_le16(0x0023)
+#define TYPHOON_CMD_SET_MULTICAST_HASH cpu_to_le16(0x0025)
+#define TYPHOON_CMD_SET_MAC_ADDRESS cpu_to_le16(0x0026)
+#define TYPHOON_CMD_READ_MAC_ADDRESS cpu_to_le16(0x0027)
+#define TYPHOON_CMD_VLAN_TYPE_WRITE cpu_to_le16(0x002b)
+#define TYPHOON_CMD_CREATE_SA cpu_to_le16(0x0034)
+#define TYPHOON_CMD_DELETE_SA cpu_to_le16(0x0035)
+#define TYPHOON_CMD_READ_VERSIONS cpu_to_le16(0x0043)
+#define TYPHOON_CMD_IRQ_COALESCE_CTRL cpu_to_le16(0x0045)
+#define TYPHOON_CMD_ENABLE_WAKE_EVENTS cpu_to_le16(0x0049)
+#define TYPHOON_CMD_SET_OFFLOAD_TASKS cpu_to_le16(0x004f)
+#define TYPHOON_CMD_HELLO_RESP cpu_to_le16(0x0057)
+#define TYPHOON_CMD_HALT cpu_to_le16(0x005d)
+#define TYPHOON_CMD_READ_IPSEC_INFO cpu_to_le16(0x005e)
+#define TYPHOON_CMD_GET_IPSEC_ENABLE cpu_to_le16(0x0067)
+#define TYPHOON_CMD_GET_CMD_LVL cpu_to_le16(0x0069)
u16 seqNo;
__le16 parm1;
__le32 parm2;
@@ -380,11 +380,11 @@ struct resp_desc {
/* TYPHOON_CMD_SET_RX_FILTER filter bits (cmd.parm1)
*/
-#define TYPHOON_RX_FILTER_DIRECTED __constant_cpu_to_le16(0x0001)
-#define TYPHOON_RX_FILTER_ALL_MCAST __constant_cpu_to_le16(0x0002)
-#define TYPHOON_RX_FILTER_BROADCAST __constant_cpu_to_le16(0x0004)
-#define TYPHOON_RX_FILTER_PROMISCOUS __constant_cpu_to_le16(0x0008)
-#define TYPHOON_RX_FILTER_MCAST_HASH __constant_cpu_to_le16(0x0010)
+#define TYPHOON_RX_FILTER_DIRECTED cpu_to_le16(0x0001)
+#define TYPHOON_RX_FILTER_ALL_MCAST cpu_to_le16(0x0002)
+#define TYPHOON_RX_FILTER_BROADCAST cpu_to_le16(0x0004)
+#define TYPHOON_RX_FILTER_PROMISCOUS cpu_to_le16(0x0008)
+#define TYPHOON_RX_FILTER_MCAST_HASH cpu_to_le16(0x0010)
/* TYPHOON_CMD_READ_STATS response format
*/
@@ -416,40 +416,40 @@ struct stats_resp {
__le32 rxOverflow;
__le32 rxFiltered;
__le32 linkStatus;
-#define TYPHOON_LINK_STAT_MASK __constant_cpu_to_le32(0x00000001)
-#define TYPHOON_LINK_GOOD __constant_cpu_to_le32(0x00000001)
-#define TYPHOON_LINK_BAD __constant_cpu_to_le32(0x00000000)
-#define TYPHOON_LINK_SPEED_MASK __constant_cpu_to_le32(0x00000002)
-#define TYPHOON_LINK_100MBPS __constant_cpu_to_le32(0x00000002)
-#define TYPHOON_LINK_10MBPS __constant_cpu_to_le32(0x00000000)
-#define TYPHOON_LINK_DUPLEX_MASK __constant_cpu_to_le32(0x00000004)
-#define TYPHOON_LINK_FULL_DUPLEX __constant_cpu_to_le32(0x00000004)
-#define TYPHOON_LINK_HALF_DUPLEX __constant_cpu_to_le32(0x00000000)
+#define TYPHOON_LINK_STAT_MASK cpu_to_le32(0x00000001)
+#define TYPHOON_LINK_GOOD cpu_to_le32(0x00000001)
+#define TYPHOON_LINK_BAD cpu_to_le32(0x00000000)
+#define TYPHOON_LINK_SPEED_MASK cpu_to_le32(0x00000002)
+#define TYPHOON_LINK_100MBPS cpu_to_le32(0x00000002)
+#define TYPHOON_LINK_10MBPS cpu_to_le32(0x00000000)
+#define TYPHOON_LINK_DUPLEX_MASK cpu_to_le32(0x00000004)
+#define TYPHOON_LINK_FULL_DUPLEX cpu_to_le32(0x00000004)
+#define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000)
__le32 unused2;
__le32 unused3;
} __attribute__ ((packed));
/* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1)
*/
-#define TYPHOON_XCVR_10HALF __constant_cpu_to_le16(0x0000)
-#define TYPHOON_XCVR_10FULL __constant_cpu_to_le16(0x0001)
-#define TYPHOON_XCVR_100HALF __constant_cpu_to_le16(0x0002)
-#define TYPHOON_XCVR_100FULL __constant_cpu_to_le16(0x0003)
-#define TYPHOON_XCVR_AUTONEG __constant_cpu_to_le16(0x0004)
+#define TYPHOON_XCVR_10HALF cpu_to_le16(0x0000)
+#define TYPHOON_XCVR_10FULL cpu_to_le16(0x0001)
+#define TYPHOON_XCVR_100HALF cpu_to_le16(0x0002)
+#define TYPHOON_XCVR_100FULL cpu_to_le16(0x0003)
+#define TYPHOON_XCVR_AUTONEG cpu_to_le16(0x0004)
/* TYPHOON_CMD_READ_MEDIA_STATUS (resp.parm1)
*/
-#define TYPHOON_MEDIA_STAT_CRC_STRIP_DISABLE __constant_cpu_to_le16(0x0004)
-#define TYPHOON_MEDIA_STAT_COLLISION_DETECT __constant_cpu_to_le16(0x0010)
-#define TYPHOON_MEDIA_STAT_CARRIER_SENSE __constant_cpu_to_le16(0x0020)
-#define TYPHOON_MEDIA_STAT_POLARITY_REV __constant_cpu_to_le16(0x0400)
-#define TYPHOON_MEDIA_STAT_NO_LINK __constant_cpu_to_le16(0x0800)
+#define TYPHOON_MEDIA_STAT_CRC_STRIP_DISABLE cpu_to_le16(0x0004)
+#define TYPHOON_MEDIA_STAT_COLLISION_DETECT cpu_to_le16(0x0010)
+#define TYPHOON_MEDIA_STAT_CARRIER_SENSE cpu_to_le16(0x0020)
+#define TYPHOON_MEDIA_STAT_POLARITY_REV cpu_to_le16(0x0400)
+#define TYPHOON_MEDIA_STAT_NO_LINK cpu_to_le16(0x0800)
/* TYPHOON_CMD_SET_MULTICAST_HASH enable values (cmd.parm1)
*/
-#define TYPHOON_MCAST_HASH_DISABLE __constant_cpu_to_le16(0x0000)
-#define TYPHOON_MCAST_HASH_ENABLE __constant_cpu_to_le16(0x0001)
-#define TYPHOON_MCAST_HASH_SET __constant_cpu_to_le16(0x0002)
+#define TYPHOON_MCAST_HASH_DISABLE cpu_to_le16(0x0000)
+#define TYPHOON_MCAST_HASH_ENABLE cpu_to_le16(0x0001)
+#define TYPHOON_MCAST_HASH_SET cpu_to_le16(0x0002)
/* TYPHOON_CMD_CREATE_SA descriptor and settings
*/
@@ -459,9 +459,9 @@ struct sa_descriptor {
u16 cmd;
u16 seqNo;
u16 mode;
-#define TYPHOON_SA_MODE_NULL __constant_cpu_to_le16(0x0000)
-#define TYPHOON_SA_MODE_AH __constant_cpu_to_le16(0x0001)
-#define TYPHOON_SA_MODE_ESP __constant_cpu_to_le16(0x0002)
+#define TYPHOON_SA_MODE_NULL cpu_to_le16(0x0000)
+#define TYPHOON_SA_MODE_AH cpu_to_le16(0x0001)
+#define TYPHOON_SA_MODE_ESP cpu_to_le16(0x0002)
u8 hashFlags;
#define TYPHOON_SA_HASH_ENABLE 0x01
#define TYPHOON_SA_HASH_SHA1 0x02
@@ -493,22 +493,22 @@ struct sa_descriptor {
/* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx))
* This is all for IPv4.
*/
-#define TYPHOON_OFFLOAD_TCP_CHKSUM __constant_cpu_to_le32(0x00000002)
-#define TYPHOON_OFFLOAD_UDP_CHKSUM __constant_cpu_to_le32(0x00000004)
-#define TYPHOON_OFFLOAD_IP_CHKSUM __constant_cpu_to_le32(0x00000008)
-#define TYPHOON_OFFLOAD_IPSEC __constant_cpu_to_le32(0x00000010)
-#define TYPHOON_OFFLOAD_BCAST_THROTTLE __constant_cpu_to_le32(0x00000020)
-#define TYPHOON_OFFLOAD_DHCP_PREVENT __constant_cpu_to_le32(0x00000040)
-#define TYPHOON_OFFLOAD_VLAN __constant_cpu_to_le32(0x00000080)
-#define TYPHOON_OFFLOAD_FILTERING __constant_cpu_to_le32(0x00000100)
-#define TYPHOON_OFFLOAD_TCP_SEGMENT __constant_cpu_to_le32(0x00000200)
+#define TYPHOON_OFFLOAD_TCP_CHKSUM cpu_to_le32(0x00000002)
+#define TYPHOON_OFFLOAD_UDP_CHKSUM cpu_to_le32(0x00000004)
+#define TYPHOON_OFFLOAD_IP_CHKSUM cpu_to_le32(0x00000008)
+#define TYPHOON_OFFLOAD_IPSEC cpu_to_le32(0x00000010)
+#define TYPHOON_OFFLOAD_BCAST_THROTTLE cpu_to_le32(0x00000020)
+#define TYPHOON_OFFLOAD_DHCP_PREVENT cpu_to_le32(0x00000040)
+#define TYPHOON_OFFLOAD_VLAN cpu_to_le32(0x00000080)
+#define TYPHOON_OFFLOAD_FILTERING cpu_to_le32(0x00000100)
+#define TYPHOON_OFFLOAD_TCP_SEGMENT cpu_to_le32(0x00000200)
/* TYPHOON_CMD_ENABLE_WAKE_EVENTS bits (cmd.parm1)
*/
-#define TYPHOON_WAKE_MAGIC_PKT __constant_cpu_to_le16(0x01)
-#define TYPHOON_WAKE_LINK_EVENT __constant_cpu_to_le16(0x02)
-#define TYPHOON_WAKE_ICMP_ECHO __constant_cpu_to_le16(0x04)
-#define TYPHOON_WAKE_ARP __constant_cpu_to_le16(0x08)
+#define TYPHOON_WAKE_MAGIC_PKT cpu_to_le16(0x01)
+#define TYPHOON_WAKE_LINK_EVENT cpu_to_le16(0x02)
+#define TYPHOON_WAKE_ICMP_ECHO cpu_to_le16(0x04)
+#define TYPHOON_WAKE_ARP cpu_to_le16(0x08)
/* These are used to load the firmware image on the NIC
*/
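The typhoon.h hunks drop the __constant_cpu_to_le16()/__constant_cpu_to_le32() spellings; cpu_to_le16() and cpu_to_le32() constant-fold when given compile-time constants, so they are equally valid in static initializers and #defines. A small sketch under that assumption; the FOO_* names and foo_cmd struct are illustrative:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	#define FOO_CMD_ENABLE	cpu_to_le16(0x0001)	/* folded at build time */
	#define FOO_CMD_DISABLE	cpu_to_le16(0x0002)

	struct foo_cmd {
		__le16 cmd;
	};

	static void foo_fill(struct foo_cmd *c, int enable)
	{
		/* both constants are already little-endian; no runtime swab */
		c->cmd = enable ? FOO_CMD_ENABLE : FOO_CMD_DISABLE;
	}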
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 11441225bf4..6def6f826a5 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3251,7 +3251,7 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
howmany += ucc_geth_rx(ugeth, i, budget - howmany);
if (howmany < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS);
}
@@ -3282,10 +3282,10 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
/* check for receive events that require processing */
if (ucce & UCCE_RX_EVENTS) {
- if (netif_rx_schedule_prep(&ugeth->napi)) {
+ if (napi_schedule_prep(&ugeth->napi)) {
uccm &= ~UCCE_RX_EVENTS;
out_be32(uccf->p_uccm, uccm);
- __netif_rx_schedule(&ugeth->napi);
+ __napi_schedule(&ugeth->napi);
}
}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 5574abe29c7..5b0b9647382 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -55,7 +55,6 @@ struct smsc95xx_priv {
struct usb_context {
struct usb_ctrlrequest req;
- struct completion notify;
struct usbnet *dev;
};
@@ -307,7 +306,7 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
return 0;
}
-static void smsc95xx_async_cmd_callback(struct urb *urb, struct pt_regs *regs)
+static void smsc95xx_async_cmd_callback(struct urb *urb)
{
struct usb_context *usb_context = urb->context;
struct usbnet *dev = usb_context->dev;
@@ -316,8 +315,6 @@ static void smsc95xx_async_cmd_callback(struct urb *urb, struct pt_regs *regs)
if (status < 0)
devwarn(dev, "async callback failed with %d", status);
- complete(&usb_context->notify);
-
kfree(usb_context);
usb_free_urb(urb);
}
@@ -348,11 +345,10 @@ static int smsc95xx_write_reg_async(struct usbnet *dev, u16 index, u32 *data)
usb_context->req.wValue = 00;
usb_context->req.wIndex = cpu_to_le16(index);
usb_context->req.wLength = cpu_to_le16(size);
- init_completion(&usb_context->notify);
usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
(void *)&usb_context->req, data, size,
- (usb_complete_t)smsc95xx_async_cmd_callback,
+ smsc95xx_async_cmd_callback,
(void *)usb_context);
status = usb_submit_urb(urb, GFP_ATOMIC);
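With the on-stack completion removed, the smsc95xx async register write is fire-and-forget: the context is allocated per request and freed in the completion handler, which also now uses the urb-only callback prototype. A reduced sketch of that ownership model, loosely following smsc95xx_write_reg_async(); the foo_* names are placeholders and the control-request setup is elided:

	struct foo_ctx {
		struct usb_ctrlrequest req;
		struct usbnet *dev;
	};

	static void foo_async_callback(struct urb *urb)	/* no pt_regs argument */
	{
		struct foo_ctx *ctx = urb->context;

		if (urb->status < 0)
			devwarn(ctx->dev, "async write failed with %d", urb->status);

		kfree(ctx);		/* the callback owns and frees the context ... */
		usb_free_urb(urb);	/* ... and the urb, so no completion is needed */
	}

	static int foo_write_async(struct usbnet *dev, struct foo_ctx *ctx, u32 *data)
	{
		struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
		int status;

		if (!urb)
			return -ENOMEM;

		/* fill ctx->req here as the driver does */
		usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
				     (void *)&ctx->req, data, sizeof(*data),
				     foo_async_callback, ctx);

		status = usb_submit_urb(urb, GFP_ATOMIC);
		if (status < 0)
			usb_free_urb(urb);	/* on failure the caller still owns ctx */
		return status;
	}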
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 3b8e6325427..4671436ecf0 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -589,7 +589,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
work_done = rhine_rx(dev, budget);
if (work_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
@@ -1319,7 +1319,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
IntrPCIErr | IntrStatsMax | IntrLinkChange,
ioaddr + IntrEnable);
- netif_rx_schedule(&rp->napi);
+ napi_schedule(&rp->napi);
}
if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 63ef2a8905f..3b6225a2c7d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -43,6 +43,7 @@ struct virtnet_info
struct virtqueue *rvq, *svq;
struct net_device *dev;
struct napi_struct napi;
+ unsigned int status;
/* The skb we couldn't send because buffers were full. */
struct sk_buff *last_xmit_skb;
@@ -375,9 +376,9 @@ static void skb_recv_done(struct virtqueue *rvq)
{
struct virtnet_info *vi = rvq->vdev->priv;
/* Schedule NAPI, Suppress further interrupts if successful. */
- if (netif_rx_schedule_prep(&vi->napi)) {
+ if (napi_schedule_prep(&vi->napi)) {
rvq->vq_ops->disable_cb(rvq);
- __netif_rx_schedule(&vi->napi);
+ __napi_schedule(&vi->napi);
}
}
@@ -403,11 +404,11 @@ again:
/* Out of packets? */
if (received < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
&& napi_schedule_prep(napi)) {
vi->rvq->vq_ops->disable_cb(vi->rvq);
- __netif_rx_schedule(napi);
+ __napi_schedule(napi);
goto again;
}
}
@@ -581,9 +582,9 @@ static int virtnet_open(struct net_device *dev)
* won't get another interrupt, so process any outstanding packets
* now. virtnet_poll wants to re-enable the queue, so we disable here.
* We synchronize against interrupts via NAPI_STATE_SCHED */
- if (netif_rx_schedule_prep(&vi->napi)) {
+ if (napi_schedule_prep(&vi->napi)) {
vi->rvq->vq_ops->disable_cb(vi->rvq);
- __netif_rx_schedule(&vi->napi);
+ __napi_schedule(&vi->napi);
}
return 0;
}
@@ -612,6 +613,7 @@ static struct ethtool_ops virtnet_ethtool_ops = {
.set_tx_csum = virtnet_set_tx_csum,
.set_sg = ethtool_op_set_sg,
.set_tso = ethtool_op_set_tso,
+ .get_link = ethtool_op_get_link,
};
#define MIN_MTU 68
@@ -637,6 +639,41 @@ static const struct net_device_ops virtnet_netdev = {
#endif
};
+static void virtnet_update_status(struct virtnet_info *vi)
+{
+ u16 v;
+
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
+ return;
+
+ vi->vdev->config->get(vi->vdev,
+ offsetof(struct virtio_net_config, status),
+ &v, sizeof(v));
+
+ /* Ignore unknown (future) status bits */
+ v &= VIRTIO_NET_S_LINK_UP;
+
+ if (vi->status == v)
+ return;
+
+ vi->status = v;
+
+ if (vi->status & VIRTIO_NET_S_LINK_UP) {
+ netif_carrier_on(vi->dev);
+ netif_wake_queue(vi->dev);
+ } else {
+ netif_carrier_off(vi->dev);
+ netif_stop_queue(vi->dev);
+ }
+}
+
+static void virtnet_config_changed(struct virtio_device *vdev)
+{
+ struct virtnet_info *vi = vdev->priv;
+
+ virtnet_update_status(vi);
+}
+
static int virtnet_probe(struct virtio_device *vdev)
{
int err;
@@ -739,6 +776,9 @@ static int virtnet_probe(struct virtio_device *vdev)
goto unregister;
}
+ vi->status = VIRTIO_NET_S_LINK_UP;
+ virtnet_update_status(vi);
+
pr_debug("virtnet: registered device %s\n", dev->name);
return 0;
@@ -794,7 +834,7 @@ static unsigned int features[] = {
VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
- VIRTIO_NET_F_MRG_RXBUF,
+ VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS,
VIRTIO_F_NOTIFY_ON_EMPTY,
};
@@ -806,6 +846,7 @@ static struct virtio_driver virtio_net = {
.id_table = id_table,
.probe = virtnet_probe,
.remove = __devexit_p(virtnet_remove),
+ .config_changed = virtnet_config_changed,
};
static int __init init(void)
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index b46897996f7..9693b0fd323 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -296,7 +296,13 @@ static void c101_destroy_card(card_t *card)
kfree(card);
}
-
+static const struct net_device_ops c101_ops = {
+ .ndo_open = c101_open,
+ .ndo_stop = c101_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = c101_ioctl,
+};
static int __init c101_run(unsigned long irq, unsigned long winbase)
{
@@ -367,9 +373,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
dev->mem_start = winbase;
dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1;
dev->tx_queue_len = 50;
- dev->do_ioctl = c101_ioctl;
- dev->open = c101_open;
- dev->stop = c101_close;
+ dev->netdev_ops = &c101_ops;
hdlc->attach = sca_attach;
hdlc->xmit = sca_xmit;
card->settings.clock_type = CLOCK_EXT;
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index d80b72e22de..0d7ba117ef6 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -427,6 +427,15 @@ static void __exit cosa_exit(void)
}
module_exit(cosa_exit);
+static const struct net_device_ops cosa_ops = {
+ .ndo_open = cosa_net_open,
+ .ndo_stop = cosa_net_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = cosa_net_ioctl,
+ .ndo_tx_timeout = cosa_net_timeout,
+};
+
static int cosa_probe(int base, int irq, int dma)
{
struct cosa_data *cosa = cosa_cards+nr_cards;
@@ -575,10 +584,7 @@ static int cosa_probe(int base, int irq, int dma)
}
dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx;
- chan->netdev->open = cosa_net_open;
- chan->netdev->stop = cosa_net_close;
- chan->netdev->do_ioctl = cosa_net_ioctl;
- chan->netdev->tx_timeout = cosa_net_timeout;
+ chan->netdev->netdev_ops = &cosa_ops;
chan->netdev->watchdog_timeo = TX_TIMEOUT;
chan->netdev->base_addr = chan->cosa->datareg;
chan->netdev->irq = chan->cosa->irq;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 888025db2f0..8face5db8f3 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -883,6 +883,15 @@ static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
return ret;
}
+static const struct net_device_ops dscc4_ops = {
+ .ndo_open = dscc4_open,
+ .ndo_stop = dscc4_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = dscc4_ioctl,
+ .ndo_tx_timeout = dscc4_tx_timeout,
+};
+
static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
{
struct dscc4_pci_priv *ppriv;
@@ -916,13 +925,8 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
hdlc_device *hdlc = dev_to_hdlc(d);
d->base_addr = (unsigned long)ioaddr;
- d->init = NULL;
d->irq = pdev->irq;
- d->open = dscc4_open;
- d->stop = dscc4_close;
- d->set_multicast_list = NULL;
- d->do_ioctl = dscc4_ioctl;
- d->tx_timeout = dscc4_tx_timeout;
+ d->netdev_ops = &dscc4_ops;
d->watchdog_timeo = TX_TIMEOUT;
SET_NETDEV_DEV(d, &pdev->dev);
@@ -1048,7 +1052,7 @@ static int dscc4_open(struct net_device *dev)
struct dscc4_pci_priv *ppriv;
int ret = -EAGAIN;
- if ((dscc4_loopback_check(dpriv) < 0) || !dev->hard_start_xmit)
+ if ((dscc4_loopback_check(dpriv) < 0))
goto err;
if ((ret = hdlc_open(dev)))
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 48a2c9d2895..00945f7c1e9 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2424,6 +2424,15 @@ fst_init_card(struct fst_card_info *card)
type_strings[card->type], card->irq, card->nports);
}
+static const struct net_device_ops fst_ops = {
+ .ndo_open = fst_open,
+ .ndo_stop = fst_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = fst_ioctl,
+ .ndo_tx_timeout = fst_tx_timeout,
+};
+
/*
* Initialise card when detected.
* Returns 0 to indicate success, or errno otherwise.
@@ -2565,12 +2574,9 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->base_addr = card->pci_conf;
dev->irq = card->irq;
- dev->tx_queue_len = FST_TX_QUEUE_LEN;
- dev->open = fst_open;
- dev->stop = fst_close;
- dev->do_ioctl = fst_ioctl;
- dev->watchdog_timeo = FST_TX_TIMEOUT;
- dev->tx_timeout = fst_tx_timeout;
+ dev->netdev_ops = &fst_ops;
+ dev->tx_queue_len = FST_TX_QUEUE_LEN;
+ dev->watchdog_timeo = FST_TX_TIMEOUT;
hdlc->attach = fst_attach;
hdlc->xmit = fst_start_xmit;
}
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index 08b3536944f..497b003d723 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -341,7 +341,7 @@ static int sca_poll(struct napi_struct *napi, int budget)
received = sca_rx_done(port, budget);
if (received < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
enable_intr(port);
}
@@ -359,7 +359,7 @@ static irqreturn_t sca_intr(int irq, void *dev_id)
if (port && (isr0 & (i ? 0x08002200 : 0x00080022))) {
handled = 1;
disable_intr(port);
- netif_rx_schedule(&port->napi);
+ napi_schedule(&port->napi);
}
}
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 1f2a140c9f7..43da8bd7297 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -44,7 +44,7 @@ static const char* version = "HDLC support module revision 1.22";
static struct hdlc_proto *first_proto;
-static int hdlc_change_mtu(struct net_device *dev, int new_mtu)
+int hdlc_change_mtu(struct net_device *dev, int new_mtu)
{
if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
return -EINVAL;
@@ -52,15 +52,6 @@ static int hdlc_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-
-
-static struct net_device_stats *hdlc_get_stats(struct net_device *dev)
-{
- return &dev->stats;
-}
-
-
-
static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *p, struct net_device *orig_dev)
{
@@ -75,7 +66,15 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
return hdlc->proto->netif_rx(skb);
}
+int hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+
+ if (hdlc->proto->xmit)
+ return hdlc->proto->xmit(skb, dev);
+ return hdlc->xmit(skb, dev); /* call hardware driver directly */
+}
static inline void hdlc_proto_start(struct net_device *dev)
{
@@ -102,11 +101,11 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event,
hdlc_device *hdlc;
unsigned long flags;
int on;
-
+
if (dev_net(dev) != &init_net)
return NOTIFY_DONE;
- if (dev->get_stats != hdlc_get_stats)
+ if (!(dev->priv_flags & IFF_WAN_HDLC))
return NOTIFY_DONE; /* not an HDLC device */
if (event != NETDEV_CHANGE)
@@ -233,15 +232,13 @@ static void hdlc_setup_dev(struct net_device *dev)
/* Re-init all variables changed by HDLC protocol drivers,
* including ether_setup() called from hdlc_raw_eth.c.
*/
- dev->get_stats = hdlc_get_stats;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->priv_flags = IFF_WAN_HDLC;
dev->mtu = HDLC_MAX_MTU;
dev->type = ARPHRD_RAWHDLC;
dev->hard_header_len = 16;
dev->addr_len = 0;
dev->header_ops = &hdlc_null_ops;
-
- dev->change_mtu = hdlc_change_mtu;
}
static void hdlc_setup(struct net_device *dev)
@@ -339,6 +336,8 @@ MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("HDLC support module");
MODULE_LICENSE("GPL v2");
+EXPORT_SYMBOL(hdlc_change_mtu);
+EXPORT_SYMBOL(hdlc_start_xmit);
EXPORT_SYMBOL(hdlc_open);
EXPORT_SYMBOL(hdlc_close);
EXPORT_SYMBOL(hdlc_ioctl);
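hdlc_change_mtu() and hdlc_start_xmit() are exported precisely so the WAN drivers converted above (c101, cosa, dscc4, farsync, hostess, ...) can point their net_device_ops at them; hdlc_start_xmit() hands the frame to the protocol's .xmit when one is set (as hdlc_raw_eth and hdlc_x25 now register) and otherwise falls back to the hardware driver's hdlc->xmit. A minimal sketch of a hardware driver wired up this way; the foo_* names are placeholders:

	static int foo_open(struct net_device *dev)
	{
		int err = hdlc_open(dev);	/* starts the attached protocol */
		if (err)
			return err;
		/* enable the hardware here */
		return 0;
	}

	static int foo_close(struct net_device *dev)
	{
		/* quiesce the hardware here */
		hdlc_close(dev);
		return 0;
	}

	static int foo_hw_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* push the raw HDLC frame to the hardware ring */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	static const struct net_device_ops foo_hdlc_ops = {
		.ndo_open	= foo_open,
		.ndo_stop	= foo_close,
		.ndo_change_mtu	= hdlc_change_mtu,	/* shared helper, now exported */
		.ndo_start_xmit	= hdlc_start_xmit,	/* proto->xmit if set, else hdlc->xmit */
	};

	static void foo_wire_up(struct net_device *dev)
	{
		dev->netdev_ops = &foo_hdlc_ops;
		dev_to_hdlc(dev)->xmit = foo_hw_xmit;	/* fallback for protocols without .xmit */
	}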
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 44e64b15dbd..af3fd4fead8 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -382,7 +382,6 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
memcpy(&state(hdlc)->settings, &new_settings, size);
spin_lock_init(&state(hdlc)->lock);
- dev->hard_start_xmit = hdlc->xmit;
dev->header_ops = &cisco_header_ops;
dev->type = ARPHRD_CISCO;
netif_dormant_on(dev);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index f1ddd7c3459..70e57cebc95 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -444,18 +444,6 @@ static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-
-
-static int pvc_change_mtu(struct net_device *dev, int new_mtu)
-{
- if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-
-
-
static inline void fr_log_dlci_active(pvc_device *pvc)
{
printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n",
@@ -1068,6 +1056,14 @@ static void pvc_setup(struct net_device *dev)
dev->addr_len = 2;
}
+static const struct net_device_ops pvc_ops = {
+ .ndo_open = pvc_open,
+ .ndo_stop = pvc_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = pvc_xmit,
+ .ndo_do_ioctl = pvc_ioctl,
+};
+
static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
{
hdlc_device *hdlc = dev_to_hdlc(frad);
@@ -1104,11 +1100,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
*(__be16*)dev->dev_addr = htons(dlci);
dlci_to_q922(dev->broadcast, dlci);
}
- dev->hard_start_xmit = pvc_xmit;
- dev->open = pvc_open;
- dev->stop = pvc_close;
- dev->do_ioctl = pvc_ioctl;
- dev->change_mtu = pvc_change_mtu;
+ dev->netdev_ops = &pvc_ops;
dev->mtu = HDLC_MAX_MTU;
dev->tx_queue_len = 0;
dev->ml_priv = pvc;
@@ -1260,8 +1252,6 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
state(hdlc)->dce_pvc_count = 0;
}
memcpy(&state(hdlc)->settings, &new_settings, size);
-
- dev->hard_start_xmit = hdlc->xmit;
dev->type = ARPHRD_FRAD;
return 0;
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 57fe714c1c7..7b8a5eae201 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -558,7 +558,6 @@ out:
return NET_RX_DROP;
}
-
static void ppp_timer(unsigned long arg)
{
struct proto *proto = (struct proto *)arg;
@@ -679,7 +678,6 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
ppp->keepalive_interval = 10;
ppp->keepalive_timeout = 60;
- dev->hard_start_xmit = hdlc->xmit;
dev->hard_header_len = sizeof(struct hdlc_header);
dev->header_ops = &ppp_header_ops;
dev->type = ARPHRD_PPP;
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index 8612311748f..6e92c64ebd0 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -30,8 +30,6 @@ static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev)
return __constant_htons(ETH_P_IP);
}
-
-
static struct hdlc_proto proto = {
.type_trans = raw_type_trans,
.ioctl = raw_ioctl,
@@ -86,7 +84,6 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
if (result)
return result;
memcpy(hdlc->state, &new_settings, size);
- dev->hard_start_xmit = hdlc->xmit;
dev->type = ARPHRD_RAWHDLC;
netif_dormant_off(dev);
return 0;
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index a13fc320752..49e68f5ca5f 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -45,6 +45,7 @@ static int eth_tx(struct sk_buff *skb, struct net_device *dev)
static struct hdlc_proto proto = {
.type_trans = eth_type_trans,
+ .xmit = eth_tx,
.ioctl = raw_eth_ioctl,
.module = THIS_MODULE,
};
@@ -56,9 +57,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
const size_t size = sizeof(raw_hdlc_proto);
raw_hdlc_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
- int result;
- int (*old_ch_mtu)(struct net_device *, int);
- int old_qlen;
+ int result, old_qlen;
switch (ifr->ifr_settings.type) {
case IF_GET_PROTO:
@@ -99,11 +98,8 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
if (result)
return result;
memcpy(hdlc->state, &new_settings, size);
- dev->hard_start_xmit = eth_tx;
- old_ch_mtu = dev->change_mtu;
old_qlen = dev->tx_queue_len;
ether_setup(dev);
- dev->change_mtu = old_ch_mtu;
dev->tx_queue_len = old_qlen;
random_ether_addr(dev->dev_addr);
netif_dormant_off(dev);
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index cbcbf6f0414..b1dc29ed158 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -184,6 +184,7 @@ static struct hdlc_proto proto = {
.close = x25_close,
.ioctl = x25_ioctl,
.netif_rx = x25_rx,
+ .xmit = x25_xmit,
.module = THIS_MODULE,
};
@@ -213,7 +214,6 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
if ((result = attach_hdlc_protocol(dev, &proto, 0)))
return result;
- dev->hard_start_xmit = x25_xmit;
dev->type = ARPHRD_X25;
netif_dormant_off(dev);
return 0;
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index af54f0cf1b3..567d4f5062d 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -173,6 +173,14 @@ static int hostess_attach(struct net_device *dev, unsigned short encoding,
* Description block for a Comtrol Hostess SV11 card
*/
+static const struct net_device_ops hostess_ops = {
+ .ndo_open = hostess_open,
+ .ndo_stop = hostess_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = hostess_ioctl,
+};
+
static struct z8530_dev *sv11_init(int iobase, int irq)
{
struct z8530_dev *sv;
@@ -267,9 +275,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
dev_to_hdlc(netdev)->attach = hostess_attach;
dev_to_hdlc(netdev)->xmit = hostess_queue_xmit;
- netdev->open = hostess_open;
- netdev->stop = hostess_close;
- netdev->do_ioctl = hostess_ioctl;
+ netdev->netdev_ops = &hostess_ops;
netdev->base_addr = iobase;
netdev->irq = irq;
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 0dbd85b0162..3bf7d3f447d 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -622,7 +622,7 @@ static void hss_hdlc_rx_irq(void *pdev)
printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
#endif
qmgr_disable_irq(queue_ids[port->id].rx);
- netif_rx_schedule(&port->napi);
+ napi_schedule(&port->napi);
}
static int hss_hdlc_poll(struct napi_struct *napi, int budget)
@@ -649,15 +649,15 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
printk(KERN_DEBUG "%s: hss_hdlc_poll"
- " netif_rx_complete\n", dev->name);
+ " napi_complete\n", dev->name);
#endif
- netif_rx_complete(napi);
+ napi_complete(napi);
qmgr_enable_irq(rxq);
if (!qmgr_stat_empty(rxq) &&
- netif_rx_reschedule(napi)) {
+ napi_reschedule(napi)) {
#if DEBUG_RX
printk(KERN_DEBUG "%s: hss_hdlc_poll"
- " netif_rx_reschedule succeeded\n",
+ " napi_reschedule succeeded\n",
dev->name);
#endif
qmgr_disable_irq(rxq);
@@ -1069,7 +1069,7 @@ static int hss_hdlc_open(struct net_device *dev)
hss_start_hdlc(port);
/* we may already have RX data, enables IRQ */
- netif_rx_schedule(&port->napi);
+ napi_schedule(&port->napi);
return 0;
err_unlock:
@@ -1230,6 +1230,14 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
* initialization
****************************************************************************/
+static const struct net_device_ops hss_hdlc_ops = {
+ .ndo_open = hss_hdlc_open,
+ .ndo_stop = hss_hdlc_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = hss_hdlc_ioctl,
+};
+
static int __devinit hss_init_one(struct platform_device *pdev)
{
struct port *port;
@@ -1254,9 +1262,7 @@ static int __devinit hss_init_one(struct platform_device *pdev)
hdlc = dev_to_hdlc(dev);
hdlc->attach = hss_hdlc_attach;
hdlc->xmit = hss_hdlc_xmit;
- dev->open = hss_hdlc_open;
- dev->stop = hss_hdlc_close;
- dev->do_ioctl = hss_hdlc_ioctl;
+ dev->netdev_ops = &hss_hdlc_ops;
dev->tx_queue_len = 100;
port->clock_type = CLOCK_EXT;
port->clock_rate = 2048000;
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index feac3b99f8f..45b1822c962 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -806,6 +806,16 @@ static int lmc_attach(struct net_device *dev, unsigned short encoding,
return -EINVAL;
}
+static const struct net_device_ops lmc_ops = {
+ .ndo_open = lmc_open,
+ .ndo_stop = lmc_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = lmc_ioctl,
+ .ndo_tx_timeout = lmc_driver_timeout,
+ .ndo_get_stats = lmc_get_stats,
+};
+
static int __devinit lmc_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -849,11 +859,7 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
dev->type = ARPHRD_HDLC;
dev_to_hdlc(dev)->xmit = lmc_start_xmit;
dev_to_hdlc(dev)->attach = lmc_attach;
- dev->open = lmc_open;
- dev->stop = lmc_close;
- dev->get_stats = lmc_get_stats;
- dev->do_ioctl = lmc_ioctl;
- dev->tx_timeout = lmc_driver_timeout;
+ dev->netdev_ops = &lmc_ops;
dev->watchdog_timeo = HZ; /* 1 second */
dev->tx_queue_len = 100;
sc->lmc_device = dev;
@@ -1059,9 +1065,6 @@ static int lmc_open(struct net_device *dev)
if ((err = lmc_proto_open(sc)) != 0)
return err;
- dev->do_ioctl = lmc_ioctl;
-
-
netif_start_queue(dev);
sc->extra_stats.tx_tbusy0++;
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
index 94b4c208b01..044a48175c4 100644
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -51,30 +51,15 @@
void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
{
lmc_trace(sc->lmc_device, "lmc_proto_attach in");
- switch(sc->if_type){
- case LMC_PPP:
- {
- struct net_device *dev = sc->lmc_device;
- dev->do_ioctl = lmc_ioctl;
- }
- break;
- case LMC_NET:
- {
+ if (sc->if_type == LMC_NET) {
struct net_device *dev = sc->lmc_device;
/*
* They set a few basics because they don't use HDLC
*/
dev->flags |= IFF_POINTOPOINT;
-
dev->hard_header_len = 0;
dev->addr_len = 0;
}
- case LMC_RAW: /* Setup the task queue, maybe we should notify someone? */
- {
- }
- default:
- break;
- }
lmc_trace(sc->lmc_device, "lmc_proto_attach out");
}
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 697715ae80f..83da596e205 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -324,7 +324,13 @@ static void n2_destroy_card(card_t *card)
kfree(card);
}
-
+static const struct net_device_ops n2_ops = {
+ .ndo_open = n2_open,
+ .ndo_stop = n2_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = n2_ioctl,
+};
static int __init n2_run(unsigned long io, unsigned long irq,
unsigned long winbase, long valid0, long valid1)
@@ -460,9 +466,7 @@ static int __init n2_run(unsigned long io, unsigned long irq,
dev->mem_start = winbase;
dev->mem_end = winbase + USE_WINDOWSIZE - 1;
dev->tx_queue_len = 50;
- dev->do_ioctl = n2_ioctl;
- dev->open = n2_open;
- dev->stop = n2_close;
+ dev->netdev_ops = &n2_ops;
hdlc->attach = sca_attach;
hdlc->xmit = sca_xmit;
port->settings.clock_type = CLOCK_EXT;
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index f247e5d9002..60ece54bdd9 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -287,7 +287,13 @@ static void pc300_pci_remove_one(struct pci_dev *pdev)
kfree(card);
}
-
+static const struct net_device_ops pc300_ops = {
+ .ndo_open = pc300_open,
+ .ndo_stop = pc300_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = pc300_ioctl,
+};
static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
@@ -448,9 +454,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
dev->mem_start = ramphys;
dev->mem_end = ramphys + ramsize - 1;
dev->tx_queue_len = 50;
- dev->do_ioctl = pc300_ioctl;
- dev->open = pc300_open;
- dev->stop = pc300_close;
+ dev->netdev_ops = &pc300_ops;
hdlc->attach = sca_attach;
hdlc->xmit = sca_xmit;
port->settings.clock_type = CLOCK_EXT;
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index 1104d3a692f..e035d8c57e1 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -265,7 +265,13 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
kfree(card);
}
-
+static const struct net_device_ops pci200_ops = {
+ .ndo_open = pci200_open,
+ .ndo_stop = pci200_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = pci200_ioctl,
+};
static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
@@ -395,9 +401,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
dev->mem_start = ramphys;
dev->mem_end = ramphys + ramsize - 1;
dev->tx_queue_len = 50;
- dev->do_ioctl = pci200_ioctl;
- dev->open = pci200_open;
- dev->stop = pci200_close;
+ dev->netdev_ops = &pci200_ops;
hdlc->attach = sca_attach;
hdlc->xmit = sca_xmit;
port->settings.clock_type = CLOCK_EXT;
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 0941a26f6e3..23b26902745 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -169,6 +169,14 @@ static int sealevel_attach(struct net_device *dev, unsigned short encoding,
return -EINVAL;
}
+static const struct net_device_ops sealevel_ops = {
+ .ndo_open = sealevel_open,
+ .ndo_stop = sealevel_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = sealevel_ioctl,
+};
+
static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
{
struct net_device *dev = alloc_hdlcdev(sv);
@@ -177,9 +185,7 @@ static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
dev_to_hdlc(dev)->attach = sealevel_attach;
dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
- dev->open = sealevel_open;
- dev->stop = sealevel_close;
- dev->do_ioctl = sealevel_ioctl;
+ dev->netdev_ops = &sealevel_ops;
dev->base_addr = iobase;
dev->irq = irq;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 4bffb67ebca..887acb0dc80 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -547,6 +547,15 @@ static void wanxl_pci_remove_one(struct pci_dev *pdev)
#include "wanxlfw.inc"
+static const struct net_device_ops wanxl_ops = {
+ .ndo_open = wanxl_open,
+ .ndo_stop = wanxl_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = wanxl_ioctl,
+ .ndo_get_stats = wanxl_get_stats,
+};
+
static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -777,12 +786,9 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
hdlc = dev_to_hdlc(dev);
spin_lock_init(&port->lock);
dev->tx_queue_len = 50;
- dev->do_ioctl = wanxl_ioctl;
- dev->open = wanxl_open;
- dev->stop = wanxl_close;
+ dev->netdev_ops = &wanxl_ops;
hdlc->attach = wanxl_attach;
hdlc->xmit = wanxl_xmit;
- dev->get_stats = wanxl_get_stats;
port->card = card;
port->node = i;
get_status(port)->clocking = CLOCK_EXT;
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 63fe708e8a3..57159e4bbfe 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -493,6 +493,14 @@ error_skb_realloc:
i2400m, buf, buf_len);
}
+static const struct net_device_ops i2400m_netdev_ops = {
+ .ndo_open = i2400m_open,
+ .ndo_stop = i2400m_stop,
+ .ndo_start_xmit = i2400m_hard_start_xmit,
+ .ndo_tx_timeout = i2400m_tx_timeout,
+ .ndo_change_mtu = i2400m_change_mtu,
+};
+
/**
* i2400m_netdev_setup - Set up @net_dev's i2400m private data
@@ -513,11 +521,7 @@ void i2400m_netdev_setup(struct net_device *net_dev)
& (~IFF_BROADCAST /* i2400m is P2P */
& ~IFF_MULTICAST);
net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
- net_dev->open = i2400m_open;
- net_dev->stop = i2400m_stop;
- net_dev->hard_start_xmit = i2400m_hard_start_xmit;
- net_dev->change_mtu = i2400m_change_mtu;
- net_dev->tx_timeout = i2400m_tx_timeout;
+ net_dev->netdev_ops = &i2400m_netdev_ops;
d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
}
EXPORT_SYMBOL_GPL(i2400m_netdev_setup);
diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c
index ccaeb5c219d..d281b6e3862 100644
--- a/drivers/net/wireless/ath5k/debug.c
+++ b/drivers/net/wireless/ath5k/debug.c
@@ -165,7 +165,7 @@ static int reg_show(struct seq_file *seq, void *p)
return 0;
}
-static struct seq_operations register_seq_ops = {
+static const struct seq_operations register_seq_ops = {
.start = reg_start,
.next = reg_next,
.stop = reg_stop,
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index ec4efd7ff3c..50e28a0cdfe 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -629,7 +629,7 @@ static ssize_t lbs_rdrf_write(struct file *file,
res = -EFAULT;
goto out_unlock;
}
- priv->rf_offset = simple_strtoul((char *)buf, NULL, 16);
+ priv->rf_offset = simple_strtoul(buf, NULL, 16);
res = count;
out_unlock:
free_page(addr);
@@ -680,12 +680,12 @@ out_unlock:
}
struct lbs_debugfs_files {
- char *name;
+ const char *name;
int perm;
struct file_operations fops;
};
-static struct lbs_debugfs_files debugfs_files[] = {
+static const struct lbs_debugfs_files debugfs_files[] = {
{ "info", 0444, FOPS(lbs_dev_info, write_file_dummy), },
{ "getscantable", 0444, FOPS(lbs_getscantable,
write_file_dummy), },
@@ -693,7 +693,7 @@ static struct lbs_debugfs_files debugfs_files[] = {
lbs_sleepparams_write), },
};
-static struct lbs_debugfs_files debugfs_events_files[] = {
+static const struct lbs_debugfs_files debugfs_events_files[] = {
{"low_rssi", 0644, FOPS(lbs_lowrssi_read,
lbs_lowrssi_write), },
{"low_snr", 0644, FOPS(lbs_lowsnr_read,
@@ -708,7 +708,7 @@ static struct lbs_debugfs_files debugfs_events_files[] = {
lbs_highsnr_write), },
};
-static struct lbs_debugfs_files debugfs_regs_files[] = {
+static const struct lbs_debugfs_files debugfs_regs_files[] = {
{"rdmac", 0644, FOPS(lbs_rdmac_read, lbs_rdmac_write), },
{"wrmac", 0600, FOPS(NULL, lbs_wrmac_write), },
{"rdbbp", 0644, FOPS(lbs_rdbbp_read, lbs_rdbbp_write), },
@@ -735,7 +735,7 @@ void lbs_debugfs_remove(void)
void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
{
int i;
- struct lbs_debugfs_files *files;
+ const struct lbs_debugfs_files *files;
if (!lbs_dir)
goto exit;
@@ -938,7 +938,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
return (ssize_t)cnt;
}
-static struct file_operations lbs_debug_fops = {
+static const struct file_operations lbs_debug_fops = {
.owner = THIS_MODULE,
.open = open_file_generic,
.write = lbs_debugfs_write,
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 7015f248055..d6bf8d2ef8e 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -1125,7 +1125,7 @@ static int strip_seq_show(struct seq_file *seq, void *v)
}
-static struct seq_operations strip_seq_ops = {
+static const struct seq_operations strip_seq_ops = {
.start = strip_seq_start,
.next = strip_seq_next,
.stop = strip_seq_stop,
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cd6184ee08e..9f102a6535c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -196,7 +196,7 @@ static void rx_refill_timeout(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct netfront_info *np = netdev_priv(dev);
- netif_rx_schedule(&np->napi);
+ napi_schedule(&np->napi);
}
static int netfront_tx_slot_available(struct netfront_info *np)
@@ -328,7 +328,7 @@ static int xennet_open(struct net_device *dev)
xennet_alloc_rx_buffers(dev);
np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
- netif_rx_schedule(&np->napi);
+ napi_schedule(&np->napi);
}
spin_unlock_bh(&np->rx_lock);
@@ -979,7 +979,7 @@ err:
RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
if (!more_to_do)
- __netif_rx_complete(napi);
+ __napi_complete(napi);
local_irq_restore(flags);
}
@@ -1317,7 +1317,7 @@ static irqreturn_t xennet_interrupt(int irq, void *dev_id)
xennet_tx_buf_gc(dev);
/* Under tx_lock: protects access to rx shared-ring indexes. */
if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
- netif_rx_schedule(&np->napi);
+ napi_schedule(&np->napi);
}
spin_unlock_irqrestore(&np->tx_lock, flags);
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index f0b15c9347d..0a6992d8611 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -358,6 +358,17 @@ static void znet_set_multicast_list (struct net_device *dev)
* multicast address configured isn't equal to IFF_ALLMULTI */
}
+static const struct net_device_ops znet_netdev_ops = {
+ .ndo_open = znet_open,
+ .ndo_stop = znet_close,
+ .ndo_start_xmit = znet_send_packet,
+ .ndo_set_multicast_list = znet_set_multicast_list,
+ .ndo_tx_timeout = znet_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
/* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe
BIOS area. We just scan for the signature, and pull the vital parameters
out of the structure. */
@@ -440,11 +451,7 @@ static int __init znet_probe (void)
znet->tx_end = znet->tx_start + znet->tx_buf_len;
/* The ZNET-specific entries in the device structure. */
- dev->open = &znet_open;
- dev->hard_start_xmit = &znet_send_packet;
- dev->stop = &znet_close;
- dev->set_multicast_list = &znet_set_multicast_list;
- dev->tx_timeout = znet_tx_timeout;
+ dev->netdev_ops = &znet_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
err = register_netdev(dev);
if (err)