-rw-r--r--Documentation/networking/ip-sysctl.txt24
-rw-r--r--Documentation/networking/tcp-thin.txt47
-rw-r--r--MAINTAINERS3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c2
-rw-r--r--drivers/net/3c501.c2
-rw-r--r--drivers/net/3c505.c9
-rw-r--r--drivers/net/3c515.c2
-rw-r--r--drivers/net/3c523.c9
-rw-r--r--drivers/net/3c527.c8
-rw-r--r--drivers/net/3c59x.c2
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/benet/be_main.c12
-rw-r--r--drivers/net/depca.c2
-rw-r--r--drivers/net/macvtap.c463
-rw-r--r--drivers/net/s2io.c5
-rw-r--r--drivers/net/sb1250-mac.c6
-rw-r--r--drivers/net/sc92031.c2
-rw-r--r--drivers/net/sfc/efx.c6
-rw-r--r--drivers/net/sis190.c156
-rw-r--r--drivers/net/sis900.c5
-rw-r--r--drivers/net/skfp/skfddi.c10
-rw-r--r--drivers/net/skge.c10
-rw-r--r--drivers/net/sky2.c5
-rw-r--r--drivers/net/smsc911x.c27
-rw-r--r--drivers/net/smsc9420.c5
-rw-r--r--drivers/net/sonic.c7
-rw-r--r--drivers/net/spider_net.c2
-rw-r--r--drivers/net/starfire.c4
-rw-r--r--drivers/net/stmmac/dwmac100.c4
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c4
-rw-r--r--drivers/net/sun3_82586.c8
-rw-r--r--drivers/net/sunbmac.c5
-rw-r--r--drivers/net/sundance.c3
-rw-r--r--drivers/net/sungem.c10
-rw-r--r--drivers/net/sunhme.c20
-rw-r--r--drivers/net/sunlance.c6
-rw-r--r--drivers/net/sunqe.c9
-rw-r--r--drivers/net/sunvnet.c2
-rw-r--r--drivers/net/tg3.c270
-rw-r--r--drivers/net/tokenring/3c359.c5
-rw-r--r--drivers/net/tokenring/ibmtr.c4
-rw-r--r--drivers/net/tokenring/lanstreamer.c4
-rw-r--r--drivers/net/tokenring/olympic.c5
-rw-r--r--drivers/net/tokenring/tms380tr.c8
-rw-r--r--drivers/net/tulip/de2104x.c19
-rw-r--r--drivers/net/tulip/de4x5.c14
-rw-r--r--drivers/net/tulip/dmfe.c27
-rw-r--r--drivers/net/tulip/tulip_core.c15
-rw-r--r--drivers/net/tulip/uli526x.c4
-rw-r--r--drivers/net/tulip/winbond-840.c6
-rw-r--r--drivers/net/usb/asix.c12
-rw-r--r--drivers/net/usb/catc.c5
-rw-r--r--drivers/net/usb/mcs7830.c6
-rw-r--r--drivers/net/usb/smsc95xx.c26
-rw-r--r--drivers/staging/arlan/arlan-main.c25
-rw-r--r--drivers/staging/et131x/et131x_netdev.c15
-rw-r--r--drivers/staging/octeon/ethernet.c2
-rw-r--r--drivers/staging/slicoss/slicoss.c16
-rw-r--r--drivers/staging/vt6655/device_main.c6
-rw-r--r--drivers/staging/vt6656/main_usb.c5
-rw-r--r--drivers/staging/wavelan/wavelan.c6
-rw-r--r--drivers/staging/wavelan/wavelan_cs.c9
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c16
-rw-r--r--drivers/vhost/Kconfig2
-rw-r--r--drivers/vhost/net.c8
-rw-r--r--include/linux/icmpv6.h3
-rw-r--r--include/linux/if_macvlan.h13
-rw-r--r--include/linux/snmp.h1
-rw-r--r--include/linux/tcp.h7
-rw-r--r--include/net/ip6_fib.h2
-rw-r--r--include/net/netlink.h2
-rw-r--r--include/net/tcp.h13
-rw-r--r--kernel/taskstats.c6
-rw-r--r--net/dcb/dcbnl.c16
-rw-r--r--net/ipv4/ip_gre.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c14
-rw-r--r--net/ipv4/tcp.c14
-rw-r--r--net/ipv4/tcp_input.c12
-rw-r--r--net/ipv4/tcp_timer.c21
-rw-r--r--net/ipv6/exthdrs.c2
-rw-r--r--net/ipv6/icmp.c10
-rw-r--r--net/ipv6/ip6_fib.c17
-rw-r--r--net/ipv6/ip6_input.c3
-rw-r--r--net/ipv6/ip6_output.c11
-rw-r--r--net/ipv6/ip6_tunnel.c6
-rw-r--r--net/ipv6/mip6.c2
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c2
-rw-r--r--net/ipv6/reassembly.c2
-rw-r--r--net/ipv6/route.c4
-rw-r--r--net/ipv6/sit.c2
-rw-r--r--net/ipv6/tunnel6.c4
-rw-r--r--net/ipv6/udp.c5
-rw-r--r--net/ipv6/xfrm6_output.c2
-rw-r--r--net/irda/irnetlink.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c10
-rw-r--r--net/unix/af_unix.c2
-rw-r--r--net/wimax/op-msg.c3
-rw-r--r--net/wimax/op-reset.c3
-rw-r--r--net/wimax/op-rfkill.c3
-rw-r--r--net/wimax/op-state-get.c3
-rw-r--r--net/wimax/stack.c3
-rw-r--r--net/wireless/nl80211.c14
-rw-r--r--net/xfrm/xfrm_policy.c3
-rw-r--r--net/xfrm/xfrm_proc.c1
105 files changed, 947 insertions, 751 deletions
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 2dc7a1d9768..2571a62d923 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -487,6 +487,30 @@ tcp_dma_copybreak - INTEGER
and CONFIG_NET_DMA is enabled.
Default: 4096
+tcp_thin_linear_timeouts - BOOLEAN
+ Enable dynamic triggering of linear timeouts for thin streams.
+ If set, a check is performed upon retransmission by timeout to
+ determine if the stream is thin (less than 4 packets in flight).
+ As long as the stream is found to be thin, up to 6 linear
+ timeouts may be performed before exponential backoff mode is
+ initiated. This improves retransmission latency for
+ non-aggressive thin streams, often found to be time-dependent.
+ For more information on thin streams, see
+ Documentation/networking/tcp-thin.txt
+ Default: 0
+
+tcp_thin_dupack - BOOLEAN
+ Enable dynamic triggering of retransmissions after one dupACK
+ for thin streams. If set, a check is performed upon reception
+ of a dupACK to determine if the stream is thin (less than 4
+ packets in flight). As long as the stream is found to be thin,
+ data is retransmitted on the first received dupACK. This
+ improves retransmission latency for non-aggressive thin
+ streams, often found to be time-dependent.
+ For more information on thin streams, see
+ Documentation/networking/tcp-thin.txt
+ Default: 0
+
UDP variables:
udp_mem - vector of 3 INTEGERs: min, pressure, max
diff --git a/Documentation/networking/tcp-thin.txt b/Documentation/networking/tcp-thin.txt
new file mode 100644
index 00000000000..151e229980f
--- /dev/null
+++ b/Documentation/networking/tcp-thin.txt
@@ -0,0 +1,47 @@
+Thin-streams and TCP
+====================
+A wide range of Internet-based services that use reliable transport
+protocols display what we call thin-stream properties. This means
+that the application sends data with such a low rate that the
+retransmission mechanisms of the transport protocol are not fully
+effective. In time-dependent scenarios (like online games, control
+systems, stock trading etc.) where the user experience depends
+on the data delivery latency, packet loss can be devastating for
+the service quality. Extreme latencies are caused by TCP's
+dependency on the arrival of new data from the application to trigger
+retransmissions effectively through fast retransmit instead of
+waiting for long timeouts.
+
+After analysing a large number of time-dependent interactive
+applications, we have seen that they often produce thin streams
+and also stay with this traffic pattern throughout their entire
+lifespan. The combination of time-dependency and the fact that the
+streams provoke high latencies when using TCP is unfortunate.
+
+In order to reduce application-layer latency when packets are lost,
+a set of mechanisms has been made, which address these latency issues
+for thin streams. In short, if the kernel detects a thin stream,
+the retransmission mechanisms are modified in the following manner:
+
+1) If the stream is thin, fast retransmit on the first dupACK.
+2) If the stream is thin, do not apply exponential backoff.
+
+These enhancements are applied only if the stream is detected as
+thin. This is accomplished by defining a threshold for the number
+of packets in flight. If there are less than 4 packets in flight,
+fast retransmissions can not be triggered, and the stream is prone
+to experience high retransmission latencies.
+
+Since these mechanisms are targeted at time-dependent applications,
+they must be specifically activated by the application using the
+TCP_THIN_LINEAR_TIMEOUTS and TCP_THIN_DUPACK IOCTLS or the
+tcp_thin_linear_timeouts and tcp_thin_dupack sysctls. Both
+modifications are turned off by default.
+
+References
+==========
+More information on the modifications, as well as a wide range of
+experimental data can be found here:
+"Improving latency for interactive, thin-stream applications over
+reliable transport"
+http://simula.no/research/nd/publications/Simula.nd.477/simula_pdf_file
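
For illustration (a sketch, not patch content), this is how a time-dependent
application might enable both mechanisms on a single TCP socket. It assumes
TCP_THIN_LINEAR_TIMEOUTS and TCP_THIN_DUPACK are visible through the
installed <netinet/tcp.h>, falling back to the values added to
include/linux/tcp.h by this series.

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>

	#ifndef TCP_THIN_LINEAR_TIMEOUTS
	#define TCP_THIN_LINEAR_TIMEOUTS 16	/* include/linux/tcp.h */
	#endif
	#ifndef TCP_THIN_DUPACK
	#define TCP_THIN_DUPACK 17		/* include/linux/tcp.h */
	#endif

	/* Enable linear timeouts and early dupACK retransmit on one socket.
	 * Returns 0 on success, -1 with errno set on failure. */
	static int enable_thin_stream_opts(int fd)
	{
		int one = 1;

		if (setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS,
			       &one, sizeof(one)) < 0)
			return -1;
		if (setsockopt(fd, IPPROTO_TCP, TCP_THIN_DUPACK,
			       &one, sizeof(one)) < 0)
			return -1;
		return 0;
	}

Alternatively, the tcp_thin_linear_timeouts and tcp_thin_dupack sysctls
described in ip-sysctl.txt above enable the same behaviour for all TCP
sockets on the host.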
diff --git a/MAINTAINERS b/MAINTAINERS
index 32f6915ae0e..2c8b0d3d712 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1733,10 +1733,9 @@ F: include/linux/tfrc.h
F: net/dccp/
DECnet NETWORK LAYER
-M: Christine Caulfield <christine.caulfield@googlemail.com>
W: http://linux-decnet.sourceforge.net
L: linux-decnet-user@lists.sourceforge.net
-S: Maintained
+S: Orphan
F: Documentation/networking/decnet.txt
F: net/decnet/
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 30bdf427ee6..83a7751c38d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1374,7 +1374,7 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6))
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
#endif
dev_kfree_skb_any(skb);
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 4d4cad393dc..b6de7b1e2a5 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -812,7 +812,7 @@ static void set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
outb(RX_PROM, RX_CMD);
inb(RX_STATUS);
- } else if (dev->mc_list || dev->flags & IFF_ALLMULTI) {
+ } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
/* Multicast or all multicast is the same */
outb(RX_MULT, RX_CMD);
inb(RX_STATUS); /* Clear status. */
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index dadb46a8833..04b5bba1902 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -1216,7 +1216,7 @@ static int elp_close(struct net_device *dev)
static void elp_set_mc_list(struct net_device *dev)
{
elp_device *adapter = netdev_priv(dev);
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
int i;
unsigned long flags;
@@ -1230,10 +1230,9 @@ static void elp_set_mc_list(struct net_device *dev)
/* if num_addrs==0 the list will be cleared */
adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
adapter->tx_pcb.length = 6 * netdev_mc_count(dev);
- for (i = 0; i < netdev_mc_count(dev); i++) {
- memcpy(adapter->tx_pcb.data.multicast[i], dmi->dmi_addr, 6);
- dmi = dmi->next;
- }
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev)
+ memcpy(adapter->tx_pcb.data.multicast[i++], dmi->dmi_addr, 6);
adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
if (!send_pcb(dev, &adapter->tx_pcb))
pr_err("%s: couldn't send set_multicast command\n", dev->name);
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 063b049ffe5..1e898b1c806 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -1536,7 +1536,7 @@ static void set_rx_mode(struct net_device *dev)
pr_debug("%s: Setting promiscuous mode.\n",
dev->name);
new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm;
- } else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
+ } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast;
} else
new_mode = SetRxFilter | RxStation | RxBroadcast;
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 6948d667fc5..beed4fa10c6 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -625,7 +625,7 @@ static int init586(struct net_device *dev)
volatile struct iasetup_cmd_struct *ias_cmd;
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
int num_addrs = netdev_mc_count(dev);
ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
@@ -787,10 +787,9 @@ static int init586(struct net_device *dev)
mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST;
mc_cmd->cmd_link = 0xffff;
mc_cmd->mc_cnt = num_addrs * 6;
- for (i = 0; i < num_addrs; i++) {
- memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr, 6);
- dmi = dmi->next;
- }
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev)
+ memcpy((char *) mc_cmd->mc_list[i++], dmi->dmi_addr, 6);
p->scb->cbl_offset = make16(mc_cmd);
p->scb->cmd = CUC_START;
elmc_id_attn586();
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index ce982698051..5c07b147ec9 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1533,9 +1533,7 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
{
unsigned char block[62];
unsigned char *bp;
- struct dev_mc_list *dmc=dev->mc_list;
-
- int i;
+ struct dev_mc_list *dmc;
if(retry==0)
lp->mc_list_valid = 0;
@@ -1545,11 +1543,9 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
block[0]=netdev_mc_count(dev);
bp=block+2;
- for(i=0;i<netdev_mc_count(dev);i++)
- {
+ netdev_for_each_mc_addr(dmc, dev) {
memcpy(bp, dmc->dmi_addr, 6);
bp+=6;
- dmc=dmc->next;
}
if(mc32_command_nowait(dev, 2, block,
2+6*netdev_mc_count(dev))==-1)
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 5df46c230b0..f965431f492 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -2970,7 +2970,7 @@ static void set_rx_mode(struct net_device *dev)
if (vortex_debug > 3)
pr_notice("%s: Setting promiscuous mode.\n", dev->name);
new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
- } else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
+ } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
} else
new_mode = SetRxFilter | RxStation | RxBroadcast;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 17ff15f6099..46af867af85 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -997,7 +997,7 @@ config ETHOC
config GRETH
tristate "Aeroflex Gaisler GRETH Ethernet MAC support"
- depends on OF
+ depends on SPARC
select PHYLIB
select CRC32
help
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 68e7848ac0b..545c8417fc2 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1128,6 +1128,9 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_eth_tx_compl *txcp;
u16 end_idx, cmpl = 0, timeo = 0;
+ struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
+ struct sk_buff *sent_skb;
+ bool dummy_wrb;
/* Wait for a max of 200ms for all the tx-completions to arrive. */
do {
@@ -1151,6 +1154,15 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
if (atomic_read(&txq->used))
dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
atomic_read(&txq->used));
+
+ /* free posted tx for which compls will never arrive */
+ while (atomic_read(&txq->used)) {
+ sent_skb = sent_skbs[txq->tail];
+ end_idx = txq->tail;
+ index_adv(&end_idx,
+ wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
+ be_tx_compl_process(adapter, end_idx);
+ }
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 64a569bc92a..744c1928dfc 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1272,7 +1272,7 @@ static void set_multicast_list(struct net_device *dev)
static void SetMulticastFilter(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
int i, j, bit, byte;
u16 hashcode;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index fe7656bf68c..55ceae09738 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -17,6 +17,7 @@
#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
+#include <linux/virtio_net.h>
/*
* A macvtap queue is the central object of this driver, it connects
@@ -37,6 +38,7 @@ struct macvtap_queue {
struct socket sock;
struct macvlan_dev *vlan;
struct file *file;
+ unsigned int flags;
};
static struct proto macvtap_proto = {
@@ -58,32 +60,23 @@ static unsigned int macvtap_major;
static struct class *macvtap_class;
static struct cdev macvtap_cdev;
+static const struct proto_ops macvtap_socket_ops;
+
/*
* RCU usage:
- * The macvtap_queue is referenced both from the chardev struct file
- * and from the struct macvlan_dev using rcu_read_lock.
- *
- * We never actually update the contents of a macvtap_queue atomically
- * with RCU but it is used for race-free destruction of a queue when
- * either the file or the macvlan_dev goes away. Pointers back to
- * the dev and the file are implicitly valid as long as the queue
- * exists.
- *
- * The callbacks from macvlan are always done with rcu_read_lock held
- * already. For calls from file_operations, we use the rcu_read_lock_bh
- * to get a reference count on the socket and the device.
+ * The macvtap_queue and the macvlan_dev are loosely coupled, the
+ * pointers from one to the other can only be read while rcu_read_lock
+ * or macvtap_lock is held.
*
- * When destroying a queue, we remove the pointers from the file and
- * from the dev and then synchronize_rcu to make sure no thread is
- * still using the queue. There may still be references to the struct
- * sock inside of the queue from outbound SKBs, but these never
- * reference back to the file or the dev. The data structure is freed
- * through __sk_free when both our references and any pending SKBs
- * are gone.
+ * Both the file and the macvlan_dev hold a reference on the macvtap_queue
+ * through sock_hold(&q->sk). When the macvlan_dev goes away first,
+ * q->vlan becomes inaccessible. When the files gets closed,
+ * macvtap_get_queue() fails.
*
- * macvtap_lock is only used to prevent multiple concurrent open()
- * calls to assign a new vlan->tap pointer. It could be moved into
- * the macvlan_dev itself but is extremely rarely used.
+ * There may still be references to the struct sock inside of the
+ * queue from outbound SKBs, but these never reference back to the
+ * file or the dev. The data structure is freed through __sk_free
+ * when both our references and any pending SKBs are gone.
*/
static DEFINE_SPINLOCK(macvtap_lock);
@@ -101,11 +94,12 @@ static int macvtap_set_queue(struct net_device *dev, struct file *file,
goto out;
err = 0;
- q->vlan = vlan;
+ rcu_assign_pointer(q->vlan, vlan);
rcu_assign_pointer(vlan->tap, q);
+ sock_hold(&q->sk);
q->file = file;
- rcu_assign_pointer(file->private_data, q);
+ file->private_data = q;
out:
spin_unlock(&macvtap_lock);
@@ -113,28 +107,25 @@ out:
}
/*
- * We must destroy each queue exactly once, when either
- * the netdev or the file go away.
+ * The file owning the queue got closed, give up both
+ * the reference that the files holds as well as the
+ * one from the macvlan_dev if that still exists.
*
* Using the spinlock makes sure that we don't get
* to the queue again after destroying it.
- *
- * synchronize_rcu serializes with the packet flow
- * that uses rcu_read_lock.
*/
-static void macvtap_del_queue(struct macvtap_queue **qp)
+static void macvtap_put_queue(struct macvtap_queue *q)
{
- struct macvtap_queue *q;
+ struct macvlan_dev *vlan;
spin_lock(&macvtap_lock);
- q = rcu_dereference(*qp);
- if (!q) {
- spin_unlock(&macvtap_lock);
- return;
+ vlan = rcu_dereference(q->vlan);
+ if (vlan) {
+ rcu_assign_pointer(vlan->tap, NULL);
+ rcu_assign_pointer(q->vlan, NULL);
+ sock_put(&q->sk);
}
- rcu_assign_pointer(q->vlan->tap, NULL);
- rcu_assign_pointer(q->file->private_data, NULL);
spin_unlock(&macvtap_lock);
synchronize_rcu();
@@ -152,29 +143,29 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
return rcu_dereference(vlan->tap);
}
+/*
+ * The net_device is going away, give up the reference
+ * that it holds on the queue (all the queues one day)
+ * and safely set the pointer from the queues to NULL.
+ */
static void macvtap_del_queues(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- macvtap_del_queue(&vlan->tap);
-}
-
-static inline struct macvtap_queue *macvtap_file_get_queue(struct file *file)
-{
struct macvtap_queue *q;
- rcu_read_lock_bh();
- q = rcu_dereference(file->private_data);
- if (q) {
- sock_hold(&q->sk);
- dev_hold(q->vlan->dev);
+
+ spin_lock(&macvtap_lock);
+ q = rcu_dereference(vlan->tap);
+ if (!q) {
+ spin_unlock(&macvtap_lock);
+ return;
}
- rcu_read_unlock_bh();
- return q;
-}
-static inline void macvtap_file_put_queue(struct macvtap_queue *q)
-{
+ rcu_assign_pointer(vlan->tap, NULL);
+ rcu_assign_pointer(q->vlan, NULL);
+ spin_unlock(&macvtap_lock);
+
+ synchronize_rcu();
sock_put(&q->sk);
- dev_put(q->vlan->dev);
}
/*
@@ -189,7 +180,7 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
return -ENOLINK;
skb_queue_tail(&q->sk.sk_receive_queue, skb);
- wake_up(q->sk.sk_sleep);
+ wake_up_interruptible_poll(q->sk.sk_sleep, POLLIN | POLLRDNORM | POLLRDBAND);
return 0;
}
@@ -255,7 +246,7 @@ static void macvtap_sock_write_space(struct sock *sk)
return;
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_sync(sk->sk_sleep);
+ wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | POLLWRNORM | POLLWRBAND);
}
static int macvtap_open(struct inode *inode, struct file *file)
@@ -283,9 +274,11 @@ static int macvtap_open(struct inode *inode, struct file *file)
init_waitqueue_head(&q->sock.wait);
q->sock.type = SOCK_RAW;
q->sock.state = SS_CONNECTED;
+ q->sock.file = file;
+ q->sock.ops = &macvtap_socket_ops;
sock_init_data(&q->sock, &q->sk);
- q->sk.sk_allocation = GFP_ATOMIC; /* for now */
q->sk.sk_write_space = macvtap_sock_write_space;
+ q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
err = macvtap_set_queue(dev, file, q);
if (err)
@@ -300,13 +293,14 @@ out:
static int macvtap_release(struct inode *inode, struct file *file)
{
- macvtap_del_queue((struct macvtap_queue **)&file->private_data);
+ struct macvtap_queue *q = file->private_data;
+ macvtap_put_queue(q);
return 0;
}
static unsigned int macvtap_poll(struct file *file, poll_table * wait)
{
- struct macvtap_queue *q = macvtap_file_get_queue(file);
+ struct macvtap_queue *q = file->private_data;
unsigned int mask = POLLERR;
if (!q)
@@ -323,44 +317,192 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait)
sock_writeable(&q->sk)))
mask |= POLLOUT | POLLWRNORM;
- macvtap_file_put_queue(q);
out:
return mask;
}
+static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
+ size_t len, size_t linear,
+ int noblock, int *err)
+{
+ struct sk_buff *skb;
+
+ /* Under a page? Don't bother with paged skb. */
+ if (prepad + len < PAGE_SIZE || !linear)
+ linear = len;
+
+ skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
+ err);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, prepad);
+ skb_put(skb, linear);
+ skb->data_len = len - linear;
+ skb->len += len - linear;
+
+ return skb;
+}
+
+/*
+ * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
+ * be shared with the tun/tap driver.
+ */
+static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
+ struct virtio_net_hdr *vnet_hdr)
+{
+ unsigned short gso_type = 0;
+ if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ gso_type = SKB_GSO_TCPV4;
+ break;
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ gso_type = SKB_GSO_TCPV6;
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
+ gso_type = SKB_GSO_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+ gso_type |= SKB_GSO_TCP_ECN;
+
+ if (vnet_hdr->gso_size == 0)
+ return -EINVAL;
+ }
+
+ if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
+ vnet_hdr->csum_offset))
+ return -EINVAL;
+ }
+
+ if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
+ skb_shinfo(skb)->gso_type = gso_type;
+
+ /* Header must be checked, and gso_segs computed. */
+ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+ skb_shinfo(skb)->gso_segs = 0;
+ }
+ return 0;
+}
+
+static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+ struct virtio_net_hdr *vnet_hdr)
+{
+ memset(vnet_hdr, 0, sizeof(*vnet_hdr));
+
+ if (skb_is_gso(skb)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+ /* This is a hint as to how much should be linear. */
+ vnet_hdr->hdr_len = skb_headlen(skb);
+ vnet_hdr->gso_size = sinfo->gso_size;
+ if (sinfo->gso_type & SKB_GSO_TCPV4)
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+ else if (sinfo->gso_type & SKB_GSO_UDP)
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ else
+ BUG();
+ if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+ vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
+ } else
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ vnet_hdr->csum_start = skb->csum_start -
+ skb_headroom(skb);
+ vnet_hdr->csum_offset = skb->csum_offset;
+ } /* else everything is zero */
+
+ return 0;
+}
+
+
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q,
const struct iovec *iv, size_t count,
int noblock)
{
struct sk_buff *skb;
+ struct macvlan_dev *vlan;
size_t len = count;
int err;
+ struct virtio_net_hdr vnet_hdr = { 0 };
+ int vnet_hdr_len = 0;
+
+ if (q->flags & IFF_VNET_HDR) {
+ vnet_hdr_len = sizeof(vnet_hdr);
+
+ err = -EINVAL;
+ if ((len -= vnet_hdr_len) < 0)
+ goto err;
+
+ err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
+ vnet_hdr_len);
+ if (err < 0)
+ goto err;
+ if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
+ vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
+ vnet_hdr.hdr_len)
+ vnet_hdr.hdr_len = vnet_hdr.csum_start +
+ vnet_hdr.csum_offset + 2;
+ err = -EINVAL;
+ if (vnet_hdr.hdr_len > len)
+ goto err;
+ }
+ err = -EINVAL;
if (unlikely(len < ETH_HLEN))
- return -EINVAL;
+ goto err;
- skb = sock_alloc_send_skb(&q->sk, NET_IP_ALIGN + len, noblock, &err);
+ skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, len, vnet_hdr.hdr_len,
+ noblock, &err);
+ if (!skb)
+ goto err;
- if (!skb) {
- macvlan_count_rx(q->vlan, 0, false, false);
- return err;
- }
+ err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len, len);
+ if (err)
+ goto err_kfree;
+
+ skb_set_network_header(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_hdr(skb)->h_proto;
- skb_reserve(skb, NET_IP_ALIGN);
- skb_put(skb, count);
+ if (vnet_hdr_len) {
+ err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);
+ if (err)
+ goto err_kfree;
+ }
- if (skb_copy_datagram_from_iovec(skb, 0, iv, 0, len)) {
- macvlan_count_rx(q->vlan, 0, false, false);
+ rcu_read_lock_bh();
+ vlan = rcu_dereference(q->vlan);
+ if (vlan)
+ macvlan_start_xmit(skb, vlan->dev);
+ else
kfree_skb(skb);
- return -EFAULT;
- }
+ rcu_read_unlock_bh();
- skb_set_network_header(skb, ETH_HLEN);
+ return count;
- macvlan_start_xmit(skb, q->vlan->dev);
+err_kfree:
+ kfree_skb(skb);
- return count;
+err:
+ rcu_read_lock_bh();
+ vlan = rcu_dereference(q->vlan);
+ if (vlan)
+ netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++;
+ rcu_read_unlock_bh();
+
+ return err;
}
static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
@@ -368,15 +510,10 @@ static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
{
struct file *file = iocb->ki_filp;
ssize_t result = -ENOLINK;
- struct macvtap_queue *q = macvtap_file_get_queue(file);
-
- if (!q)
- goto out;
+ struct macvtap_queue *q = file->private_data;
result = macvtap_get_user(q, iv, iov_length(iv, count),
file->f_flags & O_NONBLOCK);
- macvtap_file_put_queue(q);
-out:
return result;
}
@@ -385,36 +522,44 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
const struct sk_buff *skb,
const struct iovec *iv, int len)
{
- struct macvlan_dev *vlan = q->vlan;
+ struct macvlan_dev *vlan;
int ret;
+ int vnet_hdr_len = 0;
+
+ if (q->flags & IFF_VNET_HDR) {
+ struct virtio_net_hdr vnet_hdr;
+ vnet_hdr_len = sizeof (vnet_hdr);
+ if ((len -= vnet_hdr_len) < 0)
+ return -EINVAL;
+
+ ret = macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
+ if (ret)
+ return ret;
+
+ if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, vnet_hdr_len))
+ return -EFAULT;
+ }
len = min_t(int, skb->len, len);
- ret = skb_copy_datagram_const_iovec(skb, 0, iv, 0, len);
+ ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
- macvlan_count_rx(vlan, len, ret == 0, 0);
+ rcu_read_lock_bh();
+ vlan = rcu_dereference(q->vlan);
+ if (vlan)
+ macvlan_count_rx(vlan, len, ret == 0, 0);
+ rcu_read_unlock_bh();
- return ret ? ret : len;
+ return ret ? ret : (len + vnet_hdr_len);
}
-static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
- unsigned long count, loff_t pos)
+static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
+ const struct iovec *iv, unsigned long len,
+ int noblock)
{
- struct file *file = iocb->ki_filp;
- struct macvtap_queue *q = macvtap_file_get_queue(file);
-
DECLARE_WAITQUEUE(wait, current);
struct sk_buff *skb;
- ssize_t len, ret = 0;
-
- if (!q)
- return -ENOLINK;
-
- len = iov_length(iv, count);
- if (len < 0) {
- ret = -EINVAL;
- goto out;
- }
+ ssize_t ret = 0;
add_wait_queue(q->sk.sk_sleep, &wait);
while (len) {
@@ -423,7 +568,7 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
/* Read frames from the queue */
skb = skb_dequeue(&q->sk.sk_receive_queue);
if (!skb) {
- if (file->f_flags & O_NONBLOCK) {
+ if (noblock) {
ret = -EAGAIN;
break;
}
@@ -442,9 +587,25 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
current->state = TASK_RUNNING;
remove_wait_queue(q->sk.sk_sleep, &wait);
+ return ret;
+}
+
+static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
+ unsigned long count, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ struct macvtap_queue *q = file->private_data;
+ ssize_t len, ret = 0;
+
+ len = iov_length(iv, count);
+ if (len < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
+ ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
out:
- macvtap_file_put_queue(q);
return ret;
}
@@ -454,36 +615,47 @@ out:
static long macvtap_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct macvtap_queue *q;
+ struct macvtap_queue *q = file->private_data;
+ struct macvlan_dev *vlan;
void __user *argp = (void __user *)arg;
struct ifreq __user *ifr = argp;
unsigned int __user *up = argp;
unsigned int u;
- char devname[IFNAMSIZ];
+ int ret;
switch (cmd) {
case TUNSETIFF:
/* ignore the name, just look at flags */
if (get_user(u, &ifr->ifr_flags))
return -EFAULT;
- if (u != (IFF_TAP | IFF_NO_PI))
- return -EINVAL;
- return 0;
+
+ ret = 0;
+ if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP))
+ ret = -EINVAL;
+ else
+ q->flags = u;
+
+ return ret;
case TUNGETIFF:
- q = macvtap_file_get_queue(file);
- if (!q)
+ rcu_read_lock_bh();
+ vlan = rcu_dereference(q->vlan);
+ if (vlan)
+ dev_hold(vlan->dev);
+ rcu_read_unlock_bh();
+
+ if (!vlan)
return -ENOLINK;
- memcpy(devname, q->vlan->dev->name, sizeof(devname));
- macvtap_file_put_queue(q);
+ ret = 0;
if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) ||
- put_user((TUN_TAP_DEV | TUN_NO_PI), &ifr->ifr_flags))
- return -EFAULT;
- return 0;
+ put_user(q->flags, &ifr->ifr_flags))
+ ret = -EFAULT;
+ dev_put(vlan->dev);
+ return ret;
case TUNGETFEATURES:
- if (put_user((IFF_TAP | IFF_NO_PI), up))
+ if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR, up))
return -EFAULT;
return 0;
@@ -491,25 +663,19 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
if (get_user(u, up))
return -EFAULT;
- q = macvtap_file_get_queue(file);
- if (!q)
- return -ENOLINK;
q->sk.sk_sndbuf = u;
- macvtap_file_put_queue(q);
return 0;
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
- TUN_F_TSO_ECN | TUN_F_UFO))
- return -EINVAL;
-
- /* TODO: add support for these, so far we don't
- support any offload */
- if (arg & (TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
- TUN_F_TSO_ECN | TUN_F_UFO))
+ TUN_F_TSO_ECN | TUN_F_UFO))
return -EINVAL;
+ /* TODO: only accept frames with the features that
+ got enabled for forwarded frames */
+ if (!(q->flags & IFF_VNET_HDR))
+ return -EINVAL;
return 0;
default:
@@ -539,6 +705,53 @@ static const struct file_operations macvtap_fops = {
#endif
};
+static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
+{
+ struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
+ return macvtap_get_user(q, m->msg_iov, total_len,
+ m->msg_flags & MSG_DONTWAIT);
+}
+
+static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len,
+ int flags)
+{
+ struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
+ int ret;
+ if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
+ return -EINVAL;
+ ret = macvtap_do_read(q, iocb, m->msg_iov, total_len,
+ flags & MSG_DONTWAIT);
+ if (ret > total_len) {
+ m->msg_flags |= MSG_TRUNC;
+ ret = flags & MSG_TRUNC ? ret : total_len;
+ }
+ return ret;
+}
+
+/* Ops structure to mimic raw sockets with tun */
+static const struct proto_ops macvtap_socket_ops = {
+ .sendmsg = macvtap_sendmsg,
+ .recvmsg = macvtap_recvmsg,
+};
+
+/* Get an underlying socket object from tun file. Returns error unless file is
+ * attached to a device. The returned object works like a packet socket, it
+ * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
+ * holding a reference to the file for as long as the socket is in use. */
+struct socket *macvtap_get_socket(struct file *file)
+{
+ struct macvtap_queue *q;
+ if (file->f_op != &macvtap_fops)
+ return ERR_PTR(-EINVAL);
+ q = file->private_data;
+ if (!q)
+ return ERR_PTR(-EBADFD);
+ return &q->sock;
+}
+EXPORT_SYMBOL_GPL(macvtap_get_socket);
+
static int macvtap_init(void)
{
int err;
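The macvtap_get_socket() helper exported above is meant for in-kernel
consumers such as the vhost-net backend (drivers/vhost/net.c in the
diffstat). An illustrative sketch, not patch content, of how such a consumer
might resolve a file descriptor into the backing socket while keeping the
file reference pinned for as long as the socket is used; the function name
is hypothetical:

	static struct socket *example_get_macvtap_socket(int fd)
	{
		struct file *file = fget(fd);	/* pin the file while the socket is in use */
		struct socket *sock;

		if (!file)
			return ERR_PTR(-EBADF);

		sock = macvtap_get_socket(file);
		if (IS_ERR(sock))
			fput(file);	/* not a macvtap file: drop the reference again */

		/* On success the caller keeps 'file' until it is done with
		 * 'sock', then releases it with fput(). */
		return sock;
	}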
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 102be16e9b5..43bc66aa840 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -5092,8 +5092,8 @@ static void s2io_set_multicast(struct net_device *dev)
}
/* Create the new Rx filter list and update the same in H/W. */
- for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
- i++, mclist = mclist->next) {
+ i = 0;
+ netdev_for_each_mc_addr(mclist, dev) {
memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
ETH_ALEN);
mac_addr = 0;
@@ -5121,6 +5121,7 @@ static void s2io_set_multicast(struct net_device *dev)
dev->name);
return;
}
+ i++;
}
}
}
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 564d4d7f855..9944e5d662c 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2161,13 +2161,13 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
* XXX if the table overflows */
idx = 1; /* skip station address */
- mclist = dev->mc_list;
- while (mclist && (idx < MAC_ADDR_COUNT)) {
+ netdev_for_each_mc_addr(mclist, dev) {
+ if (idx == MAC_ADDR_COUNT)
+ break;
reg = sbmac_addr2reg(mclist->dmi_addr);
port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
__raw_writeq(reg, port);
idx++;
- mclist = mclist->next;
}
/*
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 00ff8995ad6..d87c4787fff 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -435,7 +435,7 @@ static void _sc92031_set_mar(struct net_device *dev)
else if (dev->flags & IFF_MULTICAST) {
struct dev_mc_list *mc_list;
- for (mc_list = dev->mc_list; mc_list; mc_list = mc_list->next) {
+ netdev_for_each_mc_addr(mc_list, dev) {
u32 crc;
unsigned bit = 0;
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index dc58d9fd0f3..88f2fb193ab 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1602,11 +1602,10 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
static void efx_set_multicast_list(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct dev_mc_list *mc_list = net_dev->mc_list;
+ struct dev_mc_list *mc_list;
union efx_multicast_hash *mc_hash = &efx->multicast_hash;
u32 crc;
int bit;
- int i;
efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
@@ -1615,11 +1614,10 @@ static void efx_set_multicast_list(struct net_device *net_dev)
memset(mc_hash, 0xff, sizeof(*mc_hash));
} else {
memset(mc_hash, 0x00, sizeof(*mc_hash));
- for (i = 0; i < netdev_mc_count(net_dev); i++) {
+ netdev_for_each_mc_addr(mc_list, net_dev) {
crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
set_bit_le(bit, mc_hash->byte);
- mc_list = mc_list->next;
}
/* Broadcast packets go through the multicast hash filter.
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index bf2ffbb913f..4487b065ef7 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -17,7 +17,9 @@
See the file COPYING in this distribution for more information.
- */
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -32,17 +34,6 @@
#include <linux/dma-mapping.h>
#include <asm/irq.h>
-#define net_drv(p, arg...) if (netif_msg_drv(p)) \
- printk(arg)
-#define net_probe(p, arg...) if (netif_msg_probe(p)) \
- printk(arg)
-#define net_link(p, arg...) if (netif_msg_link(p)) \
- printk(arg)
-#define net_intr(p, arg...) if (netif_msg_intr(p)) \
- printk(arg)
-#define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
- printk(arg)
-
#define PHY_MAX_ADDR 32
#define PHY_ID_ANY 0x1f
#define MII_REG_ANY 0x1f
@@ -50,7 +41,6 @@
#define DRV_VERSION "1.4"
#define DRV_NAME "sis190"
#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
-#define PFX DRV_NAME ": "
#define sis190_rx_skb netif_rx
#define sis190_rx_quota(count, quota) count
@@ -382,7 +372,7 @@ static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
}
if (i > 99)
- printk(KERN_ERR PFX "PHY command failed !\n");
+ pr_err("PHY command failed !\n");
}
static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
@@ -590,8 +580,7 @@ static int sis190_rx_interrupt(struct net_device *dev,
status = le32_to_cpu(desc->PSize);
- // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
- // status);
+ //netif_info(tp, intr, dev, "Rx PSize = %08x\n", status);
if (sis190_rx_pkt_err(status, stats) < 0)
sis190_give_to_asic(desc, tp->rx_buf_sz);
@@ -602,9 +591,8 @@ static int sis190_rx_interrupt(struct net_device *dev,
struct pci_dev *pdev = tp->pci_dev;
if (unlikely(pkt_size > tp->rx_buf_sz)) {
- net_intr(tp, KERN_INFO
- "%s: (frag) status = %08x.\n",
- dev->name, status);
+ netif_info(tp, intr, dev,
+ "(frag) status = %08x\n", status);
stats->rx_dropped++;
stats->rx_length_errors++;
sis190_give_to_asic(desc, tp->rx_buf_sz);
@@ -638,12 +626,12 @@ static int sis190_rx_interrupt(struct net_device *dev,
tp->cur_rx = cur_rx;
delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
- if (!delta && count && netif_msg_intr(tp))
- printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
+ if (!delta && count)
+ netif_info(tp, intr, dev, "no Rx buffer allocated\n");
tp->dirty_rx += delta;
- if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
- printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
+ if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
+ netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
return count;
}
@@ -752,10 +740,10 @@ static irqreturn_t sis190_interrupt(int irq, void *__dev)
SIS_W32(IntrStatus, status);
- // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
+// netif_info(tp, intr, dev, "status = %08x\n", status);
if (status & LinkChange) {
- net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
+ netif_info(tp, intr, dev, "link change\n");
schedule_work(&tp->phy_task);
}
@@ -849,12 +837,10 @@ static void sis190_set_rx_mode(struct net_device *dev)
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
struct dev_mc_list *mclist;
- unsigned int i;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr =
ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
@@ -933,8 +919,7 @@ static void sis190_phy_task(struct work_struct *work)
} else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
BMSR_ANEGCOMPLETE)) {
netif_carrier_off(dev);
- net_link(tp, KERN_WARNING "%s: auto-negotiating...\n",
- dev->name);
+ netif_warn(tp, link, dev, "auto-negotiating...\n");
mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
} else {
/* Rejoice ! */
@@ -960,13 +945,13 @@ static void sis190_phy_task(struct work_struct *work)
u16 adv, autoexp, gigadv, gigrec;
val = mdio_read(ioaddr, phy_id, 0x1f);
- net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
+ netif_info(tp, link, dev, "mii ext = %04x\n", val);
val = mdio_read(ioaddr, phy_id, MII_LPA);
adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
- net_link(tp, KERN_INFO "%s: mii lpa=%04x adv=%04x exp=%04x.\n",
- dev->name, val, adv, autoexp);
+ netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n",
+ val, adv, autoexp);
if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
/* check for gigabit speed */
@@ -1007,8 +992,7 @@ static void sis190_phy_task(struct work_struct *work)
tp->negotiated_lpa = p->val;
- net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
- p->msg);
+ netif_info(tp, link, dev, "link on %s mode\n", p->msg);
netif_carrier_on(dev);
}
@@ -1194,9 +1178,8 @@ static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
netif_stop_queue(dev);
- net_tx_err(tp, KERN_ERR PFX
- "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
+ netif_err(tp, tx_err, dev,
+ "BUG! Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -1296,9 +1279,9 @@ static u16 sis190_default_phy(struct net_device *dev)
if (mii_if->phy_id != phy_default->phy_id) {
mii_if->phy_id = phy_default->phy_id;
- net_probe(tp, KERN_INFO
- "%s: Using transceiver at address %d as default.\n",
- pci_name(tp->pci_dev), mii_if->phy_id);
+ if (netif_msg_probe(tp))
+ pr_info("%s: Using transceiver at address %d as default\n",
+ pci_name(tp->pci_dev), mii_if->phy_id);
}
status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
@@ -1336,14 +1319,15 @@ static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
LAN : HOME) : p->type;
tp->features |= p->feature;
- net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
- pci_name(tp->pci_dev), p->name, phy_id);
+ if (netif_msg_probe(tp))
+ pr_info("%s: %s transceiver at address %d\n",
+ pci_name(tp->pci_dev), p->name, phy_id);
} else {
phy->type = UNKNOWN;
- net_probe(tp, KERN_INFO
- "%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
- pci_name(tp->pci_dev),
- phy->id[0], (phy->id[1] & 0xfff0), phy_id);
+ if (netif_msg_probe(tp))
+ pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
+ pci_name(tp->pci_dev),
+ phy->id[0], (phy->id[1] & 0xfff0), phy_id);
}
}
@@ -1407,8 +1391,9 @@ static int __devinit sis190_mii_probe(struct net_device *dev)
}
if (list_empty(&tp->first_phy)) {
- net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
- pci_name(tp->pci_dev));
+ if (netif_msg_probe(tp))
+ pr_info("%s: No MII transceivers found!\n",
+ pci_name(tp->pci_dev));
rc = -EIO;
goto out;
}
@@ -1454,7 +1439,8 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
dev = alloc_etherdev(sizeof(*tp));
if (!dev) {
- net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
+ if (netif_msg_drv(&debug))
+ pr_err("unable to alloc new ethernet\n");
rc = -ENOMEM;
goto err_out_0;
}
@@ -1467,34 +1453,39 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
rc = pci_enable_device(pdev);
if (rc < 0) {
- net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: enable failure\n", pci_name(pdev));
goto err_free_dev_1;
}
rc = -ENODEV;
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: region #0 is no MMIO resource\n",
+ pci_name(pdev));
goto err_pci_disable_2;
}
if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
- net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: invalid PCI region size(s)\n",
+ pci_name(pdev));
goto err_pci_disable_2;
}
rc = pci_request_regions(pdev, DRV_NAME);
if (rc < 0) {
- net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: could not request regions\n",
+ pci_name(pdev));
goto err_pci_disable_2;
}
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc < 0) {
- net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: DMA configuration failed\n",
+ pci_name(pdev));
goto err_free_res_3;
}
@@ -1502,8 +1493,9 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
if (!ioaddr) {
- net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: cannot remap MMIO, aborting\n",
+ pci_name(pdev));
rc = -EIO;
goto err_free_res_3;
}
@@ -1539,9 +1531,8 @@ static void sis190_tx_timeout(struct net_device *dev)
if (tmp8 & CmdTxEnb)
SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
-
- net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
- dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
+ netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
+ SIS_R32(TxControl), SIS_R32(TxSts));
/* Disable interrupts by clearing the interrupt mask. */
SIS_W32(IntrMask, 0x0000);
@@ -1570,15 +1561,16 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
u16 sig;
int i;
- net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));
/* Check to see if there is a sane EEPROM */
sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
if ((sig == 0xffff) || (sig == 0x0000)) {
- net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
- pci_name(pdev), sig);
+ if (netif_msg_probe(tp))
+ pr_info("%s: Error EEPROM read %x\n",
+ pci_name(pdev), sig);
return -EIO;
}
@@ -1612,8 +1604,8 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
u8 reg, tmp8;
unsigned int i;
- net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_info("%s: Read MAC address from APC\n", pci_name(pdev));
for (i = 0; i < ARRAY_SIZE(ids); i++) {
isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
@@ -1622,8 +1614,9 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
}
if (!isa_bridge) {
- net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_info("%s: Can not find ISA bridge\n",
+ pci_name(pdev));
return -EIO;
}
@@ -1704,7 +1697,7 @@ static void sis190_set_speed_auto(struct net_device *dev)
int phy_id = tp->mii_if.phy_id;
int val;
- net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
+ netif_info(tp, link, dev, "Enabling Auto-negotiation\n");
val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
@@ -1831,7 +1824,8 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
int rc;
if (!printed_version) {
- net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
+ if (netif_msg_drv(&debug))
+ pr_info(SIS190_DRIVER_NAME " loaded\n");
printed_version = 1;
}
@@ -1871,12 +1865,14 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
if (rc < 0)
goto err_remove_mii;
- net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), %pM\n",
- pci_name(pdev), sis_chip_info[ent->driver_data].name,
- ioaddr, dev->irq, dev->dev_addr);
-
- net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
- (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
+ if (netif_msg_probe(tp)) {
+ netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
+ pci_name(pdev),
+ sis_chip_info[ent->driver_data].name,
+ ioaddr, dev->irq, dev->dev_addr);
+ netdev_info(dev, "%s mode.\n",
+ (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
+ }
netif_carrier_off(dev);
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 32ae87c09f5..cc0c731c4f0 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -2300,9 +2300,8 @@ static void set_rx_mode(struct net_device *net_dev)
* packets */
struct dev_mc_list *mclist;
rx_mode = RFAAB;
- for (i = 0, mclist = net_dev->mc_list;
- mclist && i < netdev_mc_count(net_dev);
- i++, mclist = mclist->next) {
+
+ netdev_for_each_mc_addr(mclist, net_dev) {
unsigned int bit_nr =
sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev);
mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf));
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 346adfae986..1921a54ea99 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -852,8 +852,7 @@ static void skfp_ctl_set_multicast_list(struct net_device *dev)
static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
- struct dev_mc_list *dmi; /* ptr to multicast addr entry */
- int i;
+ struct dev_mc_list *dmi;
/* Enable promiscuous mode, if necessary */
if (dev->flags & IFF_PROMISC) {
@@ -877,17 +876,14 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
/* use exact filtering */
// point to first multicast addr
- dmi = dev->mc_list;
-
- for (i = 0; i < netdev_mc_count(dev); i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
mac_add_multicast(smc,
(struct fddi_addr *)dmi->dmi_addr,
1);
pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
dmi->dmi_addr);
- dmi = dmi->next;
- } // for
+ }
} else { // more MC addresses than HW supports
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 720af1c7854..d0058e5bb6a 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2917,8 +2917,7 @@ static void genesis_set_multicast(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- int i, count = netdev_mc_count(dev);
- struct dev_mc_list *list = dev->mc_list;
+ struct dev_mc_list *list;
u32 mode;
u8 filter[8];
@@ -2938,7 +2937,7 @@ static void genesis_set_multicast(struct net_device *dev)
skge->flow_status == FLOW_STAT_SYMMETRIC)
genesis_add_filter(filter, pause_mc_addr);
- for (i = 0; list && i < count; i++, list = list->next)
+ netdev_for_each_mc_addr(list, dev)
genesis_add_filter(filter, list->dmi_addr);
}
@@ -2957,7 +2956,7 @@ static void yukon_set_multicast(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- struct dev_mc_list *list = dev->mc_list;
+ struct dev_mc_list *list;
int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
skge->flow_status == FLOW_STAT_SYMMETRIC);
u16 reg;
@@ -2975,13 +2974,12 @@ static void yukon_set_multicast(struct net_device *dev)
else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */
reg &= ~GM_RXCR_MCF_ENA;
else {
- int i;
reg |= GM_RXCR_MCF_ENA;
if (rx_pause)
yukon_add_filter(filter, pause_mc_addr);
- for (i = 0; list && i < netdev_mc_count(dev); i++, list = list->next)
+ netdev_for_each_mc_addr(list, dev)
yukon_add_filter(filter, list->dmi_addr);
}
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index d1e98e2170c..653bdd76ef4 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -3621,7 +3621,7 @@ static void sky2_set_multicast(struct net_device *dev)
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- struct dev_mc_list *list = dev->mc_list;
+ struct dev_mc_list *list;
u16 reg;
u8 filter[8];
int rx_pause;
@@ -3640,13 +3640,12 @@ static void sky2_set_multicast(struct net_device *dev)
else if (netdev_mc_empty(dev) && !rx_pause)
reg &= ~GM_RXCR_MCF_ENA;
else {
- int i;
reg |= GM_RXCR_MCF_ENA;
if (rx_pause)
sky2_add_filter(filter, pause_mc_addr);
- for (i = 0; list && i < netdev_mc_count(dev); i++, list = list->next)
+ netdev_for_each_mc_addr(list, dev)
sky2_add_filter(filter, list->dmi_addr);
}
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 13c0b76dec4..4fd1d8b3878 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1383,29 +1383,20 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
/* Enabling specific multicast addresses */
unsigned int hash_high = 0;
unsigned int hash_low = 0;
- unsigned int count = 0;
- struct dev_mc_list *mc_list = dev->mc_list;
+ struct dev_mc_list *mc_list;
pdata->set_bits_mask = MAC_CR_HPFILT_;
pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_);
- while (mc_list) {
- count++;
- if ((mc_list->dmi_addrlen) == ETH_ALEN) {
- unsigned int bitnum =
- smsc911x_hash(mc_list->dmi_addr);
- unsigned int mask = 0x01 << (bitnum & 0x1F);
- if (bitnum & 0x20)
- hash_high |= mask;
- else
- hash_low |= mask;
- } else {
- SMSC_WARNING(DRV, "dmi_addrlen != 6");
- }
- mc_list = mc_list->next;
+ netdev_for_each_mc_addr(mc_list, dev) {
+ unsigned int bitnum = smsc911x_hash(mc_list->dmi_addr);
+ unsigned int mask = 0x01 << (bitnum & 0x1F);
+
+ if (bitnum & 0x20)
+ hash_high |= mask;
+ else
+ hash_low |= mask;
}
- if (count != (unsigned int)netdev_mc_count(dev))
- SMSC_WARNING(DRV, "mc_count != dev->mc_count");
pdata->hashhi = hash_high;
pdata->hashlo = hash_low;
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 2bd3c986559..30110a11d73 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -1063,11 +1063,11 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
mac_cr |= MAC_CR_MCPAS_;
mac_cr &= (~MAC_CR_HPFILT_);
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mc_list = dev->mc_list;
+ struct dev_mc_list *mc_list;
u32 hash_lo = 0, hash_hi = 0;
smsc_dbg(HW, "Multicast filter enabled");
- while (mc_list) {
+ netdev_for_each_mc_addr(mc_list, dev) {
u32 bit_num = smsc9420_hash(mc_list->dmi_addr);
u32 mask = 1 << (bit_num & 0x1F);
@@ -1076,7 +1076,6 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
else
hash_lo |= mask;
- mc_list = mc_list->next;
}
smsc9420_reg_write(pd, HASHH, hash_hi);
smsc9420_reg_write(pd, HASHL, hash_lo);
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index bd8bc66f2e0..287c251075e 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -531,7 +531,7 @@ static void sonic_multicast_list(struct net_device *dev)
{
struct sonic_local *lp = netdev_priv(dev);
unsigned int rcr;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
unsigned char *addr;
int i;
@@ -549,13 +549,14 @@ static void sonic_multicast_list(struct net_device *dev)
printk("sonic_multicast_list: mc_count %d\n",
netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */
- for (i = 1; i <= netdev_mc_count(dev); i++) {
+ i = 1;
+ netdev_for_each_mc_addr(dmi, dev) {
addr = dmi->dmi_addr;
- dmi = dmi->next;
sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
+ i++;
}
SONIC_WRITE(SONIC_CDC, 16);
/* issue Load CAM command */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 16191998ac6..2f8a8c32021 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -646,7 +646,7 @@ spider_net_set_multi(struct net_device *netdev)
hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
set_bit(0xfd, bitmask);
- for (mc = netdev->mc_list; mc; mc = mc->next) {
+ netdev_for_each_mc_addr(mc, netdev) {
hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr);
set_bit(hash, bitmask);
}
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 58bc7ac086c..0f405ef5185 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1804,14 +1804,14 @@ static void set_rx_mode(struct net_device *dev)
/* Use the 16 element perfect filter, skip first two entries. */
void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
__be16 *eaddrs;
- for (i = 2, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev) + 2;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
eaddrs = (__be16 *)mclist->dmi_addr;
writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
}
eaddrs = (__be16 *)dev->dev_addr;
+ i = netdev_mc_count(dev) + 2;
while (i++ < 16) {
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
diff --git a/drivers/net/stmmac/dwmac100.c b/drivers/net/stmmac/dwmac100.c
index 576b256ee38..803b0373d84 100644
--- a/drivers/net/stmmac/dwmac100.c
+++ b/drivers/net/stmmac/dwmac100.c
@@ -315,7 +315,6 @@ static void dwmac100_set_filter(struct net_device *dev)
value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
MAC_CONTROL_HO | MAC_CONTROL_HP);
} else {
- int i;
u32 mc_filter[2];
struct dev_mc_list *mclist;
@@ -326,8 +325,7 @@ static void dwmac100_set_filter(struct net_device *dev)
MAC_CONTROL_IF | MAC_CONTROL_HO);
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list;
- mclist && i < netdev_mc_count(dev); i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
/* The upper 6 bits of the calculated CRC are used to
 * index the contents of the hash table */
int bit_nr =
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index 90dbb4f41ef..a6538ae4694 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -93,7 +93,6 @@ static void dwmac1000_set_filter(struct net_device *dev)
writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
} else if (!netdev_mc_empty(dev)) {
- int i;
u32 mc_filter[2];
struct dev_mc_list *mclist;
@@ -101,8 +100,7 @@ static void dwmac1000_set_filter(struct net_device *dev)
value = GMAC_FRAME_FILTER_HMC;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list;
- mclist && i < netdev_mc_count(dev); i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
/* The upper 6 bits of the calculated CRC are used to
 index the contents of the hash table */
int bit_nr =
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index efedc252e4b..2f6a760e5f2 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -413,7 +413,7 @@ static int init586(struct net_device *dev)
volatile struct iasetup_cmd_struct *ias_cmd;
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
- struct dev_mc_list *dmi=dev->mc_list;
+ struct dev_mc_list *dmi;
int num_addrs=netdev_mc_count(dev);
ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
@@ -536,8 +536,10 @@ static int init586(struct net_device *dev)
mc_cmd->cmd_link = 0xffff;
mc_cmd->mc_cnt = swab16(num_addrs * 6);
- for(i=0;i<num_addrs;i++,dmi=dmi->next)
- memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr,6);
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev)
+ memcpy((char *) mc_cmd->mc_list[i++],
+ dmi->dmi_addr, ETH_ALEN);
p->scb->cbl_offset = make16(mc_cmd);
p->scb->cmd_cuc = CUC_START;
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index dfea56fa39e..a0bd361d5ec 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -999,7 +999,7 @@ static void bigmac_set_multicast(struct net_device *dev)
{
struct bigmac *bp = netdev_priv(dev);
void __iomem *bregs = bp->bregs;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
int i;
u32 tmp, crc;
@@ -1028,9 +1028,8 @@ static void bigmac_set_multicast(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < netdev_mc_count(dev); i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if (!(*addrs & 1))
continue;
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 4171259590b..a855934dfc3 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -1528,8 +1528,7 @@ static void set_rx_mode(struct net_device *dev)
int index;
int crc;
memset (mc_filter, 0, sizeof (mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
if (crc & 0x80000000) index |= 1 << bit;
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index d497ec05395..4344017bfae 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1846,17 +1846,13 @@ static u32 gem_setup_multicast(struct gem *gp)
} else {
u16 hash_table[16];
u32 crc;
- struct dev_mc_list *dmi = gp->dev->mc_list;
+ struct dev_mc_list *dmi;
int i;
- for (i = 0; i < 16; i++)
- hash_table[i] = 0;
-
- for (i = 0; i < netdev_mc_count(gp->dev); i++) {
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(dmi, gp->dev) {
char *addrs = dmi->dmi_addr;
- dmi = dmi->next;
-
if (!(*addrs & 1))
continue;
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 905df35ff78..b17dbb11bd6 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1523,17 +1523,13 @@ static int happy_meal_init(struct happy_meal *hp)
hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
} else if ((hp->dev->flags & IFF_PROMISC) == 0) {
u16 hash_table[4];
- struct dev_mc_list *dmi = hp->dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
- for (i = 0; i < 4; i++)
- hash_table[i] = 0;
-
- for (i = 0; i < netdev_mc_count(hp->dev); i++) {
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(dmi, hp->dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if (!(*addrs & 1))
continue;
@@ -2366,9 +2362,8 @@ static void happy_meal_set_multicast(struct net_device *dev)
{
struct happy_meal *hp = netdev_priv(dev);
void __iomem *bregs = hp->bigmacregs;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
spin_lock_irq(&hp->happy_lock);
@@ -2384,12 +2379,9 @@ static void happy_meal_set_multicast(struct net_device *dev)
} else {
u16 hash_table[4];
- for (i = 0; i < 4; i++)
- hash_table[i] = 0;
-
- for (i = 0; i < netdev_mc_count(dev); i++) {
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if (!(*addrs & 1))
continue;
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index cf9d5bb9e1e..d7c73f478ef 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1170,9 +1170,8 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
u32 val;
@@ -1196,9 +1195,8 @@ static void lance_load_multicast(struct net_device *dev)
return;
/* Add addresses */
- for (i = 0; i < netdev_mc_count(dev); i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
/* multicast address? */
if (!(*addrs & 1))
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 3bc35d86ed6..be637dce944 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -627,7 +627,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void qe_set_multicast(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
u8 new_mconfig = qep->mconfig;
char *addrs;
int i;
@@ -650,12 +650,9 @@ static void qe_set_multicast(struct net_device *dev)
u16 hash_table[4];
u8 *hbytes = (unsigned char *) &hash_table[0];
- for (i = 0; i < 4; i++)
- hash_table[i] = 0;
-
- for (i = 0; i < netdev_mc_count(dev); i++) {
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if (!(*addrs & 1))
continue;
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index d65764ea1d8..6b1b7cea7f6 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -765,7 +765,7 @@ static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
struct dev_addr_list *p;
- for (p = dev->mc_list; p; p = p->next) {
+ netdev_for_each_mc_addr(p, dev) {
struct vnet_mcast_entry *m;
m = __vnet_mc_find(vp, p->dmi_addr);
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ddf66155f5e..f204f73c4f9 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,7 +67,6 @@
#include "tg3.h"
#define DRV_MODULE_NAME "tg3"
-#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "3.108"
#define DRV_MODULE_RELDATE "February 17, 2010"
@@ -158,7 +157,7 @@
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
static char version[] __devinitdata =
- DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
@@ -1099,8 +1098,7 @@ static int tg3_mdio_init(struct tg3 *tp)
i = mdiobus_register(tp->mdio_bus);
if (i) {
- printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
- tp->dev->name, i);
+ netdev_warn(tp->dev, "mdiobus_reg failed (0x%x)\n", i);
mdiobus_free(tp->mdio_bus);
return i;
}
@@ -1108,7 +1106,7 @@ static int tg3_mdio_init(struct tg3 *tp)
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
if (!phydev || !phydev->drv) {
- printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
+ netdev_warn(tp->dev, "No PHY devices\n");
mdiobus_unregister(tp->mdio_bus);
mdiobus_free(tp->mdio_bus);
return -ENODEV;
@@ -1252,27 +1250,22 @@ static void tg3_ump_link_report(struct tg3 *tp)
static void tg3_link_report(struct tg3 *tp)
{
if (!netif_carrier_ok(tp->dev)) {
- if (netif_msg_link(tp))
- printk(KERN_INFO PFX "%s: Link is down.\n",
- tp->dev->name);
+ netif_info(tp, link, tp->dev, "Link is down\n");
tg3_ump_link_report(tp);
} else if (netif_msg_link(tp)) {
- printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
- tp->dev->name,
- (tp->link_config.active_speed == SPEED_1000 ?
- 1000 :
- (tp->link_config.active_speed == SPEED_100 ?
- 100 : 10)),
- (tp->link_config.active_duplex == DUPLEX_FULL ?
- "full" : "half"));
-
- printk(KERN_INFO PFX
- "%s: Flow control is %s for TX and %s for RX.\n",
- tp->dev->name,
- (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
- "on" : "off",
- (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
- "on" : "off");
+ netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
+ (tp->link_config.active_speed == SPEED_1000 ?
+ 1000 :
+ (tp->link_config.active_speed == SPEED_100 ?
+ 100 : 10)),
+ (tp->link_config.active_duplex == DUPLEX_FULL ?
+ "full" : "half"));
+
+ netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
+ (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
+ "on" : "off",
+ (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
+ "on" : "off");
tg3_ump_link_report(tp);
}
}
@@ -1471,7 +1464,7 @@ static int tg3_phy_init(struct tg3 *tp)
phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
phydev->dev_flags, phydev->interface);
if (IS_ERR(phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
+ netdev_err(tp->dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
}
@@ -2500,8 +2493,8 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
break;
default:
- printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
- tp->dev->name, state);
+ netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
+ state);
return -EINVAL;
}
@@ -4342,10 +4335,8 @@ static void tg3_tx_recover(struct tg3 *tp)
BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
tp->write32_tx_mbox == tg3_write_indirect_mbox);
- printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
- "mapped I/O cycles to the network device, attempting to "
- "recover. Please report the problem to the driver maintainer "
- "and include system chipset information.\n", tp->dev->name);
+ netdev_warn(tp->dev, "The system may be re-ordering memory-mapped I/O cycles to the network device, attempting to recover\n"
+ "Please report the problem to the driver maintainer and include system chipset information.\n");
spin_lock(&tp->lock);
tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
@@ -5269,8 +5260,7 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
err = tg3_init_hw(tp, reset_phy);
if (err) {
- printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
- "aborting.\n", tp->dev->name);
+ netdev_err(tp->dev, "Failed to re-initialize device, aborting\n");
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_full_unlock(tp);
del_timer_sync(&tp->timer);
@@ -5343,10 +5333,10 @@ out:
static void tg3_dump_short_state(struct tg3 *tp)
{
- printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
- tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
- printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
- tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
+ netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
+ tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
+ netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
+ tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
static void tg3_tx_timeout(struct net_device *dev)
@@ -5354,8 +5344,7 @@ static void tg3_tx_timeout(struct net_device *dev)
struct tg3 *tp = netdev_priv(dev);
if (netif_msg_tx_err(tp)) {
- printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
- dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
tg3_dump_short_state(tp);
}
@@ -5519,8 +5508,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
- "queue awake!\n", dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
}
return NETDEV_TX_BUSY;
}
@@ -5723,8 +5711,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
- "queue awake!\n", dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
}
return NETDEV_TX_BUSY;
}
@@ -6071,11 +6058,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
- printk(KERN_WARNING PFX
- "%s: Using a smaller RX standard ring, "
- "only %d out of %d buffers were allocated "
- "successfully.\n",
- tp->dev->name, i, tp->rx_pending);
+ netdev_warn(tp->dev, "Using a smaller RX standard ring, only %d out of %d buffers were allocated successfully\n",
+ i, tp->rx_pending);
if (i == 0)
goto initfail;
tp->rx_pending = i;
@@ -6104,11 +6088,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
for (i = 0; i < tp->rx_jumbo_pending; i++) {
if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
- printk(KERN_WARNING PFX
- "%s: Using a smaller RX jumbo ring, "
- "only %d out of %d buffers were "
- "allocated successfully.\n",
- tp->dev->name, i, tp->rx_jumbo_pending);
+ netdev_warn(tp->dev, "Using a smaller RX jumbo ring, only %d out of %d buffers were allocated successfully\n",
+ i, tp->rx_jumbo_pending);
if (i == 0)
goto initfail;
tp->rx_jumbo_pending = i;
@@ -6452,8 +6433,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
}
if (i == MAX_WAIT_CNT && !silent) {
- printk(KERN_ERR PFX "tg3_stop_block timed out, "
- "ofs=%lx enable_bit=%x\n",
+ pr_err("tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
ofs, enable_bit);
return -ENODEV;
}
@@ -6500,9 +6480,8 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
break;
}
if (i >= MAX_WAIT_CNT) {
- printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
- "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
- tp->dev->name, tr32(MAC_TX_MODE));
+ netdev_err(tp->dev, "%s timed out, TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
+ __func__, tr32(MAC_TX_MODE));
err |= -ENODEV;
}
@@ -6723,8 +6702,7 @@ static int tg3_poll_fw(struct tg3 *tp)
!(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
- printk(KERN_INFO PFX "%s: No firmware running.\n",
- tp->dev->name);
+ netdev_info(tp->dev, "No firmware running\n");
}
if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
@@ -7152,10 +7130,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
}
if (i >= 10000) {
- printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
- "and %s CPU\n",
- tp->dev->name,
- (offset == RX_CPU_BASE ? "RX" : "TX"));
+ netdev_err(tp->dev, "%s timed out, %s CPU\n",
+ __func__, offset == RX_CPU_BASE ? "RX" : "TX");
return -ENODEV;
}
@@ -7180,9 +7156,8 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
if (cpu_base == TX_CPU_BASE &&
(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
- printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
- "TX cpu firmware on %s which is 5705.\n",
- tp->dev->name);
+ netdev_err(tp->dev, "%s: Trying to load TX cpu firmware which is 5705\n",
+ __func__);
return -EINVAL;
}
@@ -7261,10 +7236,8 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
udelay(1000);
}
if (i >= 5) {
- printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
- "to set RX CPU PC, is %08x should be %08x\n",
- tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
- info.fw_base);
+ netdev_err(tp->dev, "tg3_load_firmware fails to set RX CPU PC, is %08x should be %08x\n",
+ tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
return -ENODEV;
}
tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
@@ -7327,10 +7300,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
udelay(1000);
}
if (i >= 5) {
- printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
- "to set CPU PC, is %08x should be %08x\n",
- tp->dev->name, tr32(cpu_base + CPU_PC),
- info.fw_base);
+ netdev_err(tp->dev, "%s fails to set CPU PC, is %08x should be %08x\n",
+ __func__, tr32(cpu_base + CPU_PC), info.fw_base);
return -ENODEV;
}
tw32(cpu_base + CPU_STATE, 0xffffffff);
@@ -7791,8 +7762,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(10);
}
if (i >= 2000) {
- printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
- tp->dev->name);
+ netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
return -ENODEV;
}
@@ -8655,10 +8625,8 @@ static int tg3_test_msi(struct tg3 *tp)
return err;
/* MSI test failed, go back to INTx mode */
- printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
- "switching to INTx mode. Please report this failure to "
- "the PCI maintainer and include system chipset information.\n",
- tp->dev->name);
+ netdev_warn(tp->dev, "No interrupt was generated using MSI, switching to INTx mode\n"
+ "Please report this failure to the PCI maintainer and include system chipset information\n");
free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
@@ -8691,8 +8659,8 @@ static int tg3_request_firmware(struct tg3 *tp)
const __be32 *fw_data;
if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
- printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
- tp->dev->name, tp->fw_needed);
+ netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
+ tp->fw_needed);
return -ENOENT;
}
@@ -8705,8 +8673,8 @@ static int tg3_request_firmware(struct tg3 *tp)
tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
if (tp->fw_len < (tp->fw->size - 12)) {
- printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
- tp->dev->name, tp->fw_len, tp->fw_needed);
+ netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
+ tp->fw_len, tp->fw_needed);
release_firmware(tp->fw);
tp->fw = NULL;
return -EINVAL;
@@ -8744,9 +8712,8 @@ static bool tg3_enable_msix(struct tg3 *tp)
return false;
if (pci_enable_msix(tp->pdev, msix_ent, rc))
return false;
- printk(KERN_NOTICE
- "%s: Requested %d MSI-X vectors, received %d\n",
- tp->dev->name, tp->irq_cnt, rc);
+ netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
+ tp->irq_cnt, rc);
tp->irq_cnt = rc;
}
@@ -8771,8 +8738,7 @@ static void tg3_ints_init(struct tg3 *tp)
/* All MSI supporting chips should support tagged
* status. Assert that this is the case.
*/
- printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
- "Not using MSI.\n", tp->dev->name);
+ netdev_warn(tp->dev, "MSI without TAGGED? Not using MSI\n");
goto defcfg;
}
@@ -8817,12 +8783,10 @@ static int tg3_open(struct net_device *dev)
if (err)
return err;
} else if (err) {
- printk(KERN_WARNING "%s: TSO capability disabled.\n",
- tp->dev->name);
+ netdev_warn(tp->dev, "TSO capability disabled\n");
tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
} else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
- printk(KERN_NOTICE "%s: TSO capability restored.\n",
- tp->dev->name);
+ netdev_notice(tp->dev, "TSO capability restored\n");
tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
}
}
@@ -10687,8 +10651,7 @@ static int tg3_test_registers(struct tg3 *tp)
out:
if (netif_msg_hw(tp))
- printk(KERN_ERR PFX "Register test failed at offset %x\n",
- offset);
+ pr_err("Register test failed at offset %x\n", offset);
tw32(offset, save_val);
return -EIO;
}
@@ -11815,8 +11778,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
tp->tg3_flags |= TG3_FLAG_NVRAM;
if (tg3_nvram_lock(tp)) {
- printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
- "tg3_nvram_init failed.\n", tp->dev->name);
+ netdev_warn(tp->dev, "Cannot get nvram lock, %s failed\n",
+ __func__);
return;
}
tg3_enable_nvram_access(tp);
@@ -13280,8 +13243,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
if (!tp->pcix_cap) {
- printk(KERN_ERR PFX "Cannot find PCI-X "
- "capability, aborting.\n");
+ pr_err("Cannot find PCI-X capability, aborting\n");
return -EIO;
}
@@ -13478,8 +13440,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
/* Force the chip into D0. */
err = tg3_set_power_state(tp, PCI_D0);
if (err) {
- printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
- pci_name(tp->pdev));
+ pr_err("(%s) transition to D0 failed\n", pci_name(tp->pdev));
return err;
}
@@ -13653,7 +13614,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
err = tg3_phy_probe(tp);
if (err) {
- printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
+ pr_err("(%s) phy probe failed, err %d\n",
pci_name(tp->pdev), err);
/* ... but do not return immediately ... */
tg3_mdio_fini(tp);
@@ -14163,7 +14124,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
/* Send the buffer to the chip. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
if (ret) {
- printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
+ pr_err("tg3_test_dma() Write the buffer failed %d\n",
+ ret);
break;
}
@@ -14173,7 +14135,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
u32 val;
tg3_read_mem(tp, 0x2100 + (i*4), &val);
if (le32_to_cpu(val) != p[i]) {
- printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
+ pr_err(" tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n",
+ val, i);
/* ret = -ENODEV here? */
}
p[i] = 0;
@@ -14182,7 +14145,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
/* Now read it back. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
if (ret) {
- printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
+ pr_err("tg3_test_dma() Read the buffer failed %d\n",
+ ret);
break;
}
@@ -14199,7 +14163,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
break;
} else {
- printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
+ pr_err("tg3_test_dma() buffer corrupted on read back! (%d != %d)\n",
+ p[i], i);
ret = -ENODEV;
goto out;
}
@@ -14480,7 +14445,6 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
static int __devinit tg3_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int tg3_version_printed = 0;
struct net_device *dev;
struct tg3 *tp;
int i, err, pm_cap;
@@ -14488,20 +14452,17 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
char str[40];
u64 dma_mask, persist_dma_mask;
- if (tg3_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ printk_once(KERN_INFO "%s\n", version);
err = pci_enable_device(pdev);
if (err) {
- printk(KERN_ERR PFX "Cannot enable PCI device, "
- "aborting.\n");
+ pr_err("Cannot enable PCI device, aborting\n");
return err;
}
err = pci_request_regions(pdev, DRV_MODULE_NAME);
if (err) {
- printk(KERN_ERR PFX "Cannot obtain PCI resources, "
- "aborting.\n");
+ pr_err("Cannot obtain PCI resources, aborting\n");
goto err_out_disable_pdev;
}
@@ -14510,15 +14471,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
/* Find power-management capability. */
pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pm_cap == 0) {
- printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
- "aborting.\n");
+ pr_err("Cannot find PowerManagement capability, aborting\n");
err = -EIO;
goto err_out_free_res;
}
dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
if (!dev) {
- printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ pr_err("Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto err_out_free_res;
}
@@ -14568,8 +14528,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->regs = pci_ioremap_bar(pdev, BAR_0);
if (!tp->regs) {
- printk(KERN_ERR PFX "Cannot map device registers, "
- "aborting.\n");
+ netdev_err(dev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_free_dev;
}
@@ -14585,8 +14544,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_get_invariants(tp);
if (err) {
- printk(KERN_ERR PFX "Problem fetching invariants of chip, "
- "aborting.\n");
+ netdev_err(dev, "Problem fetching invariants of chip, aborting\n");
goto err_out_iounmap;
}
@@ -14621,8 +14579,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = pci_set_consistent_dma_mask(pdev,
persist_dma_mask);
if (err < 0) {
- printk(KERN_ERR PFX "Unable to obtain 64 bit "
- "DMA for consistent allocations\n");
+ netdev_err(dev, "Unable to obtain 64 bit DMA for consistent allocations\n");
goto err_out_iounmap;
}
}
@@ -14630,8 +14587,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
if (err || dma_mask == DMA_BIT_MASK(32)) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR PFX "No usable DMA configuration, "
- "aborting.\n");
+ netdev_err(dev, "No usable DMA configuration, aborting\n");
goto err_out_iounmap;
}
}
@@ -14680,16 +14636,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_get_device_address(tp);
if (err) {
- printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
- "aborting.\n");
+ netdev_err(dev, "Could not obtain valid ethernet address, aborting\n");
goto err_out_iounmap;
}
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
if (!tp->aperegs) {
- printk(KERN_ERR PFX "Cannot map APE registers, "
- "aborting.\n");
+ netdev_err(dev, "Cannot map APE registers, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -14713,7 +14667,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_test_dma(tp);
if (err) {
- printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
+ netdev_err(dev, "DMA engine test failed, aborting\n");
goto err_out_apeunmap;
}
@@ -14774,45 +14728,39 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR PFX "Cannot register net device, "
- "aborting.\n");
+ netdev_err(dev, "Cannot register net device, aborting\n");
goto err_out_apeunmap;
}
- printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
- dev->name,
- tp->board_part_number,
- tp->pci_chip_rev_id,
- tg3_bus_string(tp, str),
- dev->dev_addr);
+ netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
+ tp->board_part_number,
+ tp->pci_chip_rev_id,
+ tg3_bus_string(tp, str),
+ dev->dev_addr);
if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
struct phy_device *phydev;
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
- printk(KERN_INFO
- "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
- tp->dev->name, phydev->drv->name,
- dev_name(&phydev->dev));
+ netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+ phydev->drv->name, dev_name(&phydev->dev));
} else
- printk(KERN_INFO
- "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
- tp->dev->name, tg3_phy_string(tp),
- ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
- ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
- "10/100/1000Base-T")),
- (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
-
- printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
- dev->name,
- (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
- (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
- (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
- (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
- (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
- printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
- dev->name, tp->dma_rwctrl,
- (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
- (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));
+ netdev_info(dev, "attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
+ tg3_phy_string(tp),
+ ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
+ ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
+ "10/100/1000Base-T")),
+ (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
+
+ netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
+ (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
+ (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
+ (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
+ (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
+ (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
+ netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
+ tp->dma_rwctrl,
+ pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
+ ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
return 0;
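
Alongside the multicast rework, the tg3 hunks above drop the driver-local PFX prefix and move every message to the generic logging helpers, which derive the driver/interface prefix from the net_device themselves. Roughly (illustrative equivalence, not a line taken from the patch):

	/* before: prefix and interface name spelled out by hand */
	printk(KERN_ERR PFX "%s: DMA engine test failed, aborting.\n",
	       dev->name);

	/* after: device-aware helper; pr_err() is used where no net_device
	 * is at hand, and netif_info(tp, link, dev, ...) folds in the
	 * netif_msg_link(tp) check seen in tg3_link_report().
	 */
	netdev_err(dev, "DMA engine test failed, aborting\n");
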
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index eff68e1d107..0fb930feea4 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -1390,10 +1390,9 @@ static int xl_close(struct net_device *dev)
static void xl_set_rx_mode(struct net_device *dev)
{
struct xl_private *xl_priv = netdev_priv(dev);
- struct dev_mc_list *dmi ;
+ struct dev_mc_list *dmi;
unsigned char dev_mc_address[4] ;
u16 options ;
- int i ;
if (dev->flags & IFF_PROMISC)
options = 0x0004 ;
@@ -1408,7 +1407,7 @@ static void xl_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < netdev_mc_count(dev); i++,dmi = dmi->next) {
+ netdev_for_each_mc_addr(dmi, dev) {
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[2] |= dmi->dmi_addr[4] ;
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 1ce8f85a89a..1a0967246e2 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -995,13 +995,11 @@ static void tok_set_multicast_list(struct net_device *dev)
/*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/
if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
address[0] = address[1] = address[2] = address[3] = 0;
- mclist = dev->mc_list;
- for (i = 0; i < netdev_mc_count(dev); i++) {
+ netdev_for_each_mc_addr(mclist, dev) {
address[0] |= mclist->dmi_addr[2];
address[1] |= mclist->dmi_addr[3];
address[2] |= mclist->dmi_addr[4];
address[3] |= mclist->dmi_addr[5];
- mclist = mclist->next;
}
SET_PAGE(ti->srb_page);
for (i = 0; i < sizeof(struct srb_set_funct_addr); i++)
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 26d84daf660..dd028fee9dc 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -1268,7 +1268,6 @@ static void streamer_set_rx_mode(struct net_device *dev)
__u8 options = 0;
struct dev_mc_list *dmi;
unsigned char dev_mc_address[5];
- int i;
writel(streamer_priv->srb, streamer_mmio + LAPA);
options = streamer_priv->streamer_copy_all_options;
@@ -1303,8 +1302,7 @@ static void streamer_set_rx_mode(struct net_device *dev)
writel(streamer_priv->srb,streamer_mmio+LAPA);
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < netdev_mc_count(dev); i++,dmi = dmi->next)
- {
+ netdev_for_each_mc_addr(dmi, dev) {
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[2] |= dmi->dmi_addr[4] ;
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index a242d125b34..3a25e0434ae 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -1139,9 +1139,8 @@ static void olympic_set_rx_mode(struct net_device *dev)
u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
u8 options = 0;
u8 __iomem *srb;
- struct dev_mc_list *dmi ;
+ struct dev_mc_list *dmi;
unsigned char dev_mc_address[4] ;
- int i ;
writel(olympic_priv->srb,olympic_mmio+LAPA);
srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
@@ -1178,7 +1177,7 @@ static void olympic_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < netdev_mc_count(dev); i++,dmi = dmi->next) {
+ netdev_for_each_mc_addr(dmi, dev) {
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[2] |= dmi->dmi_addr[4] ;
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 6b8868959b8..21a01753312 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1212,10 +1212,9 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
}
else
{
- int i;
- struct dev_mc_list *mclist = dev->mc_list;
- for (i=0; i< netdev_mc_count(dev); i++)
- {
+ struct dev_mc_list *mclist;
+
+ netdev_for_each_mc_addr(mclist, dev) {
((char *)(&tp->ocpl.FunctAddr))[0] |=
mclist->dmi_addr[2];
((char *)(&tp->ocpl.FunctAddr))[1] |=
@@ -1224,7 +1223,6 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
mclist->dmi_addr[4];
((char *)(&tp->ocpl.FunctAddr))[3] |=
mclist->dmi_addr[5];
- mclist = mclist->next;
}
}
tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index a4cff23dcdf..cb429723b2c 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -677,18 +677,17 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
set_bit_le(index, hash_table);
+ }
- for (i = 0; i < 32; i++) {
- *setup_frm++ = hash_table[i];
- *setup_frm++ = hash_table[i];
- }
- setup_frm = &de->setup_frame[13*6];
+ for (i = 0; i < 32; i++) {
+ *setup_frm++ = hash_table[i];
+ *setup_frm++ = hash_table[i];
}
+ setup_frm = &de->setup_frame[13*6];
/* Fill the final entry with our physical address. */
eaddrs = (u16 *)dev->dev_addr;
@@ -701,20 +700,18 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
struct dev_mc_list *mclist;
- int i;
u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
eaddrs = (u16 *)mclist->dmi_addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
}
/* Fill the unused entries with the broadcast address. */
- memset(setup_frm, 0xff, (15-i)*12);
+ memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
setup_frm = &de->setup_frame[15*6];
/* Fill the final entry with our physical address. */
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 0b6a9731091..c4ecb9a9540 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1951,9 +1951,9 @@ static void
SetMulticastFilter(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi=dev->mc_list;
+ struct dev_mc_list *dmi;
u_long iobase = dev->base_addr;
- int i, j, bit, byte;
+ int i, bit, byte;
u16 hashcode;
u32 omr, crc;
char *pa;
@@ -1966,9 +1966,8 @@ SetMulticastFilter(struct net_device *dev)
if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
omr |= OMR_PM; /* Pass all multicasts */
} else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
- for (i = 0; i < netdev_mc_count(dev) ;i++) {
- addrs=dmi->dmi_addr;
- dmi=dmi->next;
+ netdev_for_each_mc_addr(dmi, dev) {
+ addrs = dmi->dmi_addr;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc_le(ETH_ALEN, addrs);
hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
@@ -1984,9 +1983,8 @@ SetMulticastFilter(struct net_device *dev)
}
}
} else { /* Perfect filtering */
- for (j=0; j<netdev_mc_count(dev); j++) {
- addrs=dmi->dmi_addr;
- dmi=dmi->next;
+ netdev_for_each_mc_addr(dmi, dev) {
+ addrs = dmi->dmi_addr;
for (i=0; i<ETH_ALEN; i++) {
*(pa + (i&1)) = *addrs++;
if (i & 0x01) pa += 4;
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 534afbdb9c9..95b38d803e9 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -330,8 +330,8 @@ static void poll_dmfe (struct net_device *dev);
static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
static void allocate_rx_buffer(struct dmfe_board_info *);
static void update_cr6(u32, unsigned long);
-static void send_filter_frame(struct DEVICE * ,int);
-static void dm9132_id_table(struct DEVICE * ,int);
+static void send_filter_frame(struct DEVICE *);
+static void dm9132_id_table(struct DEVICE *);
static u16 phy_read(unsigned long, u8, u8, u32);
static void phy_write(unsigned long, u8, u8, u16, u32);
static void phy_write_1bit(unsigned long, u32);
@@ -658,9 +658,9 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
/* Send setup frame */
if (db->chip_id == PCI_DM9132_ID)
- dm9132_id_table(dev, netdev_mc_count(dev)); /* DM9132 */
+ dm9132_id_table(dev); /* DM9132 */
else
- send_filter_frame(dev, netdev_mc_count(dev)); /* DM9102/DM9102A */
+ send_filter_frame(dev); /* DM9102/DM9102A */
/* Init CR7, interrupt active bit */
db->cr7_data = CR7_DEFAULT;
@@ -1075,9 +1075,9 @@ static void dmfe_set_filter_mode(struct DEVICE * dev)
DMFE_DBUG(0, "Set multicast address", mc_count);
if (db->chip_id == PCI_DM9132_ID)
- dm9132_id_table(dev, mc_count); /* DM9132 */
+ dm9132_id_table(dev); /* DM9132 */
else
- send_filter_frame(dev, mc_count); /* DM9102/DM9102A */
+ send_filter_frame(dev); /* DM9102/DM9102A */
spin_unlock_irqrestore(&db->lock, flags);
}
@@ -1452,7 +1452,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
 * This setup frame initializes the DM910X address filter mode
*/
-static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
+static void dm9132_id_table(struct DEVICE *dev)
{
struct dev_mc_list *mcptr;
u16 * addrptr;
@@ -1472,15 +1472,14 @@ static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
ioaddr += 4;
/* Clear Hash Table */
- for (i = 0; i < 4; i++)
- hash_table[i] = 0x0;
+ memset(hash_table, 0, sizeof(hash_table));
/* broadcast address */
hash_table[3] = 0x8000;
/* the multicast address in Hash Table : 64 bits */
- for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
- hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f;
+ netdev_for_each_mc_addr(mcptr, dev) {
+ hash_val = cal_CRC((char *) mcptr->dmi_addr, 6, 0) & 0x3f;
hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
}
@@ -1495,7 +1494,7 @@ static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
 * This setup frame initializes the DM910X address filter mode
*/
-static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
+static void send_filter_frame(struct DEVICE *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
struct dev_mc_list *mcptr;
@@ -1521,14 +1520,14 @@ static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
*suptr++ = 0xffff;
/* fit the multicast address */
- for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
+ netdev_for_each_mc_addr(mcptr, dev) {
addrptr = (u16 *) mcptr->dmi_addr;
*suptr++ = addrptr[0];
*suptr++ = addrptr[1];
*suptr++ = addrptr[2];
}
- for (; i<14; i++) {
+ for (i = netdev_mc_count(dev); i < 14; i++) {
*suptr++ = 0xffff;
*suptr++ = 0xffff;
*suptr++ = 0xffff;
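
With the explicit loop counter gone, the setup-frame builders in dmfe (and likewise uli526x, de2104x and tulip_core) recover the number of entries already written from netdev_mc_count() when padding the unused perfect-filter slots. Schematically (simplified sketch; suptr, broadcast_addr and write_filter_entry() are illustrative assumptions, not names from the patch):

	struct dev_mc_list *mcptr;
	int i;

	netdev_for_each_mc_addr(mcptr, dev)
		suptr = write_filter_entry(suptr, mcptr->dmi_addr);

	/* pad the remaining of the 14 perfect-filter slots with broadcast */
	for (i = netdev_mc_count(dev); i < 14; i++)
		suptr = write_filter_entry(suptr, broadcast_addr);
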
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index cce2ada0795..7f544ef2f5f 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -997,12 +997,10 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
set_bit_le(index, hash_table);
-
}
for (i = 0; i < 32; i++) {
*setup_frm++ = hash_table[i];
@@ -1021,20 +1019,18 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
struct dev_mc_list *mclist;
- int i;
u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
eaddrs = (u16 *)mclist->dmi_addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
}
/* Fill the unused entries with the broadcast address. */
- memset(setup_frm, 0xff, (15-i)*12);
+ memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
setup_frm = &tp->setup_frame[15*6];
/* Fill the final entry with our physical address. */
@@ -1066,7 +1062,6 @@ static void set_rx_mode(struct net_device *dev)
/* Some work-alikes have only a 64-entry hash filter table. */
/* Should verify correctness on big-endian/__powerpc__ */
struct dev_mc_list *mclist;
- int i;
if (netdev_mc_count(dev) > 64) {
/* Arbitrary non-effective limit. */
tp->csr6 |= AcceptAllMulticast;
@@ -1074,9 +1069,7 @@ static void set_rx_mode(struct net_device *dev)
} else {
u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
int filterbit;
- for (i = 0, mclist = dev->mc_list;
- mclist && i < netdev_mc_count(dev);
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
if (tp->flags & COMET_MAC_ADDR)
filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
else
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 216ceb322ed..0ab05af237e 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -1415,14 +1415,14 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
*suptr++ = 0xffff << FLT_SHIFT;
/* fit the multicast address */
- for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
+ netdev_for_each_mc_addr(mcptr, dev) {
addrptr = (u16 *) mcptr->dmi_addr;
*suptr++ = addrptr[0] << FLT_SHIFT;
*suptr++ = addrptr[1] << FLT_SHIFT;
*suptr++ = addrptr[2] << FLT_SHIFT;
}
- for (; i<14; i++) {
+ for (i = netdev_mc_count(dev); i < 14; i++) {
*suptr++ = 0xffff << FLT_SHIFT;
*suptr++ = 0xffff << FLT_SHIFT;
*suptr++ = 0xffff << FLT_SHIFT;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 98711a9f35a..304f43866c4 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1368,11 +1368,9 @@ static u32 __set_rx_mode(struct net_device *dev)
rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
} else {
struct dev_mc_list *mclist;
- int i;
+
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list;
- mclist && i < netdev_mc_count(dev);
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
filterbit &= 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index f605204de3e..20e34608fa4 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -555,20 +555,18 @@ static void asix_set_multicast(struct net_device *net)
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list = net->mc_list;
+ struct dev_mc_list *mc_list;
u32 crc_bits;
- int i;
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- for (i = 0; i < netdev_mc_count(net); i++) {
+ netdev_for_each_mc_addr(mc_list, net) {
crc_bits =
ether_crc(ETH_ALEN,
mc_list->dmi_addr) >> 26;
data->multi_filter[crc_bits >> 3] |=
1 << (crc_bits & 7);
- mc_list = mc_list->next;
}
asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
@@ -769,20 +767,18 @@ static void ax88172_set_multicast(struct net_device *net)
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list = net->mc_list;
+ struct dev_mc_list *mc_list;
u32 crc_bits;
- int i;
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- for (i = 0; i < netdev_mc_count(net); i++) {
+ netdev_for_each_mc_addr(mc_list, net) {
crc_bits =
ether_crc(ETH_ALEN,
mc_list->dmi_addr) >> 26;
data->multi_filter[crc_bits >> 3] |=
1 << (crc_bits & 7);
- mc_list = mc_list->next;
}
asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 5a13660ebd1..96f1ebe0d34 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -632,7 +632,6 @@ static void catc_set_multicast_list(struct net_device *netdev)
struct dev_mc_list *mc;
u8 broadcast[6];
u8 rx = RxEnable | RxPolarity | RxMultiCast;
- int i;
memset(broadcast, 0xff, 6);
memset(catc->multicast, 0, 64);
@@ -648,9 +647,7 @@ static void catc_set_multicast_list(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI) {
memset(catc->multicast, 0xff, 64);
} else {
- for (i = 0, mc = netdev->mc_list;
- mc && i < netdev_mc_count(netdev);
- i++, mc = mc->next) {
+ netdev_for_each_mc_addr(mc, netdev) {
u32 crc = ether_crc_le(6, mc->dmi_addr);
if (!catc->is_f5u011) {
catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 34665137f2c..70978219e98 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -452,15 +452,13 @@ static void mcs7830_data_set_multicast(struct net_device *net)
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list = net->mc_list;
+ struct dev_mc_list *mc_list;
u32 crc_bits;
- int i;
/* Build the multicast hash filter. */
- for (i = 0; i < netdev_mc_count(net); i++) {
+ netdev_for_each_mc_addr(mc_list, net) {
crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
- mc_list = mc_list->next;
}
}
}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 1ada51eb71f..df9179a1c93 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -384,30 +384,20 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
pdata->mac_cr |= MAC_CR_MCPAS_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_);
} else if (!netdev_mc_empty(dev->net)) {
- struct dev_mc_list *mc_list = dev->net->mc_list;
- int count = 0;
+ struct dev_mc_list *mc_list;
pdata->mac_cr |= MAC_CR_HPFILT_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
- while (mc_list) {
- count++;
- if (mc_list->dmi_addrlen == ETH_ALEN) {
- u32 bitnum = smsc95xx_hash(mc_list->dmi_addr);
- u32 mask = 0x01 << (bitnum & 0x1F);
- if (bitnum & 0x20)
- hash_hi |= mask;
- else
- hash_lo |= mask;
- } else {
- netdev_warn(dev->net, "dmi_addrlen != 6\n");
- }
- mc_list = mc_list->next;
+ netdev_for_each_mc_addr(mc_list, netdev) {
+ u32 bitnum = smsc95xx_hash(mc_list->dmi_addr);
+ u32 mask = 0x01 << (bitnum & 0x1F);
+ if (bitnum & 0x20)
+ hash_hi |= mask;
+ else
+ hash_lo |= mask;
}
- if (count != ((u32) netdev_mc_count(dev->net)))
- netdev_warn(dev->net, "mc_count != dev->mc_count\n");
-
netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n",
hash_hi, hash_lo);
} else {
diff --git a/drivers/staging/arlan/arlan-main.c b/drivers/staging/arlan/arlan-main.c
index 921a082487a..88fdd53cf5d 100644
--- a/drivers/staging/arlan/arlan-main.c
+++ b/drivers/staging/arlan/arlan-main.c
@@ -1455,10 +1455,10 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
#ifdef ARLAN_MULTICAST
if (!(dev->flags & IFF_ALLMULTI) &&
!(dev->flags & IFF_PROMISC) &&
- dev->mc_list)
+ !netdev_mc_empty(dev))
{
char hw_dst_addr[6];
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
int i;
memcpy_fromio(hw_dst_addr, arlan->ultimateDestAddress, 6);
@@ -1469,20 +1469,15 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
printk(KERN_ERR "%s mcast 0x0100 \n", dev->name);
else if (hw_dst_addr[1] == 0x40)
printk(KERN_ERR "%s m/bcast 0x0140 \n", dev->name);
- while (dmi)
- {
- if (dmi->dmi_addrlen == 6) {
- if (arlan_debug & ARLAN_DEBUG_HEADER_DUMP)
- printk(KERN_ERR "%s mcl %pM\n",
- dev->name, dmi->dmi_addr);
- for (i = 0; i < 6; i++)
- if (dmi->dmi_addr[i] != hw_dst_addr[i])
- break;
- if (i == 6)
+ netdev_for_each_mc_addr(dmi, dev) {
+ if (arlan_debug & ARLAN_DEBUG_HEADER_DUMP)
+ printk(KERN_ERR "%s mcl %pM\n",
+ dev->name, dmi->dmi_addr);
+ for (i = 0; i < 6; i++)
+ if (dmi->dmi_addr[i] != hw_dst_addr[i])
break;
- } else
- printk(KERN_ERR "%s: invalid multicast address length given.\n", dev->name);
- dmi = dmi->next;
+ if (i == 6)
+ break;
}
/* we reach here if multicast filtering is on and packet
* is multicast and not for receive */
diff --git a/drivers/staging/et131x/et131x_netdev.c b/drivers/staging/et131x/et131x_netdev.c
index bc1fad24895..edb78ae9e59 100644
--- a/drivers/staging/et131x/et131x_netdev.c
+++ b/drivers/staging/et131x/et131x_netdev.c
@@ -411,9 +411,9 @@ void et131x_multicast(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
uint32_t PacketFilter = 0;
- uint32_t count;
unsigned long flags;
- struct dev_mc_list *mclist = netdev->mc_list;
+ struct dev_mc_list *mclist;
+ int i;
spin_lock_irqsave(&adapter->Lock, flags);
@@ -456,12 +456,13 @@ void et131x_multicast(struct net_device *netdev)
}
/* Set values in the private adapter struct */
- adapter->MCAddressCount = netdev_mc_count(netdev);
-
- if (!netdev_mc_empty(netdev)) {
- count = netdev_mc_count(netdev) - 1;
- memcpy(adapter->MCList[count], mclist->dmi_addr, ETH_ALEN);
+ i = 0;
+ netdev_for_each_mc_addr(mclist, netdev) {
+ if (i == NIC_MAX_MCAST_LIST)
+ break;
+ memcpy(adapter->MCList[i++], mclist->dmi_addr, ETH_ALEN);
}
+ adapter->MCAddressCount = i;
/* Are the new flags different from the previous ones? If not, then no
* action is required
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 4cfd4b136b3..220de133a6a 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -382,7 +382,7 @@ static void cvm_oct_common_set_multicast_list(struct net_device *dev)
control.u64 = 0;
control.s.bcst = 1; /* Allow broadcast MAC addresses */
- if (dev->mc_list || (dev->flags & IFF_ALLMULTI) ||
+ if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
(dev->flags & IFF_PROMISC))
/* Force accept multicast packets */
control.s.mcst = 2;
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 8c9d5e5c770..f5cc01ba414 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -1362,25 +1362,17 @@ static void slic_mcast_set_list(struct net_device *dev)
{
struct adapter *adapter = (struct adapter *)netdev_priv(dev);
int status = STATUS_SUCCESS;
- int i;
char *addresses;
- struct dev_mc_list *mc_list = dev->mc_list;
- int mc_count = netdev_mc_count(dev);
+ struct dev_mc_list *mc_list;
ASSERT(adapter);
- for (i = 1; i <= mc_count; i++) {
+ netdev_for_each_mc_addr(mc_list, dev) {
addresses = (char *) &mc_list->dmi_addr;
- if (mc_list->dmi_addrlen == 6) {
- status = slic_mcast_add_list(adapter, addresses);
- if (status != STATUS_SUCCESS)
- break;
- } else {
- status = -EINVAL;
+ status = slic_mcast_add_list(adapter, addresses);
+ if (status != STATUS_SUCCESS)
break;
- }
slic_mcast_set_bit(adapter, addresses);
- mc_list = mc_list->next;
}
if (adapter->devflags_prev != dev->flags) {
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 82b3a6e0b15..0dadb765fec 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -3082,8 +3082,7 @@ static void device_set_multi(struct net_device *dev) {
PSMgmtObject pMgmt = pDevice->pMgmt;
u32 mc_filter[2];
- int i;
- struct dev_mc_list *mclist;
+ struct dev_mc_list *mclist;
VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byRxMode));
@@ -3103,8 +3102,7 @@ static void device_set_multi(struct net_device *dev) {
}
else {
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
}
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 2c6a5350547..a8e1adbc959 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1596,7 +1596,7 @@ static void device_set_multi(struct net_device *dev) {
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
u32 mc_filter[2];
int ii;
- struct dev_mc_list *mclist;
+ struct dev_mc_list *mclist;
BYTE pbyData[8] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
BYTE byTmpMode = 0;
int rc;
@@ -1632,8 +1632,7 @@ static void device_set_multi(struct net_device *dev) {
}
else {
memset(mc_filter, 0, sizeof(mc_filter));
- for (ii = 0, mclist = dev->mc_list; mclist && ii < netdev_mc_count(dev);
- ii++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
}
diff --git a/drivers/staging/wavelan/wavelan.c b/drivers/staging/wavelan/wavelan.c
index 961f1417fb5..54ca63196fd 100644
--- a/drivers/staging/wavelan/wavelan.c
+++ b/drivers/staging/wavelan/wavelan.c
@@ -1387,7 +1387,7 @@ static void wavelan_set_multicast_list(struct net_device * dev)
}
} else
/* Are there multicast addresses to send? */
- if (dev->mc_list != (struct dev_mc_list *) NULL) {
+ if (!netdev_mc_empty(dev)) {
/*
* Disable promiscuous mode, but receive all packets
* in multicast list
@@ -3531,7 +3531,7 @@ static void wv_82586_config(struct net_device * dev)
/* Any address to set? */
if (lp->mc_count) {
- for (dmi = dev->mc_list; dmi; dmi = dmi->next)
+ netdev_for_each_mc_addr(dmi, dev)
outsw(PIOP1(ioaddr), (u16 *) dmi->dmi_addr,
WAVELAN_ADDR_SIZE >> 1);
@@ -3539,7 +3539,7 @@ static void wv_82586_config(struct net_device * dev)
printk(KERN_DEBUG
"%s: wv_82586_config(): set %d multicast addresses:\n",
dev->name, lp->mc_count);
- for (dmi = dev->mc_list; dmi; dmi = dmi->next)
+ netdev_for_each_mc_addr(dmi, dev)
printk(KERN_DEBUG " %pM\n", dmi->dmi_addr);
#endif
}
diff --git a/drivers/staging/wavelan/wavelan_cs.c b/drivers/staging/wavelan/wavelan_cs.c
index 08fcb226d7d..04f691d127b 100644
--- a/drivers/staging/wavelan/wavelan_cs.c
+++ b/drivers/staging/wavelan/wavelan_cs.c
@@ -1410,8 +1410,7 @@ wavelan_set_multicast_list(struct net_device * dev)
}
else
/* If there is some multicast addresses to send */
- if(dev->mc_list != (struct dev_mc_list *) NULL)
- {
+ if (!netdev_mc_empty(dev)) {
/*
* Disable promiscuous mode, but receive all packets
* in multicast list
@@ -3598,13 +3597,13 @@ wv_82593_config(struct net_device * dev)
/* If any multicast address to set */
if(lp->mc_count)
{
- struct dev_mc_list * dmi;
+ struct dev_mc_list *dmi;
int addrs_len = WAVELAN_ADDR_SIZE * lp->mc_count;
#ifdef DEBUG_CONFIG_INFO
printk(KERN_DEBUG "%s: wv_hw_config(): set %d multicast addresses:\n",
dev->name, lp->mc_count);
- for(dmi=dev->mc_list; dmi; dmi=dmi->next)
+ netdev_for_each_mc_addr(dmi, dev)
printk(KERN_DEBUG " %pM\n", dmi->dmi_addr);
#endif
@@ -3613,7 +3612,7 @@ wv_82593_config(struct net_device * dev)
outb(((TX_BASE >> 8) & PIORH_MASK) | PIORH_SEL_TX, PIORH(base));
outb(addrs_len & 0xff, PIOP(base)); /* byte count lsb */
outb((addrs_len >> 8), PIOP(base)); /* byte count msb */
- for(dmi=dev->mc_list; dmi; dmi=dmi->next)
+ netdev_for_each_mc_addr(dmi, dev)
outsb(PIOP(base), dmi->dmi_addr, dmi->dmi_addrlen);
/* reset transmit DMA pointer */
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index a95ebf881fc..c33e225bc0e 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -1049,7 +1049,7 @@ void wl_multicast( struct net_device *dev )
//;?seems reasonable that even an AP-only driver could afford this small additional footprint
int x;
- struct dev_mc_list *mclist;
+ struct dev_mc_list *mclist;
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
/*------------------------------------------------------------------------*/
@@ -1072,11 +1072,9 @@ void wl_multicast( struct net_device *dev )
DBG_PRINT( " mc_count: %d\n", netdev_mc_count(dev));
- for( x = 0, mclist = dev->mc_list; mclist && x < netdev_mc_count(dev);
- x++, mclist = mclist->next ) {
+ netdev_for_each_mc_addr(mclist, dev)
DBG_PRINT( " %s (%d)\n", DbgHwAddr(mclist->dmi_addr),
mclist->dmi_addrlen );
- }
}
#endif /* DBG */
@@ -1120,12 +1118,10 @@ void wl_multicast( struct net_device *dev )
lp->ltvRecord.len = ( netdev_mc_count(dev) * 3 ) + 1;
lp->ltvRecord.typ = CFG_GROUP_ADDR;
- for( x = 0, mclist = dev->mc_list;
- ( x < netdev_mc_count(dev)) && ( mclist != NULL );
- x++, mclist = mclist->next ) {
- memcpy( &( lp->ltvRecord.u.u8[x * ETH_ALEN] ),
- mclist->dmi_addr, ETH_ALEN );
- }
+ x = 0;
+ netdev_for_each_mc_addr(mclist, dev)
+ memcpy(&(lp->ltvRecord.u.u8[x++ * ETH_ALEN]),
+ mclist->dmi_addr, ETH_ALEN);
DBG_PRINT( "Setting multicast list\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
} else {
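
The staging-driver hunks above are all the same mechanical conversion: the open-coded walk over dev->mc_list becomes the netdev_for_each_mc_addr() iterator, and explicit NULL tests on dev->mc_list become netdev_mc_empty(). A condensed sketch of the resulting idiom (not lifted from any one of these drivers; write_hash_filter() is a made-up placeholder for the device-specific register access):

	/* Sketch only; needs <linux/netdevice.h> and <linux/crc32.h> in a
	 * real driver.  write_hash_filter() is a hypothetical helper.
	 */
	static void example_set_multi(struct net_device *dev)
	{
		struct dev_mc_list *mclist;
		u32 mc_filter[2] = { 0, 0 };

		if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 64) {
			mc_filter[0] = mc_filter[1] = ~0;	/* accept all multicast */
		} else if (!netdev_mc_empty(dev)) {	/* replaces dev->mc_list != NULL */
			netdev_for_each_mc_addr(mclist, dev) {	/* replaces the open-coded walk */
				int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

				mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			}
		}
		write_hash_filter(dev, mc_filter);	/* hypothetical device write */
	}
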
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index 9e9355367bb..e4e2fd1b510 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -1,6 +1,6 @@
config VHOST_NET
tristate "Host kernel accelerator for virtio net (EXPERIMENTAL)"
- depends on NET && EVENTFD && (TUN || !TUN) && EXPERIMENTAL
+ depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP) && EXPERIMENTAL
---help---
This kernel module can be loaded in host kernel to accelerate
guest networking with virtio_net. Not to be confused with virtio_net
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 4c8928319e1..91a324cc229 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -22,6 +22,7 @@
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
+#include <linux/if_macvlan.h>
#include <net/sock.h>
@@ -452,13 +453,16 @@ err:
return ERR_PTR(r);
}
-static struct socket *get_tun_socket(int fd)
+static struct socket *get_tap_socket(int fd)
{
struct file *file = fget(fd);
struct socket *sock;
if (!file)
return ERR_PTR(-EBADF);
sock = tun_get_socket(file);
+ if (!IS_ERR(sock))
+ return sock;
+ sock = macvtap_get_socket(file);
if (IS_ERR(sock))
fput(file);
return sock;
@@ -473,7 +477,7 @@ static struct socket *get_socket(int fd)
sock = get_raw_socket(fd);
if (!IS_ERR(sock))
return sock;
- sock = get_tun_socket(fd);
+ sock = get_tap_socket(fd);
if (!IS_ERR(sock))
return sock;
return ERR_PTR(-ENOTSOCK);
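
With get_tap_socket() now accepting both tun and macvtap file descriptors, userspace can hand a macvtap queue to vhost-net. A heavily simplified sketch, assuming the usual /dev/vhost-net node and a udev-created /dev/tapN node for the macvtap class device, and omitting the memory-table and vring setup (plus all the virtio negotiation) that a real client such as qemu performs:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/vhost.h>

	int main(void)
	{
		int vhost = open("/dev/vhost-net", O_RDWR);
		int tap   = open("/dev/tap5", O_RDWR);	/* macvtap queue fd (assumed node) */
		struct vhost_vring_file backend = { .index = 0, .fd = tap };

		if (vhost < 0 || tap < 0)
			return 1;
		if (ioctl(vhost, VHOST_SET_OWNER) < 0)
			return 1;
		/* get_socket() in drivers/vhost/net.c resolves this fd via
		 * macvtap_get_socket() after tun_get_socket() fails. */
		if (ioctl(vhost, VHOST_NET_SET_BACKEND, &backend) < 0)
			perror("VHOST_NET_SET_BACKEND");	/* expected without full vring setup */
		return 0;
	}
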
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index c0d8357917e..4c4c74ec598 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -174,8 +174,7 @@ struct icmp6_filter {
extern void icmpv6_send(struct sk_buff *skb,
u8 type, u8 code,
- __u32 info,
- struct net_device *dev);
+ __u32 info);
extern int icmpv6_init(void);
extern int icmpv6_err_convert(u8 type, u8 code,
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index f9cb9ba1475..b78a712247d 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -7,6 +7,19 @@
#include <linux/netlink.h>
#include <net/netlink.h>
+#if defined(CONFIG_MACVTAP) || defined(CONFIG_MACVTAP_MODULE)
+struct socket *macvtap_get_socket(struct file *);
+#else
+#include <linux/err.h>
+#include <linux/errno.h>
+struct file;
+struct socket;
+static inline struct socket *macvtap_get_socket(struct file *f)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_MACVTAP */
+
struct macvlan_port;
struct macvtap_queue;
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index 0f953fe4041..e28f5a0182e 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -257,6 +257,7 @@ enum
LINUX_MIB_XFRMOUTPOLBLOCK, /* XfrmOutPolBlock */
LINUX_MIB_XFRMOUTPOLDEAD, /* XfrmOutPolDead */
LINUX_MIB_XFRMOUTPOLERROR, /* XfrmOutPolError */
+ LINUX_MIB_XFRMFWDHDRERROR, /* XfrmFwdHdrError*/
__LINUX_MIB_XFRMMAX
};
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 7fee8a4df93..a778ee02459 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -103,6 +103,8 @@ enum {
#define TCP_CONGESTION 13 /* Congestion control algorithm */
#define TCP_MD5SIG 14 /* TCP MD5 Signature (RFC2385) */
#define TCP_COOKIE_TRANSACTIONS 15 /* TCP Cookie Transactions */
+#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/
+#define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */
/* for TCP_INFO socket option */
#define TCPI_OPT_TIMESTAMPS 1
@@ -340,7 +342,10 @@ struct tcp_sock {
u32 frto_highmark; /* snd_nxt when RTO occurred */
u16 advmss; /* Advertised MSS */
u8 frto_counter; /* Number of new acks after RTO */
- u8 nonagle; /* Disable Nagle algorithm? */
+ u8 nonagle : 4,/* Disable Nagle algorithm? */
+ thin_lto : 1,/* Use linear timeouts for thin streams */
+ thin_dupack : 1,/* Fast retransmit on first dupack */
+ unused : 2;
/* RTT measurement */
u32 srtt; /* smoothed round trip time << 3 */
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 8f279ddb359..86f46c49e31 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -124,7 +124,7 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
}
struct fib6_walker_t {
- struct fib6_walker_t *prev, *next;
+ struct list_head lh;
struct fib6_node *root, *node;
struct rt6_info *leaf;
unsigned char state;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index a63b2192ac1..f82e463c875 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -196,7 +196,7 @@ enum {
* All other Exact length of attribute payload
*
* Example:
- * static struct nla_policy my_policy[ATTR_MAX+1] __read_mostly = {
+ * static const struct nla_policy my_policy[ATTR_MAX+1] = {
* [ATTR_FOO] = { .type = NLA_U16 },
* [ATTR_BAR] = { .type = NLA_STRING, .len = BARSIZ },
* [ATTR_BAZ] = { .len = sizeof(struct mystruct) },
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 75a00c80bdd..56f0aec40ed 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -196,6 +196,9 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCP_NAGLE_CORK 2 /* Socket is corked */
#define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */
+/* TCP thin-stream limits */
+#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */
+
extern struct inet_timewait_death_row tcp_death_row;
/* sysctl variables for tcp */
@@ -241,6 +244,8 @@ extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;
extern int sysctl_tcp_cookie_size;
+extern int sysctl_tcp_thin_linear_timeouts;
+extern int sysctl_tcp_thin_dupack;
extern atomic_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
@@ -1386,6 +1391,14 @@ static inline void tcp_highest_sack_combine(struct sock *sk,
tcp_sk(sk)->highest_sack = new;
}
+/* Determines whether this is a thin stream (which may suffer from
+ * increased latency). Used to trigger latency-reducing mechanisms.
+ */
+static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp)
+{
+ return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
+}
+
/* /proc */
enum tcp_seq_states {
TCP_SEQ_STATE_LISTENING,
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index ea8384d3caa..899ca51be5e 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -46,15 +46,13 @@ static struct genl_family family = {
.maxattr = TASKSTATS_CMD_ATTR_MAX,
};
-static struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1]
-__read_mostly = {
+static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
[TASKSTATS_CMD_ATTR_PID] = { .type = NLA_U32 },
[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};
-static struct nla_policy
-cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] __read_mostly = {
+static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index db9f5b39388..813e399220a 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -54,7 +54,7 @@ MODULE_LICENSE("GPL");
/**************** DCB attribute policies *************************************/
/* DCB netlink attributes policy */
-static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
[DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
[DCB_ATTR_STATE] = {.type = NLA_U8},
[DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
@@ -68,7 +68,7 @@ static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
};
/* DCB priority flow control to User Priority nested attributes */
-static struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
[DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
@@ -81,7 +81,7 @@ static struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
};
/* DCB priority grouping nested attributes */
-static struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
[DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
[DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
[DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
@@ -103,7 +103,7 @@ static struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
};
/* DCB traffic class nested attributes. */
-static struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
+static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
[DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
[DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
@@ -112,7 +112,7 @@ static struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
};
/* DCB capabilities nested attributes. */
-static struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
[DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
[DCB_CAP_ATTR_PG] = {.type = NLA_U8},
[DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
@@ -124,14 +124,14 @@ static struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
};
/* DCB capabilities nested attributes. */
-static struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
[DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};
/* DCB BCN nested attributes. */
-static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
[DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
@@ -160,7 +160,7 @@ static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
};
/* DCB APP nested attributes. */
-static struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
[DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
[DCB_APP_ATTR_ID] = {.type = NLA_U16},
[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index a2a5983dbf0..c0c5274d027 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -793,7 +793,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
}
if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ip_rt_put(rt);
goto tx_error;
}
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 7e3712ce399..c1bc074f61b 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -576,6 +576,20 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_dointvec
},
{
+ .procname = "tcp_thin_linear_timeouts",
+ .data = &sysctl_tcp_thin_linear_timeouts,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "tcp_thin_dupack",
+ .data = &sysctl_tcp_thin_dupack,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
.procname = "udp_mem",
.data = &sysctl_udp_mem,
.maxlen = sizeof(sysctl_udp_mem),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e471d037fcc..5901010fad5 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2229,6 +2229,20 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
}
break;
+ case TCP_THIN_LINEAR_TIMEOUTS:
+ if (val < 0 || val > 1)
+ err = -EINVAL;
+ else
+ tp->thin_lto = val;
+ break;
+
+ case TCP_THIN_DUPACK:
+ if (val < 0 || val > 1)
+ err = -EINVAL;
+ else
+ tp->thin_dupack = val;
+ break;
+
case TCP_CORK:
/* When set indicates to always queue non-full frames.
* Later the user clears this option and we transmit
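
The two new socket options are per-connection switches for the thin-stream behaviour; the sysctls added above enable it system-wide instead. A minimal userspace sketch, defining the constants locally in case the installed headers do not yet carry them:

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>
	#include <stdio.h>

	#ifndef TCP_THIN_LINEAR_TIMEOUTS
	#define TCP_THIN_LINEAR_TIMEOUTS 16
	#endif
	#ifndef TCP_THIN_DUPACK
	#define TCP_THIN_DUPACK 17
	#endif

	int main(void)
	{
		int one = 1;
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		if (fd < 0)
			return 1;
		/* Linear RTO retries instead of exponential backoff while thin */
		if (setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS, &one, sizeof(one)) < 0)
			perror("TCP_THIN_LINEAR_TIMEOUTS");
		/* Fast retransmit after the first duplicate ACK while thin */
		if (setsockopt(fd, IPPROTO_TCP, TCP_THIN_DUPACK, &one, sizeof(one)) < 0)
			perror("TCP_THIN_DUPACK");
		return 0;
	}
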
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3fddc69cccc..788851ca8c5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -89,6 +89,8 @@ int sysctl_tcp_frto __read_mostly = 2;
int sysctl_tcp_frto_response __read_mostly;
int sysctl_tcp_nometrics_save __read_mostly;
+int sysctl_tcp_thin_dupack __read_mostly;
+
int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_abc __read_mostly;
@@ -2447,6 +2449,16 @@ static int tcp_time_to_recover(struct sock *sk)
return 1;
}
+ /* If a thin stream is detected, retransmit after first
+ * received dupack. Employ only if SACK is supported in order
+ * to avoid possible corner-case series of spurious retransmissions
+ * Use only if there are no unsent data.
+ */
+ if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
+ tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
+ tcp_is_sack(tp) && !tcp_send_head(sk))
+ return 1;
+
return 0;
}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index de7d1bf9114..a17629b8912 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -29,6 +29,7 @@ int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
+int sysctl_tcp_thin_linear_timeouts __read_mostly;
static void tcp_write_timer(unsigned long);
static void tcp_delack_timer(unsigned long);
@@ -415,7 +416,25 @@ void tcp_retransmit_timer(struct sock *sk)
icsk->icsk_retransmits++;
out_reset_timer:
- icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+ /* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
+ * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
+ * might be increased if the stream oscillates between thin and thick,
+ * thus the old value might already be too high compared to the value
+ * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
+ * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
+ * exponential backoff behaviour to avoid continue hammering
+ * linear-timeout retransmissions into a black hole
+ */
+ if (sk->sk_state == TCP_ESTABLISHED &&
+ (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
+ tcp_stream_is_thin(tp) &&
+ icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
+ icsk->icsk_backoff = 0;
+ icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
+ } else {
+ /* Use normal (exponential) backoff */
+ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+ }
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
__sk_dst_reset(sk);
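
To make the effect of the new branch concrete, the toy program below contrasts plain exponential backoff with the thin-stream policy, which keeps the RTO flat for the first TCP_THIN_LINEAR_RETRIES (6) retransmissions and only then starts doubling. This is a userspace illustration only, assuming an initial RTO of 200 ms and a 120 s TCP_RTO_MAX:

	#include <stdio.h>

	#define TCP_THIN_LINEAR_RETRIES	6		/* from include/net/tcp.h */
	#define RTO_MAX_MS		(120 * 1000)	/* TCP_RTO_MAX, assumed 120 s */

	int main(void)
	{
		unsigned int exp_rto = 200, thin_rto = 200;	/* assumed initial RTO */
		int retrans;

		printf("retrans  exponential  thin-linear (ms)\n");
		for (retrans = 1; retrans <= 10; retrans++) {
			exp_rto = exp_rto * 2 > RTO_MAX_MS ? RTO_MAX_MS : exp_rto * 2;
			if (retrans > TCP_THIN_LINEAR_RETRIES)	/* fall back to backoff */
				thin_rto = thin_rto * 2 > RTO_MAX_MS ? RTO_MAX_MS : thin_rto * 2;
			printf("%7d  %11u  %11u\n", retrans, exp_rto, thin_rto);
		}
		return 0;
	}
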
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 4bac362b133..074f2c084f9 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -481,7 +481,7 @@ looped_back:
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
- 0, skb->dev);
+ 0);
kfree_skb(skb);
return -1;
}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 217dbc2e28d..eb9abe24bdf 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -67,11 +67,6 @@
#include <asm/uaccess.h>
#include <asm/system.h>
-DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
-EXPORT_SYMBOL(icmpv6_statistics);
-DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics) __read_mostly;
-EXPORT_SYMBOL(icmpv6msg_statistics);
-
/*
* The ICMP socket(s). This is the most convenient way to flow control
* our ICMP output as well as maintain a clean interface throughout
@@ -119,7 +114,7 @@ static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
*/
void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
{
- icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
+ icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
kfree_skb(skb);
}
@@ -305,8 +300,7 @@ static inline void mip6_addr_swap(struct sk_buff *skb) {}
/*
* Send an ICMP message in response to a packet in error
*/
-void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
- struct net_device *dev)
+void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
{
struct net *net = dev_net(skb->dev);
struct inet6_dev *idev = NULL;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 77e122f53ea..2f9847924fa 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -93,29 +93,20 @@ static __u32 rt_sernum;
static void fib6_gc_timer_cb(unsigned long arg);
-static struct fib6_walker_t fib6_walker_list = {
- .prev = &fib6_walker_list,
- .next = &fib6_walker_list,
-};
-
-#define FOR_WALKERS(w) for ((w)=fib6_walker_list.next; (w) != &fib6_walker_list; (w)=(w)->next)
+static LIST_HEAD(fib6_walkers);
+#define FOR_WALKERS(w) list_for_each_entry(w, &fib6_walkers, lh)
static inline void fib6_walker_link(struct fib6_walker_t *w)
{
write_lock_bh(&fib6_walker_lock);
- w->next = fib6_walker_list.next;
- w->prev = &fib6_walker_list;
- w->next->prev = w;
- w->prev->next = w;
+ list_add(&w->lh, &fib6_walkers);
write_unlock_bh(&fib6_walker_lock);
}
static inline void fib6_walker_unlink(struct fib6_walker_t *w)
{
write_lock_bh(&fib6_walker_lock);
- w->next->prev = w->prev;
- w->prev->next = w->next;
- w->prev = w->next = w;
+ list_del(&w->lh);
write_unlock_bh(&fib6_walker_lock);
}
static __inline__ u32 fib6_new_sernum(void)
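
The fib6 walker conversion is the standard replacement of a hand-rolled circular list with <linux/list.h> primitives. A minimal sketch of the same idiom outside the fib6 code, assuming a spinlock guards the list the way fib6_walker_lock does:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct walker {
		struct list_head lh;	/* replaces open-coded *prev, *next */
		int state;
	};

	static LIST_HEAD(walkers);
	static DEFINE_SPINLOCK(walkers_lock);

	static void walker_link(struct walker *w)
	{
		spin_lock(&walkers_lock);
		list_add(&w->lh, &walkers);	/* was four pointer assignments */
		spin_unlock(&walkers_lock);
	}

	static void walker_unlink(struct walker *w)
	{
		spin_lock(&walkers_lock);
		list_del(&w->lh);
		spin_unlock(&walkers_lock);
	}

	static void walkers_for_each(void (*fn)(struct walker *))
	{
		struct walker *w;

		spin_lock(&walkers_lock);
		list_for_each_entry(w, &walkers, lh)	/* replaces FOR_WALKERS() */
			fn(w);
		spin_unlock(&walkers_lock);
	}
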
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 237e2dba6e9..e28f9203dec 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -216,8 +216,7 @@ resubmit:
IP6_INC_STATS_BH(net, idev,
IPSTATS_MIB_INUNKNOWNPROTOS);
icmpv6_send(skb, ICMPV6_PARAMPROB,
- ICMPV6_UNK_NEXTHDR, nhoff,
- skb->dev);
+ ICMPV6_UNK_NEXTHDR, nhoff);
}
} else
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index eb6d0972863..1a5fe9ad194 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -267,7 +267,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
if (net_ratelimit())
printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
skb->dev = dst->dev;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
return -EMSGSIZE;
@@ -441,8 +441,7 @@ int ip6_forward(struct sk_buff *skb)
if (hdr->hop_limit <= 1) {
/* Force OUTPUT device used as source address */
skb->dev = dst->dev;
- icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
- 0, skb->dev);
+ icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
IP6_INC_STATS_BH(net,
ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
@@ -504,7 +503,7 @@ int ip6_forward(struct sk_buff *skb)
goto error;
if (addrtype & IPV6_ADDR_LINKLOCAL) {
icmpv6_send(skb, ICMPV6_DEST_UNREACH,
- ICMPV6_NOT_NEIGHBOUR, 0, skb->dev);
+ ICMPV6_NOT_NEIGHBOUR, 0);
goto error;
}
}
@@ -512,7 +511,7 @@ int ip6_forward(struct sk_buff *skb)
if (skb->len > dst_mtu(dst)) {
/* Again, force OUTPUT device used as source address */
skb->dev = dst->dev;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst));
IP6_INC_STATS_BH(net,
ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
IP6_INC_STATS_BH(net,
@@ -627,7 +626,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
*/
if (!skb->local_df) {
skb->dev = skb_dst(skb)->dev;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9b02492d870..138980eec21 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -622,7 +622,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (rt && rt->rt6i_dev)
skb2->dev = rt->rt6i_dev;
- icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev);
+ icmpv6_send(skb2, rel_type, rel_code, rel_info);
if (rt)
dst_release(&rt->u.dst);
@@ -1014,7 +1014,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
if (tel->encap_limit == 0) {
icmpv6_send(skb, ICMPV6_PARAMPROB,
- ICMPV6_HDR_FIELD, offset + 2, skb->dev);
+ ICMPV6_HDR_FIELD, offset + 2);
return -1;
}
encap_limit = tel->encap_limit - 1;
@@ -1033,7 +1033,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
if (err != 0) {
if (err == -EMSGSIZE)
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
return -1;
}
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index f797e8c6f3b..2794b600283 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -56,7 +56,7 @@ static inline void *mip6_padn(__u8 *data, __u8 padlen)
static inline void mip6_param_prob(struct sk_buff *skb, u8 code, int pos)
{
- icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
+ icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
}
static int mip6_mh_len(int type)
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 8311ca31816..dd8afbaf00a 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -169,7 +169,7 @@ send_unreach(struct net *net, struct sk_buff *skb_in, unsigned char code,
if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
skb_in->dev = net->loopback_dev;
- icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0, NULL);
+ icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
}
static unsigned int
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index b2847ed6a7d..a555156e977 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -228,7 +228,7 @@ static void ip6_frag_expire(unsigned long data)
pointer directly, device might already disappeared.
*/
fq->q.fragments->dev = dev;
- icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
+ icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
out_rcu_unlock:
rcu_read_unlock();
out:
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8500156f263..88c0a5c49ae 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -909,7 +909,7 @@ static void ip6_link_failure(struct sk_buff *skb)
{
struct rt6_info *rt;
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
rt = (struct rt6_info *) skb_dst(skb);
if (rt) {
@@ -1884,7 +1884,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
ipstats_mib_noroutes);
break;
}
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0, skb->dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
kfree_skb(skb);
return 0;
}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 96eb2d4641c..b1eea811be4 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -743,7 +743,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
if (skb->len > mtu) {
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ip_rt_put(rt);
goto tx_error;
}
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 51e2832d13a..e17bc1dfc1a 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -98,7 +98,7 @@ static int tunnel6_rcv(struct sk_buff *skb)
if (!handler->handler(skb))
return 0;
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, skb->dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
kfree_skb(skb);
@@ -116,7 +116,7 @@ static int tunnel46_rcv(struct sk_buff *skb)
if (!handler->handler(skb))
return 0;
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, skb->dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
kfree_skb(skb);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index a7af9d68cd6..52b8347ae3b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -680,12 +680,11 @@ static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
+ struct net *net = dev_net(skb->dev);
struct sock *sk;
struct udphdr *uh;
- struct net_device *dev = skb->dev;
struct in6_addr *saddr, *daddr;
u32 ulen = 0;
- struct net *net = dev_net(skb->dev);
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto short_packet;
@@ -744,7 +743,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
proto == IPPROTO_UDPLITE);
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
kfree_skb(skb);
return 0;
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index c4f4eef032a..0c92112dcba 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -38,7 +38,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
if (!skb->local_df && skb->len > mtu) {
skb->dev = dst->dev;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ret = -EMSGSIZE;
}
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
index 476b307bd80..69b5b75f543 100644
--- a/net/irda/irnetlink.c
+++ b/net/irda/irnetlink.c
@@ -124,7 +124,7 @@ static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
return ret;
}
-static struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = {
+static const struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = {
[IRDA_NL_ATTR_IFNAME] = { .type = NLA_NUL_STRING,
.len = IFNAMSIZ-1 },
[IRDA_NL_ATTR_MODE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 72e96d823eb..44590887a92 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -515,8 +515,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
*/
#ifdef CONFIG_IP_VS_IPV6
if (svc->af == AF_INET6)
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0,
- skb->dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
else
#endif
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
@@ -1048,7 +1047,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
icmpv6_send(skb,
ICMPV6_DEST_UNREACH,
ICMPV6_PORT_UNREACH,
- 0, skb->dev);
+ 0);
else
#endif
icmp_send(skb,
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 30b3189bd29..223b5018c7d 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -311,7 +311,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
mtu = dst_mtu(&rt->u.dst);
if (skb->len > mtu) {
dst_release(&rt->u.dst);
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
}
@@ -454,7 +454,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
mtu = dst_mtu(&rt->u.dst);
if (skb->len > mtu) {
dst_release(&rt->u.dst);
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP_VS_DBG_RL_PKT(0, pp, skb, 0,
"ip_vs_nat_xmit_v6(): frag needed for");
goto tx_error;
@@ -672,7 +672,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
dst_release(&rt->u.dst);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
@@ -814,7 +814,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->u.dst);
if (skb->len > mtu) {
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
dst_release(&rt->u.dst);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
@@ -965,7 +965,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
mtu = dst_mtu(&rt->u.dst);
if (skb->len > mtu) {
dst_release(&rt->u.dst);
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 9bc9b92bc09..3d9122e78f4 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -144,7 +144,7 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
/*
* SMP locking strategy:
* hash table is protected with spinlock unix_table_lock
- * each socket state is protected by separate rwlock.
+ * each socket state is protected by separate spin lock.
*/
static inline unsigned unix_hash_fold(__wsum n)
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index d3bfb6ef13a..7718657e93d 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -320,8 +320,7 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
EXPORT_SYMBOL_GPL(wimax_msg);
-static const
-struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+static const struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
[WIMAX_GNL_MSG_IFIDX] = {
.type = NLA_U32,
},
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index 35f370091f4..4dc82a54ba3 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -91,8 +91,7 @@ int wimax_reset(struct wimax_dev *wimax_dev)
EXPORT_SYMBOL(wimax_reset);
-static const
-struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+static const struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = {
[WIMAX_GNL_RESET_IFIDX] = {
.type = NLA_U32,
},
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index ae752a64d92..e978c7136c9 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -410,8 +410,7 @@ void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
* just query).
*/
-static const
-struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+static const struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = {
[WIMAX_GNL_RFKILL_IFIDX] = {
.type = NLA_U32,
},
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c
index a76b8fcb056..11ad3356eb5 100644
--- a/net/wimax/op-state-get.c
+++ b/net/wimax/op-state-get.c
@@ -33,8 +33,7 @@
#include "debug-levels.h"
-static const
-struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+static const struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = {
[WIMAX_GNL_STGET_IFIDX] = {
.type = NLA_U32,
},
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index c8866412f83..813e1eaea29 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -75,8 +75,7 @@ MODULE_PARM_DESC(debug,
* close to where the data is generated.
*/
/*
-static const
-struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
+static const struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
[WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 },
[WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 },
};
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 5b79ecf17be..a001ea32cb7 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -58,7 +58,7 @@ static int get_rdev_dev_by_info_ifindex(struct genl_info *info,
}
/* policy for the attributes */
-static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
+static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING,
.len = 20-1 },
@@ -148,8 +148,7 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
};
/* policy for the attributes */
-static struct nla_policy
-nl80211_key_policy[NL80211_KEY_MAX + 1] __read_mostly = {
+static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = {
[NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN },
[NL80211_KEY_IDX] = { .type = NLA_U8 },
[NL80211_KEY_CIPHER] = { .type = NLA_U32 },
@@ -2501,8 +2500,7 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
return err;
}
-static const struct nla_policy
- reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
+static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
[NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 },
[NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 },
[NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 },
@@ -2671,8 +2669,7 @@ do {\
} \
} while (0);\
-static struct nla_policy
-nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] __read_mostly = {
+static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = {
[NL80211_MESHCONF_RETRY_TIMEOUT] = { .type = NLA_U16 },
[NL80211_MESHCONF_CONFIRM_TIMEOUT] = { .type = NLA_U16 },
[NL80211_MESHCONF_HOLDING_TIMEOUT] = { .type = NLA_U16 },
@@ -4470,8 +4467,7 @@ static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
return mask;
}
-static struct nla_policy
-nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] __read_mostly = {
+static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
[NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
.len = NL80211_MAX_SUPP_RATES },
};
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 2c5d93181f1..4368e7b8846 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2045,8 +2045,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
int res;
if (xfrm_decode_session(skb, &fl, family) < 0) {
- /* XXX: we should have something like FWDHDRERROR here. */
- XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
return 0;
}
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index 003f2c437ac..58d9ae00559 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -41,6 +41,7 @@ static const struct snmp_mib xfrm_mib_list[] = {
SNMP_MIB_ITEM("XfrmOutPolBlock", LINUX_MIB_XFRMOUTPOLBLOCK),
SNMP_MIB_ITEM("XfrmOutPolDead", LINUX_MIB_XFRMOUTPOLDEAD),
SNMP_MIB_ITEM("XfrmOutPolError", LINUX_MIB_XFRMOUTPOLERROR),
+ SNMP_MIB_ITEM("XfrmFwdHdrError", LINUX_MIB_XFRMFWDHDRERROR),
SNMP_MIB_SENTINEL
};
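
The new LINUX_MIB_XFRMFWDHDRERROR counter is exported with the other xfrm MIBs, so it surfaces as the XfrmFwdHdrError line in /proc/net/xfrm_stat (available when CONFIG_XFRM_STATISTICS is set). A trivial userspace check, assuming that proc file is present:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/proc/net/xfrm_stat", "r");

		if (!f) {
			perror("/proc/net/xfrm_stat");	/* needs CONFIG_XFRM_STATISTICS */
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "XfrmFwdHdrError", 15))
				fputs(line, stdout);	/* decode_session failures in __xfrm_route_forward */
		fclose(f);
		return 0;
	}
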