Diffstat (limited to 'net')
-rw-r--r--  net/atm/clip.c          |  4
-rw-r--r--  net/core/dev.c          | 12
-rw-r--r--  net/core/dev_mcast.c    | 28
-rw-r--r--  net/core/netpoll.c      |  9
-rw-r--r--  net/core/pktgen.c       |  4
-rw-r--r--  net/sched/sch_generic.c | 28
-rw-r--r--  net/sched/sch_teql.c    |  9
7 files changed, 41 insertions, 53 deletions
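
Every hunk below makes the same mechanical substitution: open-coded manipulation of dev->xmit_lock and dev->xmit_lock_owner is replaced by netif_tx_lock()/netif_tx_unlock() helpers (plus _bh and trylock variants), and the spinlock itself is renamed to _xmit_lock to discourage direct use. The wrappers themselves are outside this diff; what follows is a minimal sketch of what they plausibly look like, inferred from the open-coded sequences being removed and from the register_netdevice() hunk below, not copied from the patch.

/* Sketch only -- inferred from this diff, not taken from the patch. */
static inline void netif_tx_lock(struct net_device *dev)
{
	spin_lock(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

static inline int netif_tx_trylock(struct net_device *dev)
{
	int ok = spin_trylock(&dev->_xmit_lock);
	if (ok)
		dev->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock(&dev->_xmit_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock_bh(&dev->_xmit_lock);
}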
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 72d85298266..f92f9c94d2c 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -98,7 +98,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
return;
}
- spin_lock_bh(&entry->neigh->dev->xmit_lock); /* block clip_start_xmit() */
+ netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */
entry->neigh->used = jiffies;
for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
if (*walk == clip_vcc) {
@@ -122,7 +122,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
"0x%p)\n", entry, clip_vcc);
out:
- spin_unlock_bh(&entry->neigh->dev->xmit_lock);
+ netif_tx_unlock_bh(entry->neigh->dev);
}
/* The neighbour entry n->lock is held. */
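
The clip.c hunks use the _bh variants because the path being excluded, clip_start_xmit(), sits in the transmit path and can be entered from softirq context. The general shape, as a hypothetical helper (update_xmit_state is an illustrative name, not from the patch):

/* Hypothetical: process-context code that must not race with the driver's
 * transmit routine takes the same lock the core holds around
 * hard_start_xmit(), with bottom halves disabled. */
static void update_xmit_state(struct net_device *dev)
{
	netif_tx_lock_bh(dev);		/* blocks e.g. clip_start_xmit() */
	/* ... modify per-device state that the transmit path reads ... */
	netif_tx_unlock_bh(dev);
}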
diff --git a/net/core/dev.c b/net/core/dev.c
index 6bfa78c66c2..1b09f1cae46 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1282,15 +1282,13 @@ int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
#define HARD_TX_LOCK(dev, cpu) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \
- spin_lock(&dev->xmit_lock); \
- dev->xmit_lock_owner = cpu; \
+ netif_tx_lock(dev); \
} \
}
#define HARD_TX_UNLOCK(dev) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \
- dev->xmit_lock_owner = -1; \
- spin_unlock(&dev->xmit_lock); \
+ netif_tx_unlock(dev); \
} \
}
@@ -1389,8 +1387,8 @@ int dev_queue_xmit(struct sk_buff *skb)
/* The device has no queue. Common case for software devices:
loopback, all the sorts of tunnels...
- Really, it is unlikely that xmit_lock protection is necessary here.
- (f.e. loopback and IP tunnels are clean ignoring statistics
+ Really, it is unlikely that netif_tx_lock protection is necessary
+ here. (f.e. loopback and IP tunnels are clean ignoring statistics
counters.)
However, it is possible, that they rely on protection
made by us here.
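
Note that HARD_TX_LOCK() now ignores its cpu argument, since netif_tx_lock() records smp_processor_id() itself. Drivers that set NETIF_F_LLTX skip the core lock entirely and must serialize internally; a sketch of such a driver follows, where foo_priv, its tx_lock, and foo_xmit are hypothetical names:

/* Sketch of a lockless-TX (NETIF_F_LLTX) driver's transmit routine. */
static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (!spin_trylock(&priv->tx_lock))
		return NETDEV_TX_LOCKED;	/* core requeues and retries */
	/* ... hand skb to the hardware ... */
	spin_unlock(&priv->tx_lock);
	return NETDEV_TX_OK;
}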
@@ -2805,7 +2803,7 @@ int register_netdevice(struct net_device *dev)
BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
spin_lock_init(&dev->queue_lock);
- spin_lock_init(&dev->xmit_lock);
+ spin_lock_init(&dev->_xmit_lock);
dev->xmit_lock_owner = -1;
#ifdef CONFIG_NET_CLS_ACT
spin_lock_init(&dev->ingress_lock);
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 05d60850840..c57d887da2e 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -62,7 +62,7 @@
* Device mc lists are changed by bh at least if IPv6 is enabled,
* so that it must be bh protected.
*
- * We block accesses to device mc filters with dev->xmit_lock.
+ * We block accesses to device mc filters with netif_tx_lock.
*/
/*
@@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_device *dev)
void dev_mc_upload(struct net_device *dev)
{
- spin_lock_bh(&dev->xmit_lock);
+ netif_tx_lock_bh(dev);
__dev_mc_upload(dev);
- spin_unlock_bh(&dev->xmit_lock);
+ netif_tx_unlock_bh(dev);
}
/*
@@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
int err = 0;
struct dev_mc_list *dmi, **dmip;
- spin_lock_bh(&dev->xmit_lock);
+ netif_tx_lock_bh(dev);
for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
/*
@@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
*/
__dev_mc_upload(dev);
- spin_unlock_bh(&dev->xmit_lock);
+ netif_tx_unlock_bh(dev);
return 0;
}
}
err = -ENOENT;
done:
- spin_unlock_bh(&dev->xmit_lock);
+ netif_tx_unlock_bh(dev);
return err;
}
@@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
- spin_lock_bh(&dev->xmit_lock);
+ netif_tx_lock_bh(dev);
for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
dmi->dmi_addrlen == alen) {
@@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
}
if ((dmi = dmi1) == NULL) {
- spin_unlock_bh(&dev->xmit_lock);
+ netif_tx_unlock_bh(dev);
return -ENOMEM;
}
memcpy(dmi->dmi_addr, addr, alen);
@@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
__dev_mc_upload(dev);
- spin_unlock_bh(&dev->xmit_lock);
+ netif_tx_unlock_bh(dev);
return 0;
done:
- spin_unlock_bh(&dev->xmit_lock);
+ netif_tx_unlock_bh(dev);
kfree(dmi1);
return err;
}
@@ -204,7 +204,7 @@ done:
void dev_mc_discard(struct net_device *dev)
{
- spin_lock_bh(&dev->xmit_lock);
+ netif_tx_lock_bh(dev);
while (dev->mc_list != NULL) {
struct dev_mc_list *tmp = dev->mc_list;
@@ -215,7 +215,7 @@ void dev_mc_discard(struct net_device *dev)
}
dev->mc_count = 0;
- spin_unlock_bh(&dev->xmit_lock);
+ netif_tx_unlock_bh(dev);
}
#ifdef CONFIG_PROC_FS
@@ -250,7 +250,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
struct dev_mc_list *m;
struct net_device *dev = v;
- spin_lock_bh(&dev->xmit_lock);
+ netif_tx_lock_bh(dev);
for (m = dev->mc_list; m; m = m->next) {
int i;
@@ -262,7 +262,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
seq_putc(seq, '\n');
}
- spin_unlock_bh(&dev->xmit_lock);
+ netif_tx_unlock_bh(dev);
return 0;
}
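
The multicast list takes the transmit lock at all because of the upload step: it calls into the driver, whose set_multicast_list() method has historically been serialized against its own transmit routine by this lock. Roughly (a paraphrase of __dev_mc_upload(), which this patch leaves unchanged):

static void __dev_mc_upload(struct net_device *dev)
{
	/* callers hold netif_tx_lock_bh(dev) */
	if (!(dev->flags & IFF_UP))
		return;
	if (dev->set_multicast_list == NULL)
		return;
	dev->set_multicast_list(dev);
}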
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index e8e05cebd95..9cb78183038 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -273,24 +273,21 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
do {
npinfo->tries--;
- spin_lock(&np->dev->xmit_lock);
- np->dev->xmit_lock_owner = smp_processor_id();
+ netif_tx_lock(np->dev);
/*
* network drivers do not expect to be called if the queue is
* stopped.
*/
if (netif_queue_stopped(np->dev)) {
- np->dev->xmit_lock_owner = -1;
- spin_unlock(&np->dev->xmit_lock);
+ netif_tx_unlock(np->dev);
netpoll_poll(np);
udelay(50);
continue;
}
status = np->dev->hard_start_xmit(skb, np->dev);
- np->dev->xmit_lock_owner = -1;
- spin_unlock(&np->dev->xmit_lock);
+ netif_tx_unlock(np->dev);
/* success */
if(!status) {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c23e9c06ee2..67ed14ddabd 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2897,7 +2897,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
}
}
- spin_lock_bh(&odev->xmit_lock);
+ netif_tx_lock_bh(odev);
if (!netif_queue_stopped(odev)) {
atomic_inc(&(pkt_dev->skb->users));
@@ -2942,7 +2942,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->next_tx_ns = 0;
}
- spin_unlock_bh(&odev->xmit_lock);
+ netif_tx_unlock_bh(odev);
/* If pkt_dev->count is zero, then run forever */
if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 138ea92ed26..b1e4c5e20ac 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -72,9 +72,9 @@ void qdisc_unlock_tree(struct net_device *dev)
dev->queue_lock serializes queue accesses for this device
AND dev->qdisc pointer itself.
- dev->xmit_lock serializes accesses to device driver.
+ netif_tx_lock serializes accesses to device driver.
- dev->queue_lock and dev->xmit_lock are mutually exclusive,
+ dev->queue_lock and netif_tx_lock are mutually exclusive,
if one is grabbed, another must be free.
*/
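
qdisc_restart() below observes this rule by only try-acquiring the driver lock (so it never spins on netif_tx_lock while holding queue_lock) and by dropping queue_lock across the actual transmit. Simplified, with the LLTX and error paths omitted:

if (!netif_tx_trylock(dev))
	goto collision;		/* driver busy on another CPU: requeue skb */
spin_unlock(&dev->queue_lock);	/* don't hold both across the transmit */
ret = dev->hard_start_xmit(skb, dev);
netif_tx_unlock(dev);
spin_lock(&dev->queue_lock);	/* reacquire to requeue or dequeue next */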
@@ -108,7 +108,7 @@ int qdisc_restart(struct net_device *dev)
* will be requeued.
*/
if (!nolock) {
- if (!spin_trylock(&dev->xmit_lock)) {
+ if (!netif_tx_trylock(dev)) {
collision:
/* So, someone grabbed the driver. */
@@ -126,8 +126,6 @@ int qdisc_restart(struct net_device *dev)
__get_cpu_var(netdev_rx_stat).cpu_collision++;
goto requeue;
}
- /* Remember that the driver is grabbed by us. */
- dev->xmit_lock_owner = smp_processor_id();
}
{
@@ -142,8 +140,7 @@ int qdisc_restart(struct net_device *dev)
ret = dev->hard_start_xmit(skb, dev);
if (ret == NETDEV_TX_OK) {
if (!nolock) {
- dev->xmit_lock_owner = -1;
- spin_unlock(&dev->xmit_lock);
+ netif_tx_unlock(dev);
}
spin_lock(&dev->queue_lock);
return -1;
@@ -157,8 +154,7 @@ int qdisc_restart(struct net_device *dev)
/* NETDEV_TX_BUSY - we need to requeue */
/* Release the driver */
if (!nolock) {
- dev->xmit_lock_owner = -1;
- spin_unlock(&dev->xmit_lock);
+ netif_tx_unlock(dev);
}
spin_lock(&dev->queue_lock);
q = dev->qdisc;
@@ -187,7 +183,7 @@ static void dev_watchdog(unsigned long arg)
{
struct net_device *dev = (struct net_device *)arg;
- spin_lock(&dev->xmit_lock);
+ netif_tx_lock(dev);
if (dev->qdisc != &noop_qdisc) {
if (netif_device_present(dev) &&
netif_running(dev) &&
@@ -203,7 +199,7 @@ static void dev_watchdog(unsigned long arg)
dev_hold(dev);
}
}
- spin_unlock(&dev->xmit_lock);
+ netif_tx_unlock(dev);
dev_put(dev);
}
@@ -227,17 +223,17 @@ void __netdev_watchdog_up(struct net_device *dev)
static void dev_watchdog_up(struct net_device *dev)
{
- spin_lock_bh(&dev->xmit_lock);
+ netif_tx_lock_bh(dev);
__netdev_watchdog_up(dev);
- spin_unlock_bh(&dev->xmit_lock);
+ netif_tx_unlock_bh(dev);
}
static void dev_watchdog_down(struct net_device *dev)
{
- spin_lock_bh(&dev->xmit_lock);
+ netif_tx_lock_bh(dev);
if (del_timer(&dev->watchdog_timer))
dev_put(dev);
- spin_unlock_bh(&dev->xmit_lock);
+ netif_tx_unlock_bh(dev);
}
void netif_carrier_on(struct net_device *dev)
@@ -582,7 +578,7 @@ void dev_deactivate(struct net_device *dev)
while (test_bit(__LINK_STATE_SCHED, &dev->state))
yield();
- spin_unlock_wait(&dev->xmit_lock);
+ spin_unlock_wait(&dev->_xmit_lock);
}
void dev_init_scheduler(struct net_device *dev)
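
dev_deactivate() is the one site left touching the raw _xmit_lock: it does not want to take the lock, only to wait until any in-flight transmitter has released it, and no netif_tx_* wrapper exposes that operation. If one wanted to hide this last raw access too, a wrapper might look like the following (netif_tx_unlock_wait is hypothetical, not part of this patch):

static inline void netif_tx_unlock_wait(struct net_device *dev)
{
	spin_unlock_wait(&dev->_xmit_lock);	/* wait for any holder to finish */
}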
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 79b8ef34c6e..4c16ad57a3e 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -302,20 +302,17 @@ restart:
switch (teql_resolve(skb, skb_res, slave)) {
case 0:
- if (spin_trylock(&slave->xmit_lock)) {
- slave->xmit_lock_owner = smp_processor_id();
+ if (netif_tx_trylock(slave)) {
if (!netif_queue_stopped(slave) &&
slave->hard_start_xmit(skb, slave) == 0) {
- slave->xmit_lock_owner = -1;
- spin_unlock(&slave->xmit_lock);
+ netif_tx_unlock(slave);
master->slaves = NEXT_SLAVE(q);
netif_wake_queue(dev);
master->stats.tx_packets++;
master->stats.tx_bytes += len;
return 0;
}
- slave->xmit_lock_owner = -1;
- spin_unlock(&slave->xmit_lock);
+ netif_tx_unlock(slave);
}
if (netif_queue_stopped(dev))
busy = 1;
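
teql's netif_tx_trylock() is the opportunistic variant of the same pattern: if another CPU already owns a slave's transmit path, the slave is treated as busy and the scheduler moves on instead of spinning. The pattern in isolation, as a hypothetical helper (try_slave_xmit is an illustrative name):

static int try_slave_xmit(struct net_device *slave, struct sk_buff *skb)
{
	int ret = NETDEV_TX_BUSY;

	if (!netif_tx_trylock(slave))
		return ret;		/* another CPU owns it: try next slave */
	if (!netif_queue_stopped(slave))
		ret = slave->hard_start_xmit(skb, slave);
	netif_tx_unlock(slave);
	return ret;
}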