path: root/include/linux/netdevice.h
author     Linus Torvalds <torvalds@linux-foundation.org>   2008-08-01 11:35:16 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-08-01 11:35:16 -0700
commit     9a5467fd600669cda488771dac3e951034fe2b08 (patch)
tree       20c3c73ff3571e525193aca20d3602161b4e90be /include/linux/netdevice.h
parent     676056132425ac425d7215cdaa8bd25582e07966 (diff)
parent     00b1304c4ca81dd893973cc620b87a5c3ff3f660 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (46 commits)
  tcp: MD5: Fix IPv6 signatures
  skbuff: add missing kernel-doc for do_not_encrypt
  net/ipv4/route.c: fix build error
  tcp: MD5: Fix MD5 signatures on certain ACK packets
  ipv6: Fix ip6_xmit to send fragments if ipfragok is true
  ipvs: Move userspace definitions to include/linux/ip_vs.h
  netdev: Fix lockdep warnings in multiqueue configurations.
  netfilter: xt_hashlimit: fix race between htable_destroy and htable_gc
  netfilter: ipt_recent: fix race between recent_mt_destroy and proc manipulations
  netfilter: nf_conntrack_tcp: decrease timeouts while data in unacknowledged
  irda: replace __FUNCTION__ with __func__
  nsc-ircc: default to dongle type 9 on IBM hardware
  bluetooth: add quirks for a few hci_usb devices
  hysdn: remove the packed attribute from PofTimStamp_tag
  isdn: use the common ascii hex helpers
  tg3: adapt tg3 to use reworked PCI PM code
  atm: fix direct casts of pointers to u32 in the InterPhase driver
  atm: fix const assignment/discard warnings in the ATM networking driver
  net: use the common ascii hex helpers
  random32: seeding improvement
  ...
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--   include/linux/netdevice.h   86
1 file changed, 55 insertions, 31 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b4d056ceab9..ee583f642a9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -440,6 +440,7 @@ static inline void napi_synchronize(const struct napi_struct *n)
enum netdev_queue_state_t
{
__QUEUE_STATE_XOFF,
+ __QUEUE_STATE_FROZEN,
};
struct netdev_queue {
@@ -636,7 +637,7 @@ struct net_device
unsigned int real_num_tx_queues;
unsigned long tx_queue_len; /* Max frames per queue allowed */
-
+ spinlock_t tx_global_lock;
/*
* One part is mostly used on xmit path (device)
*/
@@ -1099,6 +1100,11 @@ static inline int netif_queue_stopped(const struct net_device *dev)
return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
+static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
+{
+ return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
+}
+
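
The added netif_tx_queue_frozen() helper is the read side of the freeze protocol introduced further down: transmit paths that run under a single queue's _xmit_lock are expected to back off while netif_tx_lock() has the whole device frozen. A minimal sketch of such a gate, assuming a hypothetical caller; the helper name below is illustrative and not part of this patch:

/* Illustrative only: a per-queue gate a transmit scheduler might use,
 * treating "frozen" (netif_tx_lock() held on the device) the same way
 * as a stopped (XOFF) queue.
 */
static inline int example_txq_may_transmit(const struct netdev_queue *txq)
{
	return !netif_tx_queue_stopped(txq) &&
	       !netif_tx_queue_frozen(txq);
}
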
/**
* netif_running - test if up
* @dev: network device
@@ -1475,6 +1481,26 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
txq->xmit_lock_owner = smp_processor_id();
}
+static inline int __netif_tx_trylock(struct netdev_queue *txq)
+{
+ int ok = spin_trylock(&txq->_xmit_lock);
+ if (likely(ok))
+ txq->xmit_lock_owner = smp_processor_id();
+ return ok;
+}
+
+static inline void __netif_tx_unlock(struct netdev_queue *txq)
+{
+ txq->xmit_lock_owner = -1;
+ spin_unlock(&txq->_xmit_lock);
+}
+
+static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+{
+ txq->xmit_lock_owner = -1;
+ spin_unlock_bh(&txq->_xmit_lock);
+}
+
/**
* netif_tx_lock - grab network device transmit lock
* @dev: network device
@@ -1484,12 +1510,23 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
*/
static inline void netif_tx_lock(struct net_device *dev)
{
- int cpu = smp_processor_id();
unsigned int i;
+ int cpu;
+ spin_lock(&dev->tx_global_lock);
+ cpu = smp_processor_id();
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+ /* We are the only thread of execution doing a
+ * freeze, but we have to grab the _xmit_lock in
+ * order to synchronize with threads which are in
+ * the ->hard_start_xmit() handler and already
+ * checked the frozen bit.
+ */
__netif_tx_lock(txq, cpu);
+ set_bit(__QUEUE_STATE_FROZEN, &txq->state);
+ __netif_tx_unlock(txq);
}
}
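
As the comment in netif_tx_lock() explains, the freezer only needs each _xmit_lock long enough to publish the frozen bit; after that, transmit paths observe the bit under their own queue lock. A sketch of that transmit side, assuming this kernel's dev->hard_start_xmit method; the function below is hypothetical and only loosely modeled on the qdisc transmit path:

/* Illustrative only: transmit one skb on a specific queue while
 * honouring both the XOFF and the new FROZEN bits.  A caller that
 * loses the race with netif_tx_lock() reports BUSY so the packet can
 * be requeued and retried later.
 */
static int example_xmit_one(struct sk_buff *skb, struct net_device *dev,
			    struct netdev_queue *txq)
{
	int ret = NETDEV_TX_BUSY;

	__netif_tx_lock(txq, smp_processor_id());
	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
		ret = dev->hard_start_xmit(skb, dev);
	__netif_tx_unlock(txq);

	return ret;
}
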
@@ -1499,40 +1536,22 @@ static inline void netif_tx_lock_bh(struct net_device *dev)
netif_tx_lock(dev);
}
-static inline int __netif_tx_trylock(struct netdev_queue *txq)
-{
- int ok = spin_trylock(&txq->_xmit_lock);
- if (likely(ok))
- txq->xmit_lock_owner = smp_processor_id();
- return ok;
-}
-
-static inline int netif_tx_trylock(struct net_device *dev)
-{
- return __netif_tx_trylock(netdev_get_tx_queue(dev, 0));
-}
-
-static inline void __netif_tx_unlock(struct netdev_queue *txq)
-{
- txq->xmit_lock_owner = -1;
- spin_unlock(&txq->_xmit_lock);
-}
-
-static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
-{
- txq->xmit_lock_owner = -1;
- spin_unlock_bh(&txq->_xmit_lock);
-}
-
static inline void netif_tx_unlock(struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- __netif_tx_unlock(txq);
- }
+ /* No need to grab the _xmit_lock here. If the
+ * queue is not stopped for another reason, we
+ * force a schedule.
+ */
+ clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
+ if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+ __netif_schedule(txq->qdisc);
+ }
+ spin_unlock(&dev->tx_global_lock);
}
static inline void netif_tx_unlock_bh(struct net_device *dev)
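
With the rewrite, netif_tx_unlock() no longer drops per-queue spinlocks; it clears the frozen bit and uses __netif_schedule() to kick any qdisc that went idle while the device was frozen. A hedged sketch of the intended lock/unlock pairing, e.g. in a hypothetical multiqueue driver's reset path; the driver function below is illustrative:

/* Illustrative only: quiesce all TX queues of a multiqueue device,
 * rework the hardware state, then unfreeze.  While frozen, no CPU can
 * be inside hard_start_xmit(), yet no _xmit_lock stays held, so
 * lockdep no longer sees num_tx_queues locks of one class held at
 * the same time.
 */
static void example_reset_tx(struct net_device *dev)
{
	netif_tx_lock_bh(dev);		/* freeze every TX queue */

	/* ... reinitialize TX rings, counters, etc. ... */

	netif_tx_unlock_bh(dev);	/* unfreeze and reschedule qdiscs */
}
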
@@ -1556,13 +1575,18 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
static inline void netif_tx_disable(struct net_device *dev)
{
unsigned int i;
+ int cpu;
- netif_tx_lock_bh(dev);
+ local_bh_disable();
+ cpu = smp_processor_id();
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+ __netif_tx_lock(txq, cpu);
netif_tx_stop_queue(txq);
+ __netif_tx_unlock(txq);
}
- netif_tx_unlock_bh(dev);
+ local_bh_enable();
}
static inline void netif_addr_lock(struct net_device *dev)
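
netif_tx_disable() gets the same treatment: rather than freezing the device via netif_tx_lock_bh(), it disables bottom halves once and stops each queue under its own _xmit_lock, so at most one queue lock is held at a time. A minimal usage sketch, assuming a hypothetical device-down path of this era; the function name is illustrative:

/* Illustrative only: stop all TX queues before tearing down rings in
 * a hypothetical dev->stop implementation.  On return every queue has
 * XOFF set and no concurrent hard_start_xmit() caller remains inside
 * the locked section.
 */
static int example_stop(struct net_device *dev)
{
	netif_tx_disable(dev);

	/* ... disable interrupts, free TX descriptors, etc. ... */

	return 0;
}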