author    Jesse Brandeburg <jesse.brandeburg@intel.com>  2006-12-15 10:30:44 +0100
committer Jeff Garzik <jeff@garzik.org>                  2006-12-26 15:51:28 -0500
commit    2b65326e67f89899e8bcaed1989d8cfb0ed01f55 (patch)
tree      56e78baa2b711638eba3bc7d8d9540076dfaf508 /drivers/net
parent    7d16e65ba57f181732ec52626736b27904198edf (diff)
[PATCH] e1000: dynamic itr: take TSO and jumbo into account
The dynamic interrupt rate control patches omitted proper counting for jumbo frames and TSO, resulting in suboptimal interrupt mitigation strategies.

Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/e1000/e1000_main.c | 40
1 file changed, 24 insertions(+), 16 deletions(-)
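Before reading the hunks, a minimal standalone sketch of the average-packet-size heuristic this patch extends may help. It is not the driver code: classify_window and the ITR_* names are invented for illustration, and only the thresholds that also appear in the diff below (8000 bytes/packet, 5 packets, 512 bytes) are taken from the patch.

/* Toy model of e1000's dynamic ITR bucketing: judge one interrupt
 * window by its byte and packet counts.  A high bytes/packets ratio
 * means jumbo frames (or a TSO burst miscounted as one packet), which
 * should get aggressive mitigation (bulk) rather than being treated as
 * latency-sensitive traffic.
 */
enum itr_bucket { ITR_LOWEST, ITR_LOW, ITR_BULK };

static enum itr_bucket classify_window(unsigned int packets, unsigned int bytes)
{
	if (packets == 0)
		return ITR_LOW;			/* nothing to judge this window */

	if (bytes / packets > 8000)		/* jumbo-sized average frame */
		return ITR_BULK;

	if (packets < 5 && bytes > 512)		/* a few mid-sized packets */
		return ITR_LOW;

	return ITR_LOWEST;			/* small, latency-sensitive traffic */
}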
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 73f3a85fd23..62ef267b3d6 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2628,29 +2628,34 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
 	if (packets == 0)
 		goto update_itr_done;
 
-
 	switch (itr_setting) {
 	case lowest_latency:
-		if ((packets < 5) && (bytes > 512))
+		/* jumbo frames get bulk treatment*/
+		if (bytes/packets > 8000)
+			retval = bulk_latency;
+		else if ((packets < 5) && (bytes > 512))
 			retval = low_latency;
 		break;
 	case low_latency: /* 50 usec aka 20000 ints/s */
 		if (bytes > 10000) {
-			if ((packets < 10) ||
-			     ((bytes/packets) > 1200))
+			/* jumbo frames need bulk latency setting */
+			if (bytes/packets > 8000)
+				retval = bulk_latency;
+			else if ((packets < 10) || ((bytes/packets) > 1200))
 				retval = bulk_latency;
 			else if ((packets > 35))
 				retval = lowest_latency;
-		} else if (packets <= 2 && bytes < 512)
+		} else if (bytes/packets > 2000)
+			retval = bulk_latency;
+		else if (packets <= 2 && bytes < 512)
 			retval = lowest_latency;
 		break;
 	case bulk_latency: /* 250 usec aka 4000 ints/s */
 		if (bytes > 25000) {
 			if (packets > 35)
 				retval = low_latency;
-		} else {
-			if (bytes < 6000)
-				retval = low_latency;
+		} else if (bytes < 6000) {
+			retval = low_latency;
 		}
 		break;
 	}
@@ -2679,17 +2684,20 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
 				    adapter->tx_itr,
 				    adapter->total_tx_packets,
 				    adapter->total_tx_bytes);
+	/* conservative mode (itr 3) eliminates the lowest_latency setting */
+	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
+		adapter->tx_itr = low_latency;
+
 	adapter->rx_itr = e1000_update_itr(adapter,
 				    adapter->rx_itr,
 				    adapter->total_rx_packets,
 				    adapter->total_rx_bytes);
+	/* conservative mode (itr 3) eliminates the lowest_latency setting */
+	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
+		adapter->rx_itr = low_latency;
 
 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
 
-	/* conservative mode eliminates the lowest_latency setting */
-	if (current_itr == lowest_latency && (adapter->itr_setting == 3))
-		current_itr = low_latency;
-
 	switch (current_itr) {
 	/* counts and packets in update_itr are dependent on these numbers */
 	case lowest_latency:
@@ -3868,11 +3876,11 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 			cleaned = (i == eop);
 
 			if (cleaned) {
-				/* this packet count is wrong for TSO but has a
-				 * tendency to make dynamic ITR change more
-				 * towards bulk */
+				struct sk_buff *skb = buffer_info->skb;
+				unsigned int segs = skb_shinfo(skb)->gso_segs;
+				total_tx_packets += segs;
 				total_tx_packets++;
-				total_tx_bytes += buffer_info->skb->len;
+				total_tx_bytes += skb->len;
 			}
 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
 			tx_desc->upper.data = 0;
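As a rough illustration of why the last hunk counts TSO segments instead of whole skbs (the byte and segment numbers are invented for the example; only the use of skb_shinfo(skb)->gso_segs comes from the hunk above):

#include <stdio.h>

/* A single 60000-byte TSO send that the hardware cuts into ~1448-byte
 * frames.  Counting it as one packet makes the window's average packet
 * size look like jumbo/bulk traffic; counting the segments reported in
 * gso_segs gives the real MTU-sized average.  (Illustrative values only.)
 */
int main(void)
{
	unsigned int bytes = 60000;
	unsigned int segs  = 42;	/* roughly bytes / 1448 */

	printf("counted as one skb : bytes/packets = %u\n", bytes / 1);    /* 60000 -> bulk_latency   */
	printf("counted per segment: bytes/packets = %u\n", bytes / segs); /* 1428  -> low/lowest     */
	return 0;
}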