Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/ixgbe.h          |  33
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c    | 178
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c   |  37
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c   |   2
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c  | 140
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c     |  76
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.h     |   3
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c     | 253
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h     |  26
9 files changed, 580 insertions, 168 deletions
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 385be601666..8da8eb53508 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -51,11 +51,11 @@
__func__ , ## args)))
/* TX/RX descriptor defines */
-#define IXGBE_DEFAULT_TXD 1024
+#define IXGBE_DEFAULT_TXD 512
#define IXGBE_MAX_TXD 4096
#define IXGBE_MIN_TXD 64
-#define IXGBE_DEFAULT_RXD 1024
+#define IXGBE_DEFAULT_RXD 512
#define IXGBE_MAX_RXD 4096
#define IXGBE_MIN_RXD 64
@@ -106,6 +106,7 @@ struct ixgbe_tx_buffer {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
+ u16 mapped_as_page;
};
struct ixgbe_rx_buffer {
@@ -159,10 +160,13 @@ struct ixgbe_ring {
struct ixgbe_queue_stats stats;
unsigned long reinit_state;
u64 rsc_count; /* stat for coalesced packets */
+ u64 rsc_flush; /* stat for flushed packets */
+ u32 restart_queue; /* track tx queue restarts */
+ u32 non_eop_descs; /* track hardware descriptor chaining */
unsigned int size; /* length in bytes */
dma_addr_t dma; /* phys. address of descriptor ring */
-};
+} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
RING_F_NONE = 0,
@@ -187,7 +191,7 @@ enum ixgbe_ring_f_enum {
struct ixgbe_ring_feature {
int indices;
int mask;
-};
+} ____cacheline_internodealigned_in_smp;
#define MAX_RX_QUEUES 128
#define MAX_TX_QUEUES 128
@@ -273,29 +277,25 @@ struct ixgbe_adapter {
u16 eitr_high;
/* TX */
- struct ixgbe_ring *tx_ring; /* One per active queue */
+ struct ixgbe_ring *tx_ring ____cacheline_aligned_in_smp; /* One per active queue */
int num_tx_queues;
- u64 restart_queue;
- u64 hw_csum_tx_good;
- u64 lsc_int;
- u64 hw_tso_ctxt;
- u64 hw_tso6_ctxt;
u32 tx_timeout_count;
bool detect_tx_hung;
+ u64 restart_queue;
+ u64 lsc_int;
+
/* RX */
- struct ixgbe_ring *rx_ring; /* One per active queue */
+ struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
int num_rx_queues;
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
- u64 hw_csum_rx_good;
u64 non_eop_descs;
int num_msix_vectors;
int max_msix_q_vectors; /* true count of q_vectors for device */
struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
struct msix_entry *msix_entries;
- u64 rx_hdr_split;
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
@@ -340,7 +340,6 @@ struct ixgbe_adapter {
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
- struct net_device_stats net_stats;
u32 test_icr;
struct ixgbe_ring test_tx_ring;
@@ -376,7 +375,8 @@ struct ixgbe_adapter {
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
- u64 rsc_count;
+ u64 rsc_total_count;
+ u64 rsc_total_flush;
u32 wol;
u16 eeprom_version;
};
@@ -397,7 +397,7 @@ enum ixgbe_boards {
extern struct ixgbe_info ixgbe_82598_info;
extern struct ixgbe_info ixgbe_82599_info;
#ifdef CONFIG_IXGBE_DCB
-extern struct dcbnl_rtnl_ops dcbnl_ops;
+extern const struct dcbnl_rtnl_ops dcbnl_ops;
extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
struct ixgbe_dcb_config *dst_dcb_cfg,
int tc_max);
@@ -458,6 +458,7 @@ extern int ixgbe_fcoe_disable(struct net_device *netdev);
extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
+extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
#endif /* IXGBE_FCOE */
#endif /* _IXGBE_H_ */
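The ixgbe.h hunks above move the hot per-ring counters into struct ixgbe_ring and pad the ring and ring-feature structures to a cache-line boundary, so rings polled on different CPUs do not share lines. A minimal standalone sketch of that padding pattern, assuming 64-byte lines and using GCC's aligned attribute in place of the kernel's ____cacheline_internodealigned_in_smp macro (the struct names here are illustrative, not the driver's):

/*
 * Standalone sketch, not part of the patch: why per-ring structures get
 * padded to a cache-line boundary. The kernel macro
 * ____cacheline_internodealigned_in_smp expands to an aligned attribute
 * sized for the internode cache line; a fixed 64-byte constant stands in
 * for it here.
 */
#include <stdio.h>

#define CACHE_LINE 64	/* assumption: 64-byte lines, typical on x86 */

struct ring_unpadded {
	unsigned long head, tail;
};

struct ring_padded {
	unsigned long head, tail;
} __attribute__((aligned(CACHE_LINE)));

int main(void)
{
	/* adjacent array slots of the padded type can never share a line */
	printf("unpadded stride: %zu bytes\n", sizeof(struct ring_unpadded));
	printf("padded stride:   %zu bytes\n", sizeof(struct ring_padded));
	return 0;
}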
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 34b04924c8a..72106898a5c 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -42,6 +42,10 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
@@ -64,7 +68,13 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
/* Set up dual speed SFP+ support */
mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
} else {
- mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
+ if ((mac->ops.get_media_type(hw) ==
+ ixgbe_media_type_backplane) &&
+ (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
+ hw->phy.smart_speed == ixgbe_smart_speed_on))
+ mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
+ else
+ mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
}
}
@@ -337,6 +347,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
media_type = ixgbe_media_type_backplane;
break;
case IXGBE_DEV_ID_82599_SFP:
+ case IXGBE_DEV_ID_82599_SFP_EM:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_82599_CX4:
@@ -479,7 +490,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
hw->mac.autotry_restart = false;
}
- /* The controller may take up to 500ms at 10g to acquire link */
+ /*
+ * Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted. 82599 uses the same timing for 10g SFI.
+ */
+
for (i = 0; i < 5; i++) {
/* Wait for the link partner to also set speed */
msleep(100);
@@ -567,6 +583,111 @@ out:
}
/**
+ * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Implements the Intel SmartSpeed algorithm.
+ **/
+static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = 0;
+ ixgbe_link_speed link_speed;
+ s32 i, j;
+ bool link_up = false;
+ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
+
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+ /*
+ * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
+ * autoneg advertisement if link is unable to be established at the
+ * highest negotiated rate. This can sometimes happen due to integrity
+ * issues with the physical media connection.
+ */
+
+ /* First, try to get link with full advertisement */
+ hw->phy.smart_speed_active = false;
+ for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
+ status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+ if (status)
+ goto out;
+
+ /*
+ * Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
+ * Table 9 in the AN MAS.
+ */
+ for (i = 0; i < 5; i++) {
+ mdelay(100);
+
+ /* If we have link, just jump out */
+ hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+ if (link_up)
+ goto out;
+ }
+ }
+
+ /*
+ * We didn't get link. If we advertised KR plus one of KX4/KX
+ * (or BX4/BX), then disable KR and try again.
+ */
+ if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
+ ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
+ goto out;
+
+ /* Turn SmartSpeed on to disable KR support */
+ hw->phy.smart_speed_active = true;
+ status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+ if (status)
+ goto out;
+
+ /*
+ * Wait for the controller to acquire link. 600ms will allow for
+ * the AN link_fail_inhibit_timer as well for multiple cycles of
+ * parallel detect, both 10g and 1g. This allows for the maximum
+ * connect attempts as defined in the AN MAS table 73-7.
+ */
+ for (i = 0; i < 6; i++) {
+ mdelay(100);
+
+ /* If we have link, just jump out */
+ hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+ if (link_up)
+ goto out;
+ }
+
+ /* We didn't get link. Turn SmartSpeed back off. */
+ hw->phy.smart_speed_active = false;
+ status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+
+out:
+ return status;
+}
+
+/**
* ixgbe_check_mac_link_82599 - Determine link and speed status
* @hw: pointer to hardware structure
* @speed: pointer to link speed
@@ -669,7 +790,8 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
autoc |= IXGBE_AUTOC_KX4_SUPP;
- if (orig_autoc & IXGBE_AUTOC_KR_SUPP)
+ if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
+ (hw->phy.smart_speed_active == false))
autoc |= IXGBE_AUTOC_KR_SUPP;
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
autoc |= IXGBE_AUTOC_KX_SUPP;
@@ -878,6 +1000,10 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->mac.num_rar_entries--;
}
+ /* Store the alternative WWNN/WWPN prefix */
+ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+ &hw->mac.wwpn_prefix);
+
reset_hw_out:
return status;
}
@@ -2414,6 +2540,51 @@ fw_version_out:
return status;
}
+/**
+ * ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
+ * the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function reads the alternative SAN MAC address block in the EEPROM
+ * to check for support of the alternative WWNN/WWPN prefixes.
+ **/
+static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
+{
+ u16 offset, caps;
+ u16 alt_san_mac_blk_offset;
+
+ /* clear output first */
+ *wwnn_prefix = 0xFFFF;
+ *wwpn_prefix = 0xFFFF;
+
+ /* check if alternative SAN MAC is supported */
+ hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+ &alt_san_mac_blk_offset);
+
+ if ((alt_san_mac_blk_offset == 0) ||
+ (alt_san_mac_blk_offset == 0xFFFF))
+ goto wwn_prefix_out;
+
+ /* check capability in alternative san mac address block */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+ hw->eeprom.ops.read(hw, offset, &caps);
+ if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+ goto wwn_prefix_out;
+
+ /* get the corresponding prefix for WWNN/WWPN */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+
+wwn_prefix_out:
+ return 0;
+}
+
static struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
@@ -2425,6 +2596,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.get_mac_addr = &ixgbe_get_mac_addr_generic,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_82599,
.get_device_caps = &ixgbe_get_device_caps_82599,
+ .get_wwn_prefix = &ixgbe_get_wwn_prefix_82599,
.stop_adapter = &ixgbe_stop_adapter_generic,
.get_bus_info = &ixgbe_get_bus_info_generic,
.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
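Taken together, the SmartSpeed paths above bound how long link acquisition can take: up to IXGBE_SMARTSPEED_MAX_RETRIES full-advertisement attempts with five 100ms polls each, then one KR-masked attempt with six 100ms polls. A back-of-the-envelope sketch of that worst case (plain C; the retry constant is the patch's, everything else is arithmetic):

/*
 * Worst-case wait for the SmartSpeed sequence above. The poll counts and
 * 100 ms interval come from the two wait loops in the patch.
 */
#include <stdio.h>

#define SMARTSPEED_MAX_RETRIES	3	/* IXGBE_SMARTSPEED_MAX_RETRIES */

int main(void)
{
	/* phase 1: full advertisement, 5 x 100 ms polls per attempt */
	int full_ms = SMARTSPEED_MAX_RETRIES * 5 * 100;
	/* phase 2: KR masked off, one attempt, 6 x 100 ms polls */
	int degraded_ms = 6 * 100;

	printf("worst-case wait before giving up: %d ms\n",
	       full_ms + degraded_ms);	/* 2100 ms */
	return 0;
}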
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 40ff120a9ad..688b8ca5da3 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1382,10 +1382,10 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
hw->addr_ctrl.overflow_promisc = 0;
/* Zero out the other receive addresses */
- hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use);
- for (i = 1; i <= uc_addr_in_use; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+ hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use + 1);
+ for (i = 0; i < uc_addr_in_use; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
}
/* Add the new addresses */
@@ -1755,17 +1755,24 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
/*
* On backplane, bail out if
* - backplane autoneg was not completed, or if
- * - link partner is not AN enabled
+ * - we are 82599 and link partner is not AN enabled
*/
if (hw->phy.media_type == ixgbe_media_type_backplane) {
links = IXGBE_READ_REG(hw, IXGBE_LINKS);
- links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
- if (((links & IXGBE_LINKS_KX_AN_COMP) == 0) ||
- ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)) {
+ if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
hw->fc.fc_was_autonegged = false;
hw->fc.current_mode = hw->fc.requested_mode;
goto out;
}
+
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+ if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ goto out;
+ }
+ }
}
/*
@@ -1784,6 +1791,20 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
}
/*
+ * Bail out on
+ * - copper or CX4 adapters
+ * - fiber adapters running at 10gig
+ */
+ if ((hw->phy.media_type == ixgbe_media_type_copper) ||
+ (hw->phy.media_type == ixgbe_media_type_cx4) ||
+ ((hw->phy.media_type == ixgbe_media_type_fiber) &&
+ (speed == IXGBE_LINK_SPEED_10GB_FULL))) {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ goto out;
+ }
+
+ /*
* Read the AN advertisement and LP ability registers and resolve
* local flow control settings accordingly
*/
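The ixgbe_common.c hunk above adds early exits so flow-control autonegotiation is only resolved where it can actually run. A tiny standalone predicate capturing those bail-out conditions, under the simplifying assumption that the media type and 10G fiber state are already known (the enum and function names are invented for this sketch):

/*
 * Illustrative predicate only: mirrors the new "keep the requested mode"
 * exits in ixgbe_fc_autoneg().
 */
#include <stdbool.h>
#include <stdio.h>

enum media { MEDIA_BACKPLANE, MEDIA_COPPER, MEDIA_CX4, MEDIA_FIBER };

static bool fc_autoneg_bails_out(enum media media, bool fiber_at_10g)
{
	if (media == MEDIA_COPPER || media == MEDIA_CX4)
		return true;	/* no AN-based flow control resolution */
	if (media == MEDIA_FIBER && fiber_at_10g)
		return true;	/* fiber at 10gig: keep requested mode */
	return false;
}

int main(void)
{
	printf("cx4: %d, fiber@10g: %d, fiber@1g: %d\n",
	       fc_autoneg_bails_out(MEDIA_CX4, false),
	       fc_autoneg_bails_out(MEDIA_FIBER, true),
	       fc_autoneg_bails_out(MEDIA_FIBER, false));
	return 0;
}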
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index a6bc1ef28f9..3c7a79a7d7c 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -563,7 +563,7 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
return rval;
}
-struct dcbnl_rtnl_ops dcbnl_ops = {
+const struct dcbnl_rtnl_ops dcbnl_ops = {
.getstate = ixgbe_dcbnl_get_state,
.setstate = ixgbe_dcbnl_set_state,
.getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 856c18c207f..06a9d18bbdb 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -40,19 +40,27 @@
#define IXGBE_ALL_RAR_ENTRIES 16
+enum {NETDEV_STATS, IXGBE_STATS};
+
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
+ int type;
int sizeof_stat;
int stat_offset;
};
-#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \
- offsetof(struct ixgbe_adapter, m)
+#define IXGBE_STAT(m) IXGBE_STATS, \
+ sizeof(((struct ixgbe_adapter *)0)->m), \
+ offsetof(struct ixgbe_adapter, m)
+#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
+ sizeof(((struct net_device *)0)->m), \
+ offsetof(struct net_device, m)
+
static struct ixgbe_stats ixgbe_gstrings_stats[] = {
- {"rx_packets", IXGBE_STAT(net_stats.rx_packets)},
- {"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
- {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
- {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
+ {"rx_packets", IXGBE_NETDEV_STAT(stats.rx_packets)},
+ {"tx_packets", IXGBE_NETDEV_STAT(stats.tx_packets)},
+ {"rx_bytes", IXGBE_NETDEV_STAT(stats.rx_bytes)},
+ {"tx_bytes", IXGBE_NETDEV_STAT(stats.tx_bytes)},
{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
@@ -60,40 +68,36 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"lsc_int", IXGBE_STAT(lsc_int)},
{"tx_busy", IXGBE_STAT(tx_busy)},
{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
- {"rx_errors", IXGBE_STAT(net_stats.rx_errors)},
- {"tx_errors", IXGBE_STAT(net_stats.tx_errors)},
- {"rx_dropped", IXGBE_STAT(net_stats.rx_dropped)},
- {"tx_dropped", IXGBE_STAT(net_stats.tx_dropped)},
- {"multicast", IXGBE_STAT(net_stats.multicast)},
+ {"rx_errors", IXGBE_NETDEV_STAT(stats.rx_errors)},
+ {"tx_errors", IXGBE_NETDEV_STAT(stats.tx_errors)},
+ {"rx_dropped", IXGBE_NETDEV_STAT(stats.rx_dropped)},
+ {"tx_dropped", IXGBE_NETDEV_STAT(stats.tx_dropped)},
+ {"multicast", IXGBE_NETDEV_STAT(stats.multicast)},
{"broadcast", IXGBE_STAT(stats.bprc)},
{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
- {"collisions", IXGBE_STAT(net_stats.collisions)},
- {"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)},
- {"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)},
- {"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)},
- {"hw_rsc_count", IXGBE_STAT(rsc_count)},
+ {"collisions", IXGBE_NETDEV_STAT(stats.collisions)},
+ {"rx_over_errors", IXGBE_NETDEV_STAT(stats.rx_over_errors)},
+ {"rx_crc_errors", IXGBE_NETDEV_STAT(stats.rx_crc_errors)},
+ {"rx_frame_errors", IXGBE_NETDEV_STAT(stats.rx_frame_errors)},
+ {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
+ {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
- {"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)},
- {"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)},
- {"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)},
- {"tx_carrier_errors", IXGBE_STAT(net_stats.tx_carrier_errors)},
- {"tx_fifo_errors", IXGBE_STAT(net_stats.tx_fifo_errors)},
- {"tx_heartbeat_errors", IXGBE_STAT(net_stats.tx_heartbeat_errors)},
+ {"rx_fifo_errors", IXGBE_NETDEV_STAT(stats.rx_fifo_errors)},
+ {"rx_missed_errors", IXGBE_NETDEV_STAT(stats.rx_missed_errors)},
+ {"tx_aborted_errors", IXGBE_NETDEV_STAT(stats.tx_aborted_errors)},
+ {"tx_carrier_errors", IXGBE_NETDEV_STAT(stats.tx_carrier_errors)},
+ {"tx_fifo_errors", IXGBE_NETDEV_STAT(stats.tx_fifo_errors)},
+ {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(stats.tx_heartbeat_errors)},
{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
{"tx_restart_queue", IXGBE_STAT(restart_queue)},
{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
- {"tx_tcp4_seg_ctxt", IXGBE_STAT(hw_tso_ctxt)},
- {"tx_tcp6_seg_ctxt", IXGBE_STAT(hw_tso6_ctxt)},
{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
- {"rx_csum_offload_good", IXGBE_STAT(hw_csum_rx_good)},
{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
- {"tx_csum_offload_ctxt", IXGBE_STAT(hw_csum_tx_good)},
- {"rx_header_split", IXGBE_STAT(rx_hdr_split)},
{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
@@ -196,6 +200,56 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->autoneg = AUTONEG_DISABLE;
}
+ /* Get PHY type */
+ switch (adapter->hw.phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_cu_unknown:
+ /* Copper 10G-BASET */
+ ecmd->port = PORT_TP;
+ break;
+ case ixgbe_phy_qt:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case ixgbe_phy_nl:
+ case ixgbe_phy_tw_tyco:
+ case ixgbe_phy_tw_unknown:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ switch (adapter->hw.phy.sfp_type) {
+ /* SFP+ devices, further checking needed */
+ case ixgbe_sfp_type_da_cu:
+ case ixgbe_sfp_type_da_cu_core0:
+ case ixgbe_sfp_type_da_cu_core1:
+ ecmd->port = PORT_DA;
+ break;
+ case ixgbe_sfp_type_sr:
+ case ixgbe_sfp_type_lr:
+ case ixgbe_sfp_type_srlr_core0:
+ case ixgbe_sfp_type_srlr_core1:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case ixgbe_sfp_type_not_present:
+ ecmd->port = PORT_NONE;
+ break;
+ case ixgbe_sfp_type_unknown:
+ default:
+ ecmd->port = PORT_OTHER;
+ break;
+ }
+ break;
+ case ixgbe_phy_xaui:
+ ecmd->port = PORT_NONE;
+ break;
+ case ixgbe_phy_unknown:
+ case ixgbe_phy_generic:
+ case ixgbe_phy_sfp_unsupported:
+ default:
+ ecmd->port = PORT_OTHER;
+ break;
+ }
+
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
if (link_up) {
ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
@@ -933,10 +987,21 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
int j, k;
int i;
+ char *p = NULL;
ixgbe_update_stats(adapter);
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
+ switch (ixgbe_gstrings_stats[i].type) {
+ case NETDEV_STATS:
+ p = (char *) netdev +
+ ixgbe_gstrings_stats[i].stat_offset;
+ break;
+ case IXGBE_STATS:
+ p = (char *) adapter +
+ ixgbe_gstrings_stats[i].stat_offset;
+ break;
+ }
+
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
@@ -1255,15 +1320,15 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
return 0;
} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
shared_int = false;
- if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name,
+ if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
netdev)) {
*data = 1;
return -1;
}
- } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED,
+ } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
netdev->name, netdev)) {
shared_int = false;
- } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED,
+ } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
netdev->name, netdev)) {
*data = 1;
return -1;
@@ -1952,6 +2017,10 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
break;
}
+ /* if in mixed tx/rx queues per vector mode, report only rx settings */
+ if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
+ return 0;
+
/* only valid if in constant ITR mode */
switch (adapter->tx_itr_setting) {
case 0:
@@ -1977,12 +2046,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_q_vector *q_vector;
int i;
- /*
- * don't accept tx specific changes if we've got mixed RxTx vectors
- * test and jump out here if needed before changing the rx numbers
- */
- if ((1000000/ec->tx_coalesce_usecs) != adapter->tx_eitr_param &&
- adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
+ /* don't accept tx specific changes if we've got mixed RxTx vectors */
+ if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
+ && ec->tx_coalesce_usecs)
return -EINVAL;
if (ec->tx_max_coalesced_frames_irq)
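The ethtool table above now records, per statistic, which base structure it lives in (NETDEV_STATS vs IXGBE_STATS) plus an offsetof() into that structure, so ixgbe_get_ethtool_stats() can walk one array covering both net_device and adapter counters. A minimal sketch of that two-base lookup, using stand-in structures (none of these names are the driver's):

/*
 * Standalone sketch of the two-base stats table; every name below is
 * illustrative. Each entry carries a type tag and an offsetof() so one
 * loop can read counters out of either structure.
 */
#include <stddef.h>
#include <stdio.h>

enum { FROM_NETDEV, FROM_ADAPTER };

struct netdev_like  { unsigned long long rx_packets; };
struct adapter_like { unsigned long long lsc_int; };

struct stat_def {
	const char *name;
	int type;
	size_t offset;
};

static const struct stat_def defs[] = {
	{ "rx_packets", FROM_NETDEV,
	  offsetof(struct netdev_like, rx_packets) },
	{ "lsc_int", FROM_ADAPTER,
	  offsetof(struct adapter_like, lsc_int) },
};

int main(void)
{
	struct netdev_like  nd = { .rx_packets = 42 };
	struct adapter_like ad = { .lsc_int = 7 };

	for (size_t i = 0; i < sizeof(defs) / sizeof(defs[0]); i++) {
		const char *base = (defs[i].type == FROM_NETDEV)
				   ? (const char *)&nd : (const char *)&ad;
		printf("%s = %llu\n", defs[i].name,
		       *(const unsigned long long *)(base + defs[i].offset));
	}
	return 0;
}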
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index a3c9f99515e..da32a108a7b 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -499,6 +499,10 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+#ifdef CONFIG_IXGBE_DCB
+ u8 tc;
+ u32 up2tc;
+#endif
/* create the pool for ddp if not created yet */
if (!fcoe->pool) {
@@ -540,6 +544,17 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
IXGBE_FCRXCTRL_FCOELLI |
IXGBE_FCRXCTRL_FCCRCBO |
(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
+#ifdef CONFIG_IXGBE_DCB
+ up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
+ for (i = 0; i < MAX_USER_PRIORITY; i++) {
+ tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
+ tc &= (MAX_TRAFFIC_CLASS - 1);
+ if (fcoe->tc == tc) {
+ fcoe->up = i;
+ break;
+ }
+ }
+#endif
}
/**
@@ -671,19 +686,7 @@ out_disable:
*/
u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter)
{
- int i;
- u8 tc;
- u32 up2tc;
-
- up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
- for (i = 0; i < MAX_USER_PRIORITY; i++) {
- tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
- tc &= (MAX_TRAFFIC_CLASS - 1);
- if (adapter->fcoe.tc == tc)
- return 1 << i;
- }
-
- return 0;
+ return 1 << adapter->fcoe.up;
}
/**
@@ -710,6 +713,7 @@ u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
up2tc >>= (i * IXGBE_RTTUP2TC_UP_SHIFT);
up2tc &= (MAX_TRAFFIC_CLASS - 1);
adapter->fcoe.tc = (u8)up2tc;
+ adapter->fcoe.up = i;
return 0;
}
}
@@ -718,3 +722,49 @@ u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
return 1;
}
#endif /* CONFIG_IXGBE_DCB */
+
+/**
+ * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
+ * @netdev: ixgbe adapter
+ * @wwn: the world wide name
+ * @type: the type of world wide name
+ *
+ * Returns the node or port world wide name if both the prefix and the SAN
+ * MAC address are valid. The WWN is formed based on the NAA-2 IEEE Extended
+ * name identifier (ref. T10 FC-LS spec, Sec. 15.3).
+ *
+ * Returns: 0 on success
+ */
+int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
+{
+ int rc = -EINVAL;
+ u16 prefix = 0xffff;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+ switch (type) {
+ case NETDEV_FCOE_WWNN:
+ prefix = mac->wwnn_prefix;
+ break;
+ case NETDEV_FCOE_WWPN:
+ prefix = mac->wwpn_prefix;
+ break;
+ default:
+ break;
+ }
+
+ if ((prefix != 0xffff) &&
+ is_valid_ether_addr(mac->san_addr)) {
+ *wwn = ((u64) prefix << 48) |
+ ((u64) mac->san_addr[0] << 40) |
+ ((u64) mac->san_addr[1] << 32) |
+ ((u64) mac->san_addr[2] << 24) |
+ ((u64) mac->san_addr[3] << 16) |
+ ((u64) mac->san_addr[4] << 8) |
+ ((u64) mac->san_addr[5]);
+ rc = 0;
+ }
+ return rc;
+}
+
+
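ixgbe_fcoe_get_wwn() packs the 16-bit prefix into bits 63:48 and the 48-bit SAN MAC into bits 47:0. A worked standalone example of the same shift arithmetic, using a made-up prefix and MAC purely for illustration:

/*
 * Worked example of the WWN packing in ixgbe_fcoe_get_wwn(); the values
 * below are invented.
 */
#include <stdio.h>

int main(void)
{
	unsigned short prefix = 0x2000;
	unsigned char mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	unsigned long long wwn = (unsigned long long)prefix << 48;

	for (int i = 0; i < 6; i++)	/* MAC byte 0 lands in bits 47:40 */
		wwn |= (unsigned long long)mac[i] << (8 * (5 - i));

	printf("WWN = 0x%016llx\n", wwn);	/* 0x2000001b21aabbcc */
	return 0;
}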
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index b5dee7b3ef1..de8ff53187d 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -62,7 +62,10 @@ struct ixgbe_fcoe_ddp {
};
struct ixgbe_fcoe {
+#ifdef CONFIG_IXGBE_DCB
u8 tc;
+ u8 up;
+#endif
spinlock_t lock;
struct pci_pool *pool;
struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a5036f7c192..247ed2a2476 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -98,6 +98,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
+ board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
@@ -216,10 +218,20 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
struct ixgbe_tx_buffer
*tx_buffer_info)
{
- tx_buffer_info->dma = 0;
+ if (tx_buffer_info->dma) {
+ if (tx_buffer_info->mapped_as_page)
+ pci_unmap_page(adapter->pdev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_single(adapter->pdev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ PCI_DMA_TODEVICE);
+ tx_buffer_info->dma = 0;
+ }
if (tx_buffer_info->skb) {
- skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
- DMA_TO_DEVICE);
dev_kfree_skb_any(tx_buffer_info->skb);
tx_buffer_info->skb = NULL;
}
@@ -240,11 +252,11 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
{
- int tc;
u32 txoff = IXGBE_TFCS_TXOFF;
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ int tc;
int reg_idx = tx_ring->reg_idx;
int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
@@ -401,7 +413,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
!test_bit(__IXGBE_DOWN, &adapter->state)) {
netif_wake_subqueue(netdev, tx_ring->queue_index);
- ++adapter->restart_queue;
+ ++tx_ring->restart_queue;
}
}
@@ -423,8 +435,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
tx_ring->total_packets += total_packets;
tx_ring->stats.packets += total_packets;
tx_ring->stats.bytes += total_bytes;
- adapter->net_stats.tx_bytes += total_bytes;
- adapter->net_stats.tx_packets += total_packets;
+ netdev->stats.tx_bytes += total_bytes;
+ netdev->stats.tx_packets += total_packets;
return (count < tx_ring->work_limit);
}
@@ -612,7 +624,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
/* It must be a TCP or UDP packet with a valid checksum */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- adapter->hw_csum_rx_good++;
}
static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
@@ -669,21 +680,18 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
if (!bi->skb) {
struct sk_buff *skb;
- skb = netdev_alloc_skb(adapter->netdev,
- (rx_ring->rx_buf_len +
- NET_IP_ALIGN));
+ /* netdev_alloc_skb reserves 32 bytes up front!! */
+ uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES;
+ skb = netdev_alloc_skb(adapter->netdev, bufsz);
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
- /*
- * Make buffer alignment 2 beyond a 16 byte boundary
- * this will result in a 16 byte aligned IP header after
- * the 14 byte MAC header is removed
- */
- skb_reserve(skb, NET_IP_ALIGN);
+ /* advance the data pointer to the next cache line */
+ skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES)
+ - skb->data));
bi->skb = skb;
bi->dma = pci_map_single(pdev, skb->data,
@@ -735,12 +743,14 @@ static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
/**
* ixgbe_transform_rsc_queue - change rsc queue into a full packet
* @skb: pointer to the last skb in the rsc queue
+ * @count: pointer to number of packets coalesced in this context
*
* This function changes a queue full of hw rsc buffers into a completed
* packet. It uses the ->prev pointers to find the first packet and then
* turns it into the frag list owner.
**/
-static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
+ u64 *count)
{
unsigned int frag_list_size = 0;
@@ -749,6 +759,7 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
frag_list_size += skb->len;
skb->prev = NULL;
skb = prev;
+ *count += 1;
}
skb_shinfo(skb)->frag_list = skb->next;
@@ -764,6 +775,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
int *work_done, int work_to_do)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
@@ -793,8 +805,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- if (hdr_info & IXGBE_RXDADV_SPH)
- adapter->rx_hdr_split++;
if (len > IXGBE_RX_HDR_SIZE)
len = IXGBE_RX_HDR_SIZE;
upper_len = le16_to_cpu(rx_desc->wb.upper.length);
@@ -804,7 +814,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
cleaned = true;
skb = rx_buffer_info->skb;
- prefetch(skb->data - NET_IP_ALIGN);
+ prefetch(skb->data);
rx_buffer_info->skb = NULL;
if (rx_buffer_info->dma) {
@@ -850,14 +860,20 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
IXGBE_RXDADV_NEXTP_SHIFT;
next_buffer = &rx_ring->rx_buffer_info[nextp];
- rx_ring->rsc_count += (rsc_count - 1);
} else {
next_buffer = &rx_ring->rx_buffer_info[i];
}
if (staterr & IXGBE_RXD_STAT_EOP) {
if (skb->prev)
- skb = ixgbe_transform_rsc_queue(skb);
+ skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
+ rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
+ else
+ rx_ring->rsc_count++;
+ rx_ring->rsc_flush++;
+ }
rx_ring->stats.packets++;
rx_ring->stats.bytes += skb->len;
} else {
@@ -870,7 +886,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
skb->next = next_buffer->skb;
skb->next->prev = skb;
}
- adapter->non_eop_descs++;
+ rx_ring->non_eop_descs++;
goto next_desc;
}
@@ -935,8 +951,8 @@ next_desc:
rx_ring->total_packets += total_rx_packets;
rx_ring->total_bytes += total_rx_bytes;
- adapter->net_stats.rx_bytes += total_rx_bytes;
- adapter->net_stats.rx_packets += total_rx_packets;
+ netdev->stats.rx_bytes += total_rx_bytes;
+ netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@@ -1209,6 +1225,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
adapter->link_check_timeout = jiffies;
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
+ IXGBE_WRITE_FLUSH(hw);
schedule_work(&adapter->watchdog_task);
}
}
@@ -1312,8 +1329,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
r_idx + 1);
}
- /* disable interrupts on this vector only */
- ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ /* EIAM disabled interrupts (on this vector) for us */
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -1344,10 +1360,8 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
if (!q_vector->rxr_count)
return IRQ_HANDLED;
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
/* disable interrupts on this vector only */
- ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ /* EIAM disabled interrupts (on this vector) for us */
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -1382,8 +1396,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
r_idx + 1);
}
- /* disable interrupts on this vector only */
- ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ /* EIAM disabled interrupts (on this vector) for us */
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -1667,7 +1680,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
sprintf(adapter->name[vector], "%s:lsc", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
- &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+ ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
if (err) {
DPRINTK(PROBE, ERR,
"request_irq for msix_lsc failed: %d\n", err);
@@ -1838,10 +1851,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
err = ixgbe_request_msix_irqs(adapter);
} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
- err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
+ err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
netdev->name, netdev);
} else {
- err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
+ err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
netdev->name, netdev);
}
@@ -2063,18 +2076,18 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
* ixgbe_configure_rscctl - enable RSC for the indicated ring
* @adapter: address of board private structure
* @index: index of ring to set
- * @rx_buf_len: rx buffer length
**/
-static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index,
- int rx_buf_len)
+static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
{
struct ixgbe_ring *rx_ring;
struct ixgbe_hw *hw = &adapter->hw;
int j;
u32 rscctrl;
+ int rx_buf_len;
rx_ring = &adapter->rx_ring[index];
j = rx_ring->reg_idx;
+ rx_buf_len = rx_ring->rx_buf_len;
rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
rscctrl |= IXGBE_RSCCTL_RSCEN;
/*
@@ -2282,7 +2295,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
/* Enable 82599 HW-RSC */
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_configure_rscctl(adapter, i, rx_buf_len);
+ ixgbe_configure_rscctl(adapter, i);
/* Disable RSC for ACK packets */
IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
@@ -2333,23 +2346,25 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
* not in DCB mode.
*/
ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
+
+ /* Disable CFI check */
+ ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+
+ /* enable VLAN tag stripping */
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+ ctrl |= IXGBE_VLNCTRL_VME;
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- ctrl |= IXGBE_VLNCTRL_VFE;
- /* enable VLAN tag insert/strip */
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
for (i = 0; i < adapter->num_rx_queues; i++) {
+ u32 ctrl;
j = adapter->rx_ring[i].reg_idx;
ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
ctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
}
}
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+
ixgbe_vlan_rx_add_vid(netdev, 0);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2699,7 +2714,22 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
}
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ /*
+ * use EIAM to auto-mask when MSI-X interrupt is asserted
+ * this saves a register write for every interrupt
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+ break;
+ default:
+ case ixgbe_mac_82599EB:
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+ break;
+ }
+ } else {
/* legacy interrupts, use EIAM to auto-mask when reading EICR,
* specifically only auto mask tx and rx interrupts */
IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
@@ -3632,10 +3662,10 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
* than CPU's. So let's be conservative and only ask for
- * (roughly) twice the number of vectors as there are CPU's.
+ * (roughly) the same number of vectors as there are CPU's.
*/
v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
- (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+ (int)num_online_cpus()) + NON_Q_VECTORS;
/*
* At the same time, hardware can only support a maximum of
@@ -3943,8 +3973,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
adapter->ring_feature[RING_F_FCOE].indices = 0;
+#ifdef CONFIG_IXGBE_DCB
/* Default traffic class to use for FCoE */
adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
+#endif
#endif /* IXGBE_FCOE */
}
@@ -4475,20 +4507,32 @@ static void ixgbe_shutdown(struct pci_dev *pdev)
**/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
+ struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
u64 total_mpc = 0;
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
- if (hw->mac.type == ixgbe_mac_82599EB) {
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
u64 rsc_count = 0;
+ u64 rsc_flush = 0;
for (i = 0; i < 16; i++)
adapter->hw_rx_no_dma_resources +=
IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
- for (i = 0; i < adapter->num_rx_queues; i++)
+ for (i = 0; i < adapter->num_rx_queues; i++) {
rsc_count += adapter->rx_ring[i].rsc_count;
- adapter->rsc_count = rsc_count;
+ rsc_flush += adapter->rx_ring[i].rsc_flush;
+ }
+ adapter->rsc_total_count = rsc_count;
+ adapter->rsc_total_flush = rsc_flush;
}
+ /* gather per-queue stats into the adapter struct */
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->restart_queue += adapter->tx_ring[i].restart_queue;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->non_eop_descs += adapter->rx_ring[i].non_eop_descs;
+
adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
for (i = 0; i < 8; i++) {
/* for packet buffers not used, the register should read 0 */
@@ -4594,15 +4638,15 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
/* Fill out the OS statistics structure */
- adapter->net_stats.multicast = adapter->stats.mprc;
+ netdev->stats.multicast = adapter->stats.mprc;
/* Rx Errors */
- adapter->net_stats.rx_errors = adapter->stats.crcerrs +
+ netdev->stats.rx_errors = adapter->stats.crcerrs +
adapter->stats.rlec;
- adapter->net_stats.rx_dropped = 0;
- adapter->net_stats.rx_length_errors = adapter->stats.rlec;
- adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
- adapter->net_stats.rx_missed_errors = total_mpc;
+ netdev->stats.rx_dropped = 0;
+ netdev->stats.rx_length_errors = adapter->stats.rlec;
+ netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+ netdev->stats.rx_missed_errors = total_mpc;
}
/**
@@ -4871,14 +4915,12 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
iph->daddr, 0,
IPPROTO_TCP,
0);
- adapter->hw_tso_ctxt++;
} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
- adapter->hw_tso6_ctxt++;
}
i = tx_ring->next_to_use;
@@ -4997,7 +5039,6 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
- adapter->hw_csum_tx_good++;
i++;
if (i == tx_ring->count)
i = 0;
@@ -5014,23 +5055,16 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
struct sk_buff *skb, u32 tx_flags,
unsigned int first)
{
+ struct pci_dev *pdev = adapter->pdev;
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned int len;
unsigned int total = skb->len;
unsigned int offset = 0, size, count = 0, i;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
- dma_addr_t *map;
i = tx_ring->next_to_use;
- if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
- dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
- return 0;
- }
-
- map = skb_shinfo(skb)->dma_maps;
-
if (tx_flags & IXGBE_TX_FLAGS_FCOE)
/* excluding fcoe_crc_eof for FCoE */
total -= sizeof(struct fcoe_crc_eof);
@@ -5041,7 +5075,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
+ tx_buffer_info->mapped_as_page = false;
+ tx_buffer_info->dma = pci_map_single(pdev,
+ skb->data + offset,
+ size, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -5062,7 +5101,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
frag = &skb_shinfo(skb)->frags[f];
len = min((unsigned int)frag->size, total);
- offset = 0;
+ offset = frag->page_offset;
while (len) {
i++;
@@ -5073,7 +5112,13 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = map[f] + offset;
+ tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ frag->page,
+ offset, size,
+ PCI_DMA_TODEVICE);
+ tx_buffer_info->mapped_as_page = true;
+ if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -5090,6 +5135,27 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
tx_ring->tx_buffer_info[first].next_to_watch = i;
return count;
+
+dma_error:
+ dev_err(&pdev->dev, "TX DMA map failed\n");
+
+ /* clear timestamp and dma mappings for failed tx_buffer_info map */
+ tx_buffer_info->dma = 0;
+ tx_buffer_info->time_stamp = 0;
+ tx_buffer_info->next_to_watch = 0;
+ count--;
+
+ /* clear timestamp and dma mappings for remaining portion of packet */
+ while (count >= 0) {
+ count--;
+ i--;
+ if (i < 0)
+ i += tx_ring->count;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ }
+
+ return count;
}
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
@@ -5209,8 +5275,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
struct ixgbe_ring *tx_ring, int size)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
netif_stop_subqueue(netdev, tx_ring->queue_index);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
@@ -5224,7 +5288,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_start_subqueue(netdev, tx_ring->queue_index);
- ++adapter->restart_queue;
+ ++tx_ring->restart_queue;
return 0;
}
@@ -5239,10 +5303,19 @@ static int ixgbe_maybe_stop_tx(struct net_device *netdev,
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int txq = smp_processor_id();
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
- return smp_processor_id();
+ return txq;
+#ifdef IXGBE_FCOE
+ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+ (skb->protocol == htons(ETH_P_FCOE))) {
+ txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
+ txq += adapter->ring_feature[RING_F_FCOE].mask;
+ return txq;
+ }
+#endif
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;
@@ -5257,7 +5330,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
unsigned int first;
unsigned int tx_flags = 0;
u8 hdr_len = 0;
- int r_idx = 0, tso;
+ int tso;
int count = 0;
unsigned int f;
@@ -5265,13 +5338,13 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
tx_flags |= vlan_tx_tag_get(skb);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
- tx_flags |= (skb->queue_mapping << 13);
+ tx_flags |= ((skb->queue_mapping & 0x7) << 13);
}
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
if (skb->priority != TC_PRIO_CONTROL) {
- tx_flags |= (skb->queue_mapping << 13);
+ tx_flags |= ((skb->queue_mapping & 0x7) << 13);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
} else {
@@ -5280,17 +5353,18 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
}
}
- r_idx = skb->queue_mapping;
- tx_ring = &adapter->tx_ring[r_idx];
+ tx_ring = &adapter->tx_ring[skb->queue_mapping];
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
(skb->protocol == htons(ETH_P_FCOE))) {
tx_flags |= IXGBE_TX_FLAGS_FCOE;
#ifdef IXGBE_FCOE
- r_idx = smp_processor_id();
- r_idx &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
- r_idx += adapter->ring_feature[RING_F_FCOE].mask;
- tx_ring = &adapter->tx_ring[r_idx];
+#ifdef CONFIG_IXGBE_DCB
+ tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
+ << IXGBE_TX_FLAGS_VLAN_SHIFT);
+ tx_flags |= ((adapter->fcoe.up << 13)
+ << IXGBE_TX_FLAGS_VLAN_SHIFT);
+#endif
#endif
}
/* four things can cause us to need a context descriptor */
@@ -5372,10 +5446,8 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
**/
static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
/* only return the current stats */
- return &adapter->net_stats;
+ return &netdev->stats;
}
/**
@@ -5527,6 +5599,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
.ndo_fcoe_enable = ixgbe_fcoe_enable,
.ndo_fcoe_disable = ixgbe_fcoe_disable,
+ .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
#endif /* IXGBE_FCOE */
};
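The ixgbe_tx_map() rework above replaces skb_dma_map() with per-fragment pci_map_single()/pci_map_page() calls, which means a failure partway through must unwind every mapping already made. A minimal sketch of that reverse-order unwind pattern, with stub helpers standing in for the PCI API and a simulated failure point:

/*
 * Standalone sketch of the dma_error unwind pattern; map_fragment() and
 * unmap_fragment() are stubs, not real PCI calls.
 */
#include <stdbool.h>
#include <stdio.h>

#define NFRAGS 4

static bool map_fragment(int idx)
{
	return idx != 2;	/* simulate a failure on the third fragment */
}

static void unmap_fragment(int idx)
{
	printf("unmapped fragment %d\n", idx);
}

int main(void)
{
	int mapped = 0;

	for (int i = 0; i < NFRAGS; i++) {
		if (!map_fragment(i))
			goto dma_error;
		mapped++;
	}
	printf("all %d fragments mapped\n", mapped);
	return 0;

dma_error:
	while (mapped-- > 0)	/* release everything, in reverse order */
		unmap_fragment(mapped);
	return 1;
}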
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index ef4bdd58e01..21b6633da57 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -52,6 +52,7 @@
#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
+#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
@@ -1538,6 +1539,16 @@
#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
#define IXGBE_FW_PATCH_VERSION_4 0x7
+/* Alternative SAN MAC Address Block */
+#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
+
/* PCI Bus Info */
#define IXGBE_PCI_LINK_STATUS 0xB2
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
@@ -2171,6 +2182,14 @@ enum ixgbe_fc_mode {
ixgbe_fc_default
};
+/* Smart Speed Settings */
+#define IXGBE_SMARTSPEED_MAX_RETRIES 3
+enum ixgbe_smart_speed {
+ ixgbe_smart_speed_auto = 0,
+ ixgbe_smart_speed_on,
+ ixgbe_smart_speed_off
+};
+
/* PCI bus types */
enum ixgbe_bus_type {
ixgbe_bus_type_unknown = 0,
@@ -2336,6 +2355,7 @@ struct ixgbe_mac_operations {
s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
+ s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *);
void (*set_lan_id)(struct ixgbe_hw *);
@@ -2407,6 +2427,10 @@ struct ixgbe_mac_info {
u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ /* prefix for World Wide Node Name (WWNN) */
+ u16 wwnn_prefix;
+ /* prefix for World Wide Port Name (WWPN) */
+ u16 wwpn_prefix;
s32 mc_filter_type;
u32 mcft_size;
u32 vft_size;
@@ -2431,6 +2455,8 @@ struct ixgbe_phy_info {
enum ixgbe_media_type media_type;
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
+ enum ixgbe_smart_speed smart_speed;
+ bool smart_speed_active;
bool multispeed_fiber;
};
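The alternative SAN MAC address block defines above describe an indirect EEPROM layout: word 0x27 holds a pointer to the block, and the capability word, WWNN prefix, and WWPN prefix sit at fixed offsets from that pointer, which is how ixgbe_get_wwn_prefix_82599() walks it. A standalone sketch of that walk against a fabricated EEPROM image (the offsets are the patch's; all stored values are invented):

/*
 * Standalone walk of the alternative SAN MAC block layout with a fake
 * EEPROM image behind eeprom_read().
 */
#include <stdio.h>

#define ALT_SAN_MAC_ADDR_BLK_PTR	0x27
#define ALT_SAN_MAC_ADDR_CAPS_OFFSET	0x0
#define ALT_SAN_MAC_ADDR_WWNN_OFFSET	0x7
#define ALT_SAN_MAC_ADDR_WWPN_OFFSET	0x8
#define ALT_SAN_MAC_ADDR_CAPS_ALTWWN	0x1

static unsigned short eeprom_read(unsigned int word)
{
	/* fake image: word 0x27 points at a block at 0x40 with ALTWWN set */
	switch (word) {
	case ALT_SAN_MAC_ADDR_BLK_PTR:			return 0x0040;
	case 0x40 + ALT_SAN_MAC_ADDR_CAPS_OFFSET:	return 0x0001;
	case 0x40 + ALT_SAN_MAC_ADDR_WWNN_OFFSET:	return 0x2000;
	case 0x40 + ALT_SAN_MAC_ADDR_WWPN_OFFSET:	return 0x2001;
	default:					return 0xFFFF;
	}
}

int main(void)
{
	unsigned short blk = eeprom_read(ALT_SAN_MAC_ADDR_BLK_PTR);

	if (blk == 0 || blk == 0xFFFF ||
	    !(eeprom_read(blk + ALT_SAN_MAC_ADDR_CAPS_OFFSET) &
	      ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) {
		puts("no alternative WWNN/WWPN prefixes");
		return 0;
	}
	printf("wwnn_prefix=0x%04x wwpn_prefix=0x%04x\n",
	       eeprom_read(blk + ALT_SAN_MAC_ADDR_WWNN_OFFSET),
	       eeprom_read(blk + ALT_SAN_MAC_ADDR_WWPN_OFFSET));
	return 0;
}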