From 712744bebef1e47623244004a2770d0438b5b3f7 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 26 Aug 2008 04:26:56 -0700 Subject: ixgbe: fix rx csum return status misinterpretation the driver was misinterpreting rx_csum return value in the descriptor so occassionally we would indicate an rx_csum error in our stats when there was none. This would have no effect on traffic because we would just hand the packet to the stack anyway without the offload flag set, but would increase CPU for those packets that needed a recompute. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 53f41b649f0..b14192f369d 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -420,14 +420,12 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, * @skb: skb currently being received and modified **/ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, - u32 status_err, - struct sk_buff *skb) + u32 status_err, struct sk_buff *skb) { skb->ip_summed = CHECKSUM_NONE; - /* Ignore Checksum bit is set, or rx csum disabled */ - if ((status_err & IXGBE_RXD_STAT_IXSM) || - !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) + /* Rx csum disabled */ + if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) return; /* if IP and error */ -- cgit v1.2.3 From 9da09bb1b806a85a0bc4fb5426fb3022f56aad19 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 26 Aug 2008 04:26:59 -0700 Subject: ixgbe: add little endian annotations for sparse Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_type.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index c0282a223df..781f137decc 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -1007,15 +1007,15 @@ struct ixgbe_legacy_tx_desc { __le32 data; struct { __le16 length; /* Data buffer length */ - u8 cso; /* Checksum offset */ - u8 cmd; /* Descriptor control */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ } flags; } lower; union { __le32 data; struct { u8 status; /* Descriptor status */ - u8 css; /* Checksum start */ + u8 css; /* Checksum start */ __le16 vlan; } fields; } upper; @@ -1039,9 +1039,9 @@ union ixgbe_adv_tx_desc { struct ixgbe_legacy_rx_desc { __le64 buffer_addr; /* Address of the descriptor's data buffer */ __le16 length; /* Length of data DMAed into data buffer */ - u16 csum; /* Packet checksum */ - u8 status; /* Descriptor status */ - u8 errors; /* Descriptor Errors */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ __le16 vlan; }; @@ -1061,7 +1061,7 @@ union ixgbe_adv_rx_desc { __le32 rss; /* RSS Hash */ struct { __le16 ip_id; /* IP id */ - u16 csum; /* Packet Checksum */ + __le16 csum; /* Packet Checksum */ } csum_ip; } hi_dword; } lower; -- cgit v1.2.3 From 2c5645cf65dc6dce15dac47a7cdfabb85224fede Mon Sep 17 00:00:00 2001 From: Christopher Leech Date: Tue, 26 Aug 2008 04:27:02 -0700 Subject: ixgbe: Implement HAVE_SET_RX_MODE Implement HAVE_SET_RX_MODE in the driver for MC and UC lists. 
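
For orientation, a condensed sketch of how the pieces introduced here fit together, paraphrased from the ixgbe_main.c and ixgbe_common.c hunks below (the example_* name is illustrative only, not part of the patch): ixgbe_set_rx_mode() hands the kernel's unicast and multicast lists to the shared address code through one iterator callback, and the shared helpers walk the linked list entry by entry instead of expecting a packed address array.

	/* Illustrative sketch, paraphrased from this patch's hunks. */
	static void example_set_rx_mode(struct net_device *netdev)
	{
		struct ixgbe_adapter *adapter = netdev_priv(netdev);
		struct ixgbe_hw *hw = &adapter->hw;
		u8 *uc = netdev->uc_count ? netdev->uc_list->dmi_addr : NULL;
		u8 *mc = netdev->mc_count ? netdev->mc_list->dmi_addr : NULL;

		/* secondary unicast: RAR entries first, promiscuous on overflow */
		ixgbe_update_uc_addr_list(hw, uc, netdev->uc_count,
					  ixgbe_addr_list_itr);
		/* multicast: unused RAR entries first, then the MTA hash table */
		ixgbe_update_mc_addr_list(hw, mc, netdev->mc_count,
					  ixgbe_addr_list_itr);
	}

The promiscuous-overflow bookkeeping (user_set_promisc vs. overflow_promisc) lives in the shared ixgbe_update_uc_addr_list() shown in the ixgbe_common.c hunk.
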
Signed-off-by: Christopher Leech Signed-off-by: Peter P Waskiewicz Jr Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_82598.c | 6 +- drivers/net/ixgbe/ixgbe_common.c | 123 +++++++++++++++++++++++++++++++++++---- drivers/net/ixgbe/ixgbe_common.h | 4 +- drivers/net/ixgbe/ixgbe_main.c | 72 +++++++++++++---------- drivers/net/ixgbe/ixgbe_type.h | 14 +++-- 5 files changed, 169 insertions(+), 50 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index f96358b641a..ba09063260d 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -36,6 +36,8 @@ #define IXGBE_82598_MAX_TX_QUEUES 32 #define IXGBE_82598_MAX_RX_QUEUES 64 #define IXGBE_82598_RAR_ENTRIES 16 +#define IXGBE_82598_MC_TBL_SIZE 128 +#define IXGBE_82598_VFT_TBL_SIZE 128 static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw); static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, @@ -60,7 +62,9 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) { hw->mac.num_rx_queues = IXGBE_82598_MAX_RX_QUEUES; hw->mac.num_tx_queues = IXGBE_82598_MAX_TX_QUEUES; - hw->mac.num_rx_addrs = IXGBE_82598_RAR_ENTRIES; + hw->mac.mcft_size = IXGBE_82598_MC_TBL_SIZE; + hw->mac.vft_size = IXGBE_82598_VFT_TBL_SIZE; + hw->mac.num_rar_entries = IXGBE_82598_RAR_ENTRIES; /* PHY ops are filled in by default properly for Fiber only */ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index 7fd6aeb1b02..a9f4d0e58e1 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -661,7 +661,7 @@ s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) { u32 i; - u32 rar_entries = hw->mac.num_rx_addrs; + u32 rar_entries = hw->mac.num_rar_entries; /* * If the current mac address is valid, assume it is a software override @@ -705,12 +705,113 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); hw_dbg(hw, " Clearing MTA\n"); - for (i = 0; i < IXGBE_MC_TBL_SIZE; i++) + for (i = 0; i < hw->mac.mcft_size; i++) IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); return 0; } +/** + * ixgbe_add_uc_addr - Adds a secondary unicast address. + * @hw: pointer to hardware structure + * @addr: new address + * + * Adds it to unused receive address register or goes into promiscuous mode. 
+ **/ +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr) +{ + u32 rar_entries = hw->mac.num_rar_entries; + u32 rar; + + hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + /* + * Place this address in the RAR if there is room, + * else put the controller into promiscuous mode + */ + if (hw->addr_ctrl.rar_used_count < rar_entries) { + rar = hw->addr_ctrl.rar_used_count - + hw->addr_ctrl.mc_addr_in_rar_count; + ixgbe_set_rar(hw, rar, addr, 0, IXGBE_RAH_AV); + hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar); + hw->addr_ctrl.rar_used_count++; + } else { + hw->addr_ctrl.overflow_promisc++; + } + + hw_dbg(hw, "ixgbe_add_uc_addr Complete\n"); +} + +/** + * ixgbe_update_uc_addr_list - Updates MAC list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new addresses + * @addr_count: number of addresses + * @next: iterator function to walk the address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + * + * Drivers using secondary unicast addresses must set user_set_promisc when + * manually putting the device into promiscuous mode. + **/ +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr next) +{ + u8 *addr; + u32 i; + u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; + u32 uc_addr_in_use; + u32 fctrl; + u32 vmdq; + + /* + * Clear accounting of old secondary address list, + * don't count RAR[0] + */ + uc_addr_in_use = hw->addr_ctrl.rar_used_count - + hw->addr_ctrl.mc_addr_in_rar_count - 1; + hw->addr_ctrl.rar_used_count -= uc_addr_in_use; + hw->addr_ctrl.overflow_promisc = 0; + + /* Zero out the other receive addresses */ + hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use); + for (i = 1; i <= uc_addr_in_use; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); + } + + /* Add the new addresses */ + for (i = 0; i < addr_count; i++) { + hw_dbg(hw, " Adding the secondary addresses:\n"); + addr = next(hw, &addr_list, &vmdq); + ixgbe_add_uc_addr(hw, addr); + } + + if (hw->addr_ctrl.overflow_promisc) { + /* enable promisc if not already in overflow or set by user */ + if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + hw_dbg(hw, " Entering address overflow promisc mode\n"); + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_UPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + } else { + /* only disable if set by overflow, not by user */ + if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + hw_dbg(hw, " Leaving address overflow promisc mode\n"); + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl &= ~IXGBE_FCTRL_UPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + } + + hw_dbg(hw, "ixgbe_update_uc_addr_list Complete\n"); + return 0; +} + /** * ixgbe_mta_vector - Determines bit-vector in multicast table to set * @hw: pointer to hardware structure @@ -794,7 +895,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) **/ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) { - u32 rar_entries = hw->mac.num_rx_addrs; + u32 rar_entries = hw->mac.num_rar_entries; hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", mc_addr[0], mc_addr[1], mc_addr[2], @@ -823,7 +924,7 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) * @hw: pointer 
to hardware structure * @mc_addr_list: the list of new multicast addresses * @mc_addr_count: number of addresses - * @pad: number of bytes between addresses in the list + * @next: iterator function to walk the multicast address list * * The given list replaces any existing list. Clears the MC addrs from receive * address registers and the multicast table. Uses unsed receive address @@ -831,10 +932,11 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) * multicast table. **/ s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, u32 pad) + u32 mc_addr_count, ixgbe_mc_addr_itr next) { u32 i; - u32 rar_entries = hw->mac.num_rx_addrs; + u32 rar_entries = hw->mac.num_rar_entries; + u32 vmdq; /* * Set the new number of MC addresses that we are being requested to @@ -854,14 +956,13 @@ s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, /* Clear the MTA */ hw_dbg(hw, " Clearing MTA\n"); - for (i = 0; i < IXGBE_MC_TBL_SIZE; i++) + for (i = 0; i < hw->mac.mcft_size; i++) IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); /* Add the new addresses */ for (i = 0; i < mc_addr_count; i++) { hw_dbg(hw, " Adding the multicast addresses:\n"); - ixgbe_add_mc_addr(hw, mc_addr_list + - (i * (IXGBE_ETH_LENGTH_OF_ADDRESS + pad))); + ixgbe_add_mc_addr(hw, next(hw, &mc_addr_list, &vmdq)); } /* Enable mta */ @@ -884,11 +985,11 @@ static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) u32 offset; u32 vlanbyte; - for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++) + for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) - for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++) + for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), 0); diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index de6ddd5d04a..c75ecba9ccd 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -47,7 +47,9 @@ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val); s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, u32 enable_addr); s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, u32 pad); + u32 mc_addr_count, ixgbe_mc_addr_itr next); +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *uc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr next); s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); s32 ixgbe_validate_mac_addr(u8 *mac_addr); diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index b14192f369d..87ef2db8c43 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1619,23 +1619,37 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) } } +static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq) +{ + struct dev_mc_list *mc_ptr; + u8 *addr = *mc_addr_ptr; + *vmdq = 0; + + mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]); + if (mc_ptr->next) + *mc_addr_ptr = mc_ptr->next->dmi_addr; + else + *mc_addr_ptr = NULL; + + return addr; +} + /** - * ixgbe_set_multi - Multicast and Promiscuous mode set + * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure * - * The set_multi entry point is called whenever the multicast address - * list or the network interface flags are updated. 
This routine is - * responsible for configuring the hardware for proper multicast, - * promiscuous mode, and all-multi behavior. + * The set_rx_method entry point is called whenever the unicast/multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast and + * promiscuous mode. **/ -static void ixgbe_set_multi(struct net_device *netdev) +static void ixgbe_set_rx_mode(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; - u8 *mta_list; u32 fctrl, vlnctrl; - int i; + u8 *addr_list = NULL; + int addr_count = 0; /* Check for Promiscuous and All Multicast modes */ @@ -1643,6 +1657,7 @@ static void ixgbe_set_multi(struct net_device *netdev) vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); if (netdev->flags & IFF_PROMISC) { + hw->addr_ctrl.user_set_promisc = 1; fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); vlnctrl &= ~IXGBE_VLNCTRL_VFE; } else { @@ -1653,33 +1668,25 @@ static void ixgbe_set_multi(struct net_device *netdev) fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); } vlnctrl |= IXGBE_VLNCTRL_VFE; + hw->addr_ctrl.user_set_promisc = 0; } IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); - if (netdev->mc_count) { - mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC); - if (!mta_list) - return; - - /* Shared function expects packed array of only addresses. */ - mc_ptr = netdev->mc_list; - - for (i = 0; i < netdev->mc_count; i++) { - if (!mc_ptr) - break; - memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr, - ETH_ALEN); - mc_ptr = mc_ptr->next; - } - - ixgbe_update_mc_addr_list(hw, mta_list, i, 0); - kfree(mta_list); - } else { - ixgbe_update_mc_addr_list(hw, NULL, 0, 0); - } + /* reprogram secondary unicast list */ + addr_count = netdev->uc_count; + if (addr_count) + addr_list = netdev->uc_list->dmi_addr; + ixgbe_update_uc_addr_list(hw, addr_list, addr_count, + ixgbe_addr_list_itr); + /* reprogram multicast list */ + addr_count = netdev->mc_count; + if (addr_count) + addr_list = netdev->mc_list->dmi_addr; + ixgbe_update_mc_addr_list(hw, addr_list, addr_count, + ixgbe_addr_list_itr); } static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) @@ -1723,7 +1730,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) struct net_device *netdev = adapter->netdev; int i; - ixgbe_set_multi(netdev); + ixgbe_set_rx_mode(netdev); ixgbe_restore_vlan(adapter); @@ -3508,7 +3515,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, netdev->stop = &ixgbe_close; netdev->hard_start_xmit = &ixgbe_xmit_frame; netdev->get_stats = &ixgbe_get_stats; - netdev->set_multicast_list = &ixgbe_set_multi; + netdev->set_rx_mode = &ixgbe_set_rx_mode; + netdev->set_multicast_list = &ixgbe_set_rx_mode; netdev->set_mac_address = &ixgbe_set_mac; netdev->change_mtu = &ixgbe_change_mtu; ixgbe_set_ethtool_ops(netdev); diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 781f137decc..2f4d34e6729 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -822,10 +822,6 @@ #define IXGBE_RAH_VIND_SHIFT 18 #define IXGBE_RAH_AV 0x80000000 -/* Filters */ -#define IXGBE_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ -#define IXGBE_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ - /* Header split receive */ #define IXGBE_RFCTL_ISCSI_DIS 0x00000001 #define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E @@ -1167,6 +1163,8 @@ 
struct ixgbe_addr_filter_info { u32 rar_used_count; u32 mc_addr_in_rar_count; u32 mta_in_use; + u32 overflow_promisc; + bool user_set_promisc; }; /* Flow control parameters */ @@ -1242,6 +1240,10 @@ struct ixgbe_hw_stats { /* forward declaration */ struct ixgbe_hw; +/* iterator type for walking multicast address lists */ +typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, + u32 *vmdq); + struct ixgbe_mac_operations { s32 (*reset)(struct ixgbe_hw *); enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); @@ -1263,9 +1265,11 @@ struct ixgbe_mac_info { u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; s32 mc_filter_type; + u32 mcft_size; + u32 vft_size; + u32 num_rar_entries; u32 num_rx_queues; u32 num_tx_queues; - u32 num_rx_addrs; u32 link_attach_type; u32 link_mode_select; bool link_settings_loaded; -- cgit v1.2.3 From 036c9b097034b4ea82974f7c98d10ec7fbf81902 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 26 Aug 2008 04:27:05 -0700 Subject: ixgbe: do not update stats twice each receive Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 87ef2db8c43..50737ccdeca 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -664,9 +664,6 @@ next_desc: if (cleaned_count) ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); - adapter->net_stats.rx_bytes += total_rx_bytes; - adapter->net_stats.rx_packets += total_rx_packets; - rx_ring->total_packets += total_rx_packets; rx_ring->total_bytes += total_rx_bytes; adapter->net_stats.rx_bytes += total_rx_bytes; -- cgit v1.2.3 From 3a581073e0f9f3966ac95a89cd04a0a6b948dc77 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 26 Aug 2008 04:27:08 -0700 Subject: ixgbe: Cleanup references to Tx and Rx rings to becommon across the driver Cleanup all the different references to the Tx ring and Rx ring structures and make them common across the driver. 
Signed-off-by: Jesse Brandeburg Signed-off-by: Peter P Waskiewicz Jr Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 158 ++++++++++++++++++++--------------------- 1 file changed, 77 insertions(+), 81 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 50737ccdeca..b5a9b9da2fb 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -290,38 +290,38 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, #ifdef CONFIG_DCA static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rxr) + struct ixgbe_ring *rx_ring) { u32 rxctrl; int cpu = get_cpu(); - int q = rxr - adapter->rx_ring; + int q = rx_ring - adapter->rx_ring; - if (rxr->cpu != cpu) { + if (rx_ring->cpu != cpu) { rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; rxctrl |= dca_get_tag(cpu); rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); - rxr->cpu = cpu; + rx_ring->cpu = cpu; } put_cpu(); } static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, - struct ixgbe_ring *txr) + struct ixgbe_ring *tx_ring) { u32 txctrl; int cpu = get_cpu(); - int q = txr - adapter->tx_ring; + int q = tx_ring - adapter->tx_ring; - if (txr->cpu != cpu) { + if (tx_ring->cpu != cpu) { txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q)); txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; txctrl |= dca_get_tag(cpu); txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl); - txr->cpu = cpu; + tx_ring->cpu = cpu; } put_cpu(); } @@ -459,31 +459,30 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union ixgbe_adv_rx_desc *rx_desc; - struct ixgbe_rx_buffer *rx_buffer_info; - struct sk_buff *skb; + struct ixgbe_rx_buffer *bi; unsigned int i; unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN; i = rx_ring->next_to_use; - rx_buffer_info = &rx_ring->rx_buffer_info[i]; + bi = &rx_ring->rx_buffer_info[i]; while (cleaned_count--) { rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); - if (!rx_buffer_info->page && - (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { - rx_buffer_info->page = alloc_page(GFP_ATOMIC); - if (!rx_buffer_info->page) { + if (!bi->page && + (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { + bi->page = alloc_page(GFP_ATOMIC); + if (!bi->page) { adapter->alloc_rx_page_failed++; goto no_buffers; } - rx_buffer_info->page_dma = - pci_map_page(pdev, rx_buffer_info->page, - 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); + bi->page_dma = pci_map_page(pdev, bi->page, 0, + PAGE_SIZE, + PCI_DMA_FROMDEVICE); } - if (!rx_buffer_info->skb) { - skb = netdev_alloc_skb(netdev, bufsz); + if (!bi->skb) { + struct sk_buff *skb = netdev_alloc_skb(netdev, bufsz); if (!skb) { adapter->alloc_rx_buff_failed++; @@ -497,27 +496,23 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, */ skb_reserve(skb, NET_IP_ALIGN); - rx_buffer_info->skb = skb; - rx_buffer_info->dma = pci_map_single(pdev, skb->data, - bufsz, - PCI_DMA_FROMDEVICE); + bi->skb = skb; + bi->dma = pci_map_single(pdev, skb->data, bufsz, + PCI_DMA_FROMDEVICE); } /* Refresh the desc even if buffer_addrs didn't change because * each write-back erases this info. 
*/ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - rx_desc->read.pkt_addr = - cpu_to_le64(rx_buffer_info->page_dma); - rx_desc->read.hdr_addr = - cpu_to_le64(rx_buffer_info->dma); + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); } else { - rx_desc->read.pkt_addr = - cpu_to_le64(rx_buffer_info->dma); + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); } i++; if (i == rx_ring->count) i = 0; - rx_buffer_info = &rx_ring->rx_buffer_info[i]; + bi = &rx_ring->rx_buffer_info[i]; } no_buffers: if (rx_ring->next_to_use != i) { @@ -896,7 +891,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) { struct ixgbe_q_vector *q_vector = data; struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *txr; + struct ixgbe_ring *tx_ring; int i, r_idx; if (!q_vector->txr_count) @@ -904,14 +899,14 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); for (i = 0; i < q_vector->txr_count; i++) { - txr = &(adapter->tx_ring[r_idx]); + tx_ring = &(adapter->tx_ring[r_idx]); #ifdef CONFIG_DCA if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - ixgbe_update_tx_dca(adapter, txr); + ixgbe_update_tx_dca(adapter, tx_ring); #endif - txr->total_bytes = 0; - txr->total_packets = 0; - ixgbe_clean_tx_irq(adapter, txr); + tx_ring->total_bytes = 0; + tx_ring->total_packets = 0; + ixgbe_clean_tx_irq(adapter, tx_ring); r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, r_idx + 1); } @@ -928,18 +923,18 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) { struct ixgbe_q_vector *q_vector = data; struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *rxr; + struct ixgbe_ring *rx_ring; int r_idx; r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); if (!q_vector->rxr_count) return IRQ_HANDLED; - rxr = &(adapter->rx_ring[r_idx]); + rx_ring = &(adapter->rx_ring[r_idx]); /* disable interrupts on this vector only */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx); - rxr->total_bytes = 0; - rxr->total_packets = 0; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx); + rx_ring->total_bytes = 0; + rx_ring->total_packets = 0; netif_rx_schedule(adapter->netdev, &q_vector->napi); return IRQ_HANDLED; @@ -964,18 +959,18 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) struct ixgbe_q_vector *q_vector = container_of(napi, struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *rxr; + struct ixgbe_ring *rx_ring; int work_done = 0; long r_idx; r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); - rxr = &(adapter->rx_ring[r_idx]); + rx_ring = &(adapter->rx_ring[r_idx]); #ifdef CONFIG_DCA if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - ixgbe_update_rx_dca(adapter, rxr); + ixgbe_update_rx_dca(adapter, rx_ring); #endif - ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget); + ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget); /* If all Rx work done, exit the polling mode */ if (work_done < budget) { @@ -983,7 +978,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS) ixgbe_set_itr_msix(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx); } return work_done; @@ -1342,7 +1337,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) } /** - * 
ixgbe_configure_tx - Configure 8254x Transmit Unit after Reset + * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset * @adapter: board private structure * * Configure the Tx unit of the MAC after a reset. @@ -1408,7 +1403,7 @@ static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph, } /** - * ixgbe_configure_rx - Configure 8254x Receive Unit after Reset + * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset * @adapter: board private structure * * Configure the Rx unit of the MAC after a reset. @@ -2483,40 +2478,41 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) /** * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) * @adapter: board private structure - * @txdr: tx descriptor ring (for a specific queue) to setup + * @tx_ring: tx descriptor ring (for a specific queue) to setup * * Return 0 on success, negative on failure **/ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *txdr) + struct ixgbe_ring *tx_ring) { struct pci_dev *pdev = adapter->pdev; int size; - size = sizeof(struct ixgbe_tx_buffer) * txdr->count; - txdr->tx_buffer_info = vmalloc(size); - if (!txdr->tx_buffer_info) { + size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vmalloc(size); + if (!tx_ring->tx_buffer_info) { DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit descriptor ring\n"); return -ENOMEM; } - memset(txdr->tx_buffer_info, 0, size); + memset(tx_ring->tx_buffer_info, 0, size); /* round up to nearest 4K */ - txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc); - txdr->size = ALIGN(txdr->size, 4096); + tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); - txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); - if (!txdr->desc) { - vfree(txdr->tx_buffer_info); + tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, + &tx_ring->dma); + if (!tx_ring->desc) { + vfree(tx_ring->tx_buffer_info); DPRINTK(PROBE, ERR, "Memory allocation failed for the tx desc ring\n"); return -ENOMEM; } - txdr->next_to_use = 0; - txdr->next_to_clean = 0; - txdr->work_limit = txdr->count; + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->work_limit = tx_ring->count; return 0; } @@ -2524,52 +2520,52 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, /** * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) * @adapter: board private structure - * @rxdr: rx descriptor ring (for a specific queue) to setup + * @rx_ring: rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rxdr) + struct ixgbe_ring *rx_ring) { struct pci_dev *pdev = adapter->pdev; int size; size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS; - rxdr->lro_mgr.lro_arr = vmalloc(size); - if (!rxdr->lro_mgr.lro_arr) + rx_ring->lro_mgr.lro_arr = vmalloc(size); + if (!rx_ring->lro_mgr.lro_arr) return -ENOMEM; - memset(rxdr->lro_mgr.lro_arr, 0, size); + memset(rx_ring->lro_mgr.lro_arr, 0, size); - size = sizeof(struct ixgbe_rx_buffer) * rxdr->count; - rxdr->rx_buffer_info = vmalloc(size); - if (!rxdr->rx_buffer_info) { + size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; + rx_ring->rx_buffer_info = vmalloc(size); + if (!rx_ring->rx_buffer_info) { DPRINTK(PROBE, ERR, "vmalloc allocation failed for the rx desc ring\n"); goto alloc_failed; } - memset(rxdr->rx_buffer_info, 0, 
size); + memset(rx_ring->rx_buffer_info, 0, size); /* Round up to nearest 4K */ - rxdr->size = rxdr->count * sizeof(union ixgbe_adv_rx_desc); - rxdr->size = ALIGN(rxdr->size, 4096); + rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); - rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); + rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma); - if (!rxdr->desc) { + if (!rx_ring->desc) { DPRINTK(PROBE, ERR, "Memory allocation failed for the rx desc ring\n"); - vfree(rxdr->rx_buffer_info); + vfree(rx_ring->rx_buffer_info); goto alloc_failed; } - rxdr->next_to_clean = 0; - rxdr->next_to_use = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; return 0; alloc_failed: - vfree(rxdr->lro_mgr.lro_arr); - rxdr->lro_mgr.lro_arr = NULL; + vfree(rx_ring->lro_mgr.lro_arr); + rx_ring->lro_mgr.lro_arr = NULL; return -ENOMEM; } -- cgit v1.2.3 From 2b9ade935cd2be6db26f5445656950bc3da7055d Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 26 Aug 2008 04:27:10 -0700 Subject: ixgbe: disable flow control by default Since the adapter cannot tell what the remote end's flow control capability is through auto-neg, we must turn off flow control by default. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe.h | 6 +++--- drivers/net/ixgbe/ixgbe_common.c | 17 +++++++++++++++++ drivers/net/ixgbe/ixgbe_main.c | 15 ++++++--------- 3 files changed, 26 insertions(+), 12 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 956914a5028..496a91aea63 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -69,12 +69,12 @@ /* flow control */ #define IXGBE_DEFAULT_FCRTL 0x10000 -#define IXGBE_MIN_FCRTL 0 +#define IXGBE_MIN_FCRTL 0x40 #define IXGBE_MAX_FCRTL 0x7FF80 #define IXGBE_DEFAULT_FCRTH 0x20000 -#define IXGBE_MIN_FCRTH 0 +#define IXGBE_MIN_FCRTH 0x600 #define IXGBE_MAX_FCRTH 0x7FFF0 -#define IXGBE_DEFAULT_FCPAUSE 0x6800 /* may be too long */ +#define IXGBE_DEFAULT_FCPAUSE 0xFFFF #define IXGBE_MIN_FCPAUSE 0 #define IXGBE_MAX_FCPAUSE 0xFFFF diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index a9f4d0e58e1..9c0d0a1964e 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -1064,6 +1064,13 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); + /* + * 10 gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.type == ixgbe_fc_default) + hw->fc.type = ixgbe_fc_full; + /* * We want to save off the original Flow Control configuration just in * case we get disconnected and then reconnected into a different hub @@ -1116,6 +1123,16 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg); IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); + /* + * Check for invalid software configuration, zeros are completely + * invalid for all parameters used past this point, and if we enable + * flow control with zero water marks, we blast flow control packets. 
+ */ + if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { + hw_dbg(hw, "Flow control structure initialized incorrectly\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + /* * We need to set up the Receive Threshold high and low water * marks as well as (optionally) enabling the transmission of diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index b5a9b9da2fb..cba7a38bf6b 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -2446,8 +2446,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->tx_eitr = 1; /* default flow control settings */ - hw->fc.original_type = ixgbe_fc_full; - hw->fc.type = ixgbe_fc_full; + hw->fc.original_type = ixgbe_fc_none; + hw->fc.type = ixgbe_fc_none; + hw->fc.high_water = IXGBE_DEFAULT_FCRTH; + hw->fc.low_water = IXGBE_DEFAULT_FCRTL; + hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; + hw->fc.send_xon = true; /* select 10G link by default */ hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; @@ -3587,13 +3591,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, INIT_WORK(&adapter->reset_task, ixgbe_reset_task); - /* initialize default flow control settings */ - hw->fc.original_type = ixgbe_fc_full; - hw->fc.type = ixgbe_fc_full; - hw->fc.high_water = IXGBE_DEFAULT_FCRTH; - hw->fc.low_water = IXGBE_DEFAULT_FCRTL; - hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; - err = ixgbe_init_interrupt_scheme(adapter); if (err) goto err_sw_init; -- cgit v1.2.3 From e01c31a5f7eb4f8a147cf6205f0f2ef11146068d Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 26 Aug 2008 04:27:13 -0700 Subject: ixgbe: Implement Tx Head Writeback Enable Tx Head Writeback in the hardware. This helps performance by removing adapter writebacks to descriptors on transmit completion. 
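
In concrete terms, a sketch derived from the hunks below (not an additional change): the Tx descriptor ring allocation grows by one u32 placed just past the last descriptor, TDWBAL/TDWBAH point the hardware at that word, and the clean routine reads the DMA'd head index from it instead of checking the DD bit of every descriptor.

	/*
	 * Illustrative only; equivalent to the GET_TX_HEAD_FROM_RING()
	 * macro plus the le32_to_cpu() at its call sites below.
	 * Ring layout: | desc[0] ... desc[count-1] | head index (u32, LE) |
	 */
	static u32 example_read_tx_head(struct ixgbe_ring *tx_ring)
	{
		volatile __le32 *head = (volatile __le32 *)
			((union ixgbe_adv_tx_desc *)tx_ring->desc + tx_ring->count);

		rmb();	/* order against earlier reads of the ring, as in the patch */
		return le32_to_cpu(*head);
	}
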
Signed-off-by: Jesse Brandeburg Signed-off-by: Peter P Waskiewicz Jr Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 198 +++++++++++++++++++++++------------------ 1 file changed, 113 insertions(+), 85 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index cba7a38bf6b..95d00416093 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -148,8 +148,7 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, *tx_buffer_info) { if (tx_buffer_info->dma) { - pci_unmap_page(adapter->pdev, - tx_buffer_info->dma, + pci_unmap_page(adapter->pdev, tx_buffer_info->dma, tx_buffer_info->length, PCI_DMA_TODEVICE); tx_buffer_info->dma = 0; } @@ -162,32 +161,35 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring, - unsigned int eop, - union ixgbe_adv_tx_desc *eop_desc) + unsigned int eop) { + struct ixgbe_hw *hw = &adapter->hw; + u32 head, tail; + /* Detect a transmit hang in hardware, this serializes the - * check with the clearing of time_stamp and movement of i */ + * check with the clearing of time_stamp and movement of eop */ + head = IXGBE_READ_REG(hw, tx_ring->head); + tail = IXGBE_READ_REG(hw, tx_ring->tail); adapter->detect_tx_hung = false; - if (tx_ring->tx_buffer_info[eop].dma && + if ((head != tail) && + tx_ring->tx_buffer_info[eop].time_stamp && time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) { /* detected Tx unit hang */ + union ixgbe_adv_tx_desc *tx_desc; + tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" - " TDH <%x>\n" - " TDT <%x>\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" " next_to_use <%x>\n" " next_to_clean <%x>\n" "tx_buffer_info[next_to_clean]\n" " time_stamp <%lx>\n" - " next_to_watch <%x>\n" - " jiffies <%lx>\n" - " next_to_watch.status <%x>\n", - readl(adapter->hw.hw_addr + tx_ring->head), - readl(adapter->hw.hw_addr + tx_ring->tail), - tx_ring->next_to_use, - tx_ring->next_to_clean, - tx_ring->tx_buffer_info[eop].time_stamp, - eop, jiffies, eop_desc->wb.status); + " jiffies <%lx>\n", + tx_ring->queue_index, + head, tail, + tx_ring->next_to_use, eop, + tx_ring->tx_buffer_info[eop].time_stamp, jiffies); return true; } @@ -203,65 +205,75 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ +#define GET_TX_HEAD_FROM_RING(ring) (\ + *(volatile u32 *) \ + ((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count)) +static void ixgbe_tx_timeout(struct net_device *netdev); + /** * ixgbe_clean_tx_irq - Reclaim resources after transmit completes * @adapter: board private structure + * @tx_ring: tx ring to clean **/ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) + struct ixgbe_ring *tx_ring) { - struct net_device *netdev = adapter->netdev; - union ixgbe_adv_tx_desc *tx_desc, *eop_desc; + union ixgbe_adv_tx_desc *tx_desc; struct ixgbe_tx_buffer *tx_buffer_info; - unsigned int i, eop; - bool cleaned = false; - unsigned int total_tx_bytes = 0, total_tx_packets = 0; + struct net_device *netdev = adapter->netdev; + struct sk_buff *skb; + unsigned int i; + u32 head, oldhead; + 
unsigned int count = 0; + unsigned int total_bytes = 0, total_packets = 0; + rmb(); + head = GET_TX_HEAD_FROM_RING(tx_ring); + head = le32_to_cpu(head); i = tx_ring->next_to_clean; - eop = tx_ring->tx_buffer_info[i].next_to_watch; - eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); - while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) { - cleaned = false; - while (!cleaned) { + while (1) { + while (i != head) { tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); tx_buffer_info = &tx_ring->tx_buffer_info[i]; - cleaned = (i == eop); + skb = tx_buffer_info->skb; - tx_ring->stats.bytes += tx_buffer_info->length; - if (cleaned) { - struct sk_buff *skb = tx_buffer_info->skb; + if (skb) { unsigned int segs, bytecount; + + /* gso_segs is currently only valid for tcp */ segs = skb_shinfo(skb)->gso_segs ?: 1; /* multiply data chunks by size of headers */ bytecount = ((segs - 1) * skb_headlen(skb)) + - skb->len; - total_tx_packets += segs; - total_tx_bytes += bytecount; + skb->len; + total_packets += segs; + total_bytes += bytecount; } + ixgbe_unmap_and_free_tx_resource(adapter, - tx_buffer_info); - tx_desc->wb.status = 0; + tx_buffer_info); i++; if (i == tx_ring->count) i = 0; - } - - tx_ring->stats.packets++; - - eop = tx_ring->tx_buffer_info[i].next_to_watch; - eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); - - /* weight of a sort for tx, avoid endless transmit cleanup */ - if (total_tx_packets >= tx_ring->work_limit) - break; - } + count++; + if (count == tx_ring->count) + goto done_cleaning; + } + oldhead = head; + rmb(); + head = GET_TX_HEAD_FROM_RING(tx_ring); + head = le32_to_cpu(head); + if (head == oldhead) + goto done_cleaning; + } /* while (1) */ + +done_cleaning: tx_ring->next_to_clean = i; #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) - if (total_tx_packets && netif_carrier_ok(netdev) && - (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { + if (unlikely(count && netif_carrier_ok(netdev) && + (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ @@ -269,23 +281,32 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && !test_bit(__IXGBE_DOWN, &adapter->state)) { netif_wake_subqueue(netdev, tx_ring->queue_index); - adapter->restart_queue++; + ++adapter->restart_queue; } } - if (adapter->detect_tx_hung) - if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc)) - netif_stop_subqueue(netdev, tx_ring->queue_index); + if (adapter->detect_tx_hung) { + if (ixgbe_check_tx_hang(adapter, tx_ring, i)) { + /* schedule immediate reset if we believe we hung */ + DPRINTK(PROBE, INFO, + "tx hang %d detected, resetting adapter\n", + adapter->tx_timeout_count + 1); + ixgbe_tx_timeout(adapter->netdev); + } + } - if (total_tx_packets >= tx_ring->work_limit) - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value); + /* re-arm the interrupt */ + if ((total_packets >= tx_ring->work_limit) || + (count == tx_ring->count)) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx); - tx_ring->total_bytes += total_tx_bytes; - tx_ring->total_packets += total_tx_packets; - adapter->net_stats.tx_bytes += total_tx_bytes; - adapter->net_stats.tx_packets += total_tx_packets; - cleaned = total_tx_packets ? 
true : false; - return cleaned; + tx_ring->total_bytes += total_bytes; + tx_ring->total_packets += total_packets; + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + adapter->net_stats.tx_bytes += total_bytes; + adapter->net_stats.tx_packets += total_packets; + return (total_packets ? true : false); } #ifdef CONFIG_DCA @@ -1344,19 +1365,24 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) **/ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) { - u64 tdba; + u64 tdba, tdwba; struct ixgbe_hw *hw = &adapter->hw; u32 i, j, tdlen, txctrl; /* Setup the HW Tx Head and Tail descriptor pointers */ for (i = 0; i < adapter->num_tx_queues; i++) { - j = adapter->tx_ring[i].reg_idx; - tdba = adapter->tx_ring[i].dma; - tdlen = adapter->tx_ring[i].count * - sizeof(union ixgbe_adv_tx_desc); + struct ixgbe_ring *ring = &adapter->tx_ring[i]; + j = ring->reg_idx; + tdba = ring->dma; + tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), - (tdba & DMA_32BIT_MASK)); + (tdba & DMA_32BIT_MASK)); IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); + tdwba = ring->dma + + (ring->count * sizeof(union ixgbe_adv_tx_desc)); + tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK); + IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32)); IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen); IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); @@ -1365,9 +1391,9 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) /* Disable Tx Head Writeback RO bit, since this hoses * bookkeeping if things aren't delivered in order. */ - txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl); + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); } } @@ -1775,6 +1801,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) { j = adapter->tx_ring[i].reg_idx; txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); + /* enable WTHRESH=8 descriptors, to encourage burst writeback */ + txdctl |= (8 << 16); txdctl |= IXGBE_TXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); } @@ -2487,38 +2515,38 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) * Return 0 on success, negative on failure **/ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) + struct ixgbe_ring *tx_ring) { struct pci_dev *pdev = adapter->pdev; int size; size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; tx_ring->tx_buffer_info = vmalloc(size); - if (!tx_ring->tx_buffer_info) { - DPRINTK(PROBE, ERR, - "Unable to allocate memory for the transmit descriptor ring\n"); - return -ENOMEM; - } + if (!tx_ring->tx_buffer_info) + goto err; memset(tx_ring->tx_buffer_info, 0, size); /* round up to nearest 4K */ - tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); + tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc) + + sizeof(u32); tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, &tx_ring->dma); - if (!tx_ring->desc) { - vfree(tx_ring->tx_buffer_info); - DPRINTK(PROBE, ERR, - "Memory allocation failed for the tx desc ring\n"); - return -ENOMEM; - } + if (!tx_ring->desc) + goto err; tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; tx_ring->work_limit = tx_ring->count; - return 0; + 
+err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit " + "descriptor ring\n"); + return -ENOMEM; } /** @@ -2581,7 +2609,7 @@ alloc_failed: * Free all transmit software resources **/ static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) + struct ixgbe_ring *tx_ring) { struct pci_dev *pdev = adapter->pdev; -- cgit v1.2.3 From 7c6e0a436d971641d37cebcb12e8cc0c4419b5d4 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 26 Aug 2008 04:27:16 -0700 Subject: ixgbe: Lock RSS seed, move rx_buf_len to the rx_ring This locks the seed down so loading/unloading the driver will present predictable hashing from RSS. Also move the rx_buf_len out of the adapter struct, and into the Rx ring struct. Signed-off-by: Jesse Brandeburg Signed-off-by: Peter P Waskiewicz Jr Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe.h | 2 +- drivers/net/ixgbe/ixgbe_main.c | 102 +++++++++++++++++++++++------------------ drivers/net/ixgbe/ixgbe_type.h | 9 ++-- 3 files changed, 64 insertions(+), 49 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 496a91aea63..628f60cf2fb 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -166,6 +166,7 @@ struct ixgbe_ring { char name[IFNAMSIZ + 5]; u16 work_limit; /* max work per interrupt */ + u16 rx_buf_len; }; #define RING_F_VMDQ 1 @@ -228,7 +229,6 @@ struct ixgbe_adapter { struct timer_list watchdog_timer; struct vlan_group *vlgrp; u16 bd_number; - u16 rx_buf_len; struct work_struct reset_task; struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS]; char name[MAX_MSIX_COUNT][IFNAMSIZ + 5]; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 95d00416093..b5780991c17 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -474,15 +474,15 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, * @adapter: address of board private structure **/ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring, - int cleaned_count) + struct ixgbe_ring *rx_ring, + int cleaned_count) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *bi; unsigned int i; - unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN; + unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN; i = rx_ring->next_to_use; bi = &rx_ring->rx_buffer_info[i]; @@ -498,8 +498,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, goto no_buffers; } bi->page_dma = pci_map_page(pdev, bi->page, 0, - PAGE_SIZE, - PCI_DMA_FROMDEVICE); + PAGE_SIZE, + PCI_DMA_FROMDEVICE); } if (!bi->skb) { @@ -535,6 +535,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, i = 0; bi = &rx_ring->rx_buffer_info[i]; } + no_buffers: if (rx_ring->next_to_use != i) { rx_ring->next_to_use = i; @@ -552,9 +553,19 @@ no_buffers: } } +static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc) +{ + return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; +} + +static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) +{ + return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; +} + static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring, - int *work_done, int work_to_do) + struct ixgbe_ring *rx_ring, + int *work_done, int work_to_do) { 
struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; @@ -562,36 +573,35 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; struct sk_buff *skb; unsigned int i; - u32 upper_len, len, staterr; + u32 len, staterr; u16 hdr_info; bool cleaned = false; int cleaned_count = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0; i = rx_ring->next_to_clean; - upper_len = 0; rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); rx_buffer_info = &rx_ring->rx_buffer_info[i]; while (staterr & IXGBE_RXD_STAT_DD) { + u32 upper_len = 0; if (*work_done >= work_to_do) break; (*work_done)++; if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - hdr_info = - le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info); - len = - ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> - IXGBE_RXDADV_HDRBUFLEN_SHIFT); + hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); + len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> + IXGBE_RXDADV_HDRBUFLEN_SHIFT; if (hdr_info & IXGBE_RXDADV_SPH) adapter->rx_hdr_split++; if (len > IXGBE_RX_HDR_SIZE) len = IXGBE_RX_HDR_SIZE; upper_len = le16_to_cpu(rx_desc->wb.upper.length); - } else + } else { len = le16_to_cpu(rx_desc->wb.upper.length); + } cleaned = true; skb = rx_buffer_info->skb; @@ -600,8 +610,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, if (len && !skb_shinfo(skb)->nr_frags) { pci_unmap_single(pdev, rx_buffer_info->dma, - adapter->rx_buf_len + NET_IP_ALIGN, - PCI_DMA_FROMDEVICE); + rx_ring->rx_buf_len + NET_IP_ALIGN, + PCI_DMA_FROMDEVICE); skb_put(skb, len); } @@ -1415,7 +1425,7 @@ static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph, union ixgbe_adv_rx_desc *rx_desc = priv; /* Verify that this is a valid IPv4 TCP packet */ - if (!(rx_desc->wb.lower.lo_dword.pkt_info & + if (!(ixgbe_get_pkt_info(rx_desc) & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP))) return -1; @@ -1442,10 +1452,13 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; int i, j; u32 rdlen, rxctrl, rxcsum; - u32 random[10]; + static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, + 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, + 0x6A3E67EA, 0x14364D17, 0x3BED200D}; u32 fctrl, hlreg0; u32 pages; u32 reta = 0, mrqc, srrctl; + int rx_buf_len; /* Decide whether to use packet split mode or not */ if (netdev->mtu > ETH_DATA_LEN) @@ -1455,12 +1468,12 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) /* Set the RX buffer length according to the mode */ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - adapter->rx_buf_len = IXGBE_RX_HDR_SIZE; + rx_buf_len = IXGBE_RX_HDR_SIZE; } else { if (netdev->mtu <= ETH_DATA_LEN) - adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; + rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; else - adapter->rx_buf_len = ALIGN(max_frame, 1024); + rx_buf_len = ALIGN(max_frame, 1024); } fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); @@ -1490,12 +1503,11 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) } else { srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; - if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) + if (rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; else - srrctl |= - adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; } IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl); @@ -1508,13 
+1520,15 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) * the Base and Length of the Rx Descriptor Ring */ for (i = 0; i < adapter->num_rx_queues; i++) { rdba = adapter->rx_ring[i].dma; - IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK)); - IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32)); - IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen); - IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0); - adapter->rx_ring[i].head = IXGBE_RDH(i); - adapter->rx_ring[i].tail = IXGBE_RDT(i); + j = adapter->rx_ring[i].reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK)); + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen); + IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); + adapter->rx_ring[i].head = IXGBE_RDH(j); + adapter->rx_ring[i].tail = IXGBE_RDT(j); + adapter->rx_ring[i].rx_buf_len = rx_buf_len; } /* Intitial LRO Settings */ @@ -1541,22 +1555,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) } /* Fill out hash function seeds */ - /* XXX use a random constant here to glue certain flows */ - get_random_bytes(&random[0], 40); for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]); + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); mrqc = IXGBE_MRQC_RSSEN /* Perform hash on these packet types */ - | IXGBE_MRQC_RSS_FIELD_IPV4 - | IXGBE_MRQC_RSS_FIELD_IPV4_TCP - | IXGBE_MRQC_RSS_FIELD_IPV4_UDP - | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP - | IXGBE_MRQC_RSS_FIELD_IPV6_EX - | IXGBE_MRQC_RSS_FIELD_IPV6 - | IXGBE_MRQC_RSS_FIELD_IPV6_TCP - | IXGBE_MRQC_RSS_FIELD_IPV6_UDP - | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; + | IXGBE_MRQC_RSS_FIELD_IPV4 + | IXGBE_MRQC_RSS_FIELD_IPV4_TCP + | IXGBE_MRQC_RSS_FIELD_IPV4_UDP + | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP + | IXGBE_MRQC_RSS_FIELD_IPV6_EX + | IXGBE_MRQC_RSS_FIELD_IPV6 + | IXGBE_MRQC_RSS_FIELD_IPV6_TCP + | IXGBE_MRQC_RSS_FIELD_IPV6_UDP + | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); } @@ -1926,7 +1938,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, rx_buffer_info = &rx_ring->rx_buffer_info[i]; if (rx_buffer_info->dma) { pci_unmap_single(pdev, rx_buffer_info->dma, - adapter->rx_buf_len, + rx_ring->rx_buf_len, PCI_DMA_FROMDEVICE); rx_buffer_info->dma = 0; } diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 2f4d34e6729..85eb03cce25 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -1049,9 +1049,12 @@ union ixgbe_adv_rx_desc { } read; struct { struct { - struct { - __le16 pkt_info; /* RSS type, Packet type */ - __le16 hdr_info; /* Split Header, header len */ + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS type, Packet type */ + __le16 hdr_info; /* Split Header, header len */ + } hs_rss; } lo_dword; union { __le32 rss; /* RSS Hash */ -- cgit v1.2.3 From 22f32b7a5ad34f23eb18f0e089522f1652ae8abc Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 26 Aug 2008 04:27:18 -0700 Subject: ixgbe: should not use HW_CSUM, should use IP* flags as mentioned by Herbert, our hardware supports IP offloads, not full checksum offloads for any protocol in existence (even though the hardware just provides generic csum support over any range of bytes) Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_ethtool.c | 6 +++--- drivers/net/ixgbe/ixgbe_main.c | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 
'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 3efe5dda10a..61c000e2309 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -233,15 +233,15 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data) static u32 ixgbe_get_tx_csum(struct net_device *netdev) { - return (netdev->features & NETIF_F_HW_CSUM) != 0; + return (netdev->features & NETIF_F_IP_CSUM) != 0; } static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) { if (data) - netdev->features |= NETIF_F_HW_CSUM; + netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); else - netdev->features &= ~NETIF_F_HW_CSUM; + netdev->features &= ~NETIF_F_IP_CSUM; return 0; } diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index b5780991c17..b6973ee905e 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3593,7 +3593,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, goto err_sw_init; netdev->features = NETIF_F_SG | - NETIF_F_HW_CSUM | + NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; @@ -3604,7 +3604,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, netdev->vlan_features |= NETIF_F_TSO; netdev->vlan_features |= NETIF_F_TSO6; - netdev->vlan_features |= NETIF_F_HW_CSUM; + netdev->vlan_features |= NETIF_F_IP_CSUM; netdev->vlan_features |= NETIF_F_SG; if (pci_using_dac) -- cgit v1.2.3 From 96b0e0f63b03153f7f2915f584083b4191b1932d Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 26 Aug 2008 04:27:21 -0700 Subject: ixgbe: update dca to new interface, fix CONFIG_DCA_MODULE DCA related fixes ================= - ixgbe was not compiling and using DCA correctly if dca was a module - DCA interface changed with new kernel - ixgbe was not correctly configured to indicate DCA hints to the correct CPU. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe.h | 35 +++++++++++++++++++---------- drivers/net/ixgbe/ixgbe_main.c | 50 +++++++++++++++++++++++++++++------------- 2 files changed, 59 insertions(+), 26 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 628f60cf2fb..90b53830196 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -37,7 +37,7 @@ #include "ixgbe_type.h" #include "ixgbe_common.h" -#ifdef CONFIG_DCA +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) #include #endif @@ -150,7 +150,7 @@ struct ixgbe_ring { * offset associated with this ring, which is different * for DCE and RSS modes */ -#ifdef CONFIG_DCA +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) /* cpu for tx queue */ int cpu; #endif @@ -267,15 +267,28 @@ struct ixgbe_adapter { * thus the additional *_CAPABLE flags. 
*/ u32 flags; -#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1 << 0) -#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) -#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 2) -#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3) -#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4) -#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5) -#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 6) -#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 7) -#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8) +#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1) +#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1) +#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2) +#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3) +#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4) +#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6) +#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7) +#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8) +#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9) +#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10) +#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11) +#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12) +#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13) +#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16) +#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17) +#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18) +#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19) +#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22) +#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23) + +/* default to trying for four seconds */ +#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) /* OS defined structs */ struct net_device *netdev; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index b6973ee905e..f37e3eb12f7 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -80,7 +80,7 @@ static struct pci_device_id ixgbe_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); -#ifdef CONFIG_DCA +#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, void *p); static struct notifier_block dca_notifier = { @@ -309,7 +309,7 @@ done_cleaning: return (total_packets ? true : false); } -#ifdef CONFIG_DCA +#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring) { @@ -320,7 +320,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, if (rx_ring->cpu != cpu) { rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; - rxctrl |= dca_get_tag(cpu); + rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); @@ -339,7 +339,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, if (tx_ring->cpu != cpu) { txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q)); txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; - txctrl |= dca_get_tag(cpu); + txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl); tx_ring->cpu = cpu; @@ -372,11 +372,14 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) switch (event) { case DCA_PROVIDER_ADD: - adapter->flags |= IXGBE_FLAG_DCA_ENABLED; + /* if we're already enabled, don't do it again */ + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + break; /* Always use CB2 mode, difference is masked * in the CB driver. 
*/ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); if (dca_add_requester(dev) == 0) { + adapter->flags |= IXGBE_FLAG_DCA_ENABLED; ixgbe_setup_dca(adapter); break; } @@ -393,7 +396,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) return 0; } -#endif /* CONFIG_DCA */ +#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */ /** * ixgbe_receive_skb - Send a completed packet up the stack * @adapter: board private structure @@ -931,7 +934,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); for (i = 0; i < q_vector->txr_count; i++) { tx_ring = &(adapter->tx_ring[r_idx]); -#ifdef CONFIG_DCA +#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ixgbe_update_tx_dca(adapter, tx_ring); #endif @@ -996,7 +999,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); rx_ring = &(adapter->rx_ring[r_idx]); -#ifdef CONFIG_DCA +#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ixgbe_update_rx_dca(adapter, rx_ring); #endif @@ -2054,11 +2057,28 @@ void ixgbe_down(struct ixgbe_adapter *adapter) netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); +#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; + dca_remove_requester(&adapter->pdev->dev); + } + +#endif if (!pci_channel_offline(adapter->pdev)) ixgbe_reset(adapter); ixgbe_clean_all_tx_rings(adapter); ixgbe_clean_all_rx_rings(adapter); +#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) + /* since we reset the hardware DCA settings were cleared */ + if (dca_add_requester(&adapter->pdev->dev) == 0) { + adapter->flags |= IXGBE_FLAG_DCA_ENABLED; + /* always use CB2 mode, difference is masked + * in the CB driver */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); + ixgbe_setup_dca(adapter); + } +#endif } static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) @@ -2113,7 +2133,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) struct ixgbe_adapter *adapter = q_vector->adapter; int tx_cleaned = 0, work_done = 0; -#ifdef CONFIG_DCA +#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { ixgbe_update_tx_dca(adapter, adapter->tx_ring); ixgbe_update_rx_dca(adapter, adapter->rx_ring); @@ -3677,7 +3697,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (err) goto err_register; -#ifdef CONFIG_DCA +#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IXGBE_FLAG_DCA_ENABLED; /* always use CB2 mode, difference is masked @@ -3727,7 +3747,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) flush_scheduled_work(); -#ifdef CONFIG_DCA +#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; dca_remove_requester(&pdev->dev); @@ -3860,7 +3880,7 @@ static int __init ixgbe_init_module(void) printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright); -#ifdef CONFIG_DCA +#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) dca_register_notify(&dca_notifier); #endif @@ -3877,13 +3897,13 @@ module_init(ixgbe_init_module); **/ static void __exit ixgbe_exit_module(void) { -#ifdef CONFIG_DCA +#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 
dca_unregister_notify(&dca_notifier); #endif pci_unregister_driver(&ixgbe_driver); } -#ifdef CONFIG_DCA +#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, void *p) { @@ -3894,7 +3914,7 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, return ret_val ? NOTIFY_BAD : NOTIFY_DONE; } -#endif /* CONFIG_DCA */ +#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */ module_exit(ixgbe_exit_module); -- cgit v1.2.3 From e9990a9cd76a14905a8bf2348444ff775b24a92f Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 26 Aug 2008 04:27:24 -0700 Subject: ixgbe: fix bug where lro settings are per ring Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index f37e3eb12f7..6b96c41687d 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1428,8 +1428,8 @@ static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph, union ixgbe_adv_rx_desc *rx_desc = priv; /* Verify that this is a valid IPv4 TCP packet */ - if (!(ixgbe_get_pkt_info(rx_desc) & - (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP))) + if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) && + (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP))) return -1; /* Set network headers */ @@ -1532,18 +1532,18 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) adapter->rx_ring[i].head = IXGBE_RDH(j); adapter->rx_ring[i].tail = IXGBE_RDT(j); adapter->rx_ring[i].rx_buf_len = rx_buf_len; + /* Intitial LRO Settings */ + adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE; + adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS; + adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr; + adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID; + if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) + adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI; + adapter->rx_ring[i].lro_mgr.dev = adapter->netdev; + adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; + adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; } - /* Intitial LRO Settings */ - adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE; - adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS; - adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr; - adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID; - if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) - adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI; - adapter->rx_ring[i].lro_mgr.dev = adapter->netdev; - adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; - adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { /* Fill out redirection table */ @@ -3618,9 +3618,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; - netdev->features |= NETIF_F_LRO; + netdev->features |= NETIF_F_IPV6_CSUM; netdev->features |= NETIF_F_TSO; netdev->features |= NETIF_F_TSO6; + netdev->features |= NETIF_F_LRO; netdev->vlan_features |= NETIF_F_TSO; netdev->vlan_features |= NETIF_F_TSO6; -- cgit v1.2.3 From cc41ac7c0011703460dd4d4674bb7cbf73bb883d Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 26 Aug 2008 
04:27:27 -0700 Subject: ixgbe: fix dca hints going to wrong processor hardware was configured incorrectly which led all hints to be sent to queue[0]'s DCA configuration. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 90 ++++++++++++++++++++++++++++++------------ drivers/net/ixgbe/ixgbe_type.h | 10 ++--- 2 files changed, 69 insertions(+), 31 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 6b96c41687d..28d3321b0dd 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1410,10 +1410,51 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) } } -#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ - (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 + +static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) +{ + struct ixgbe_ring *rx_ring; + u32 srrctl; + int queue0; + unsigned long *mask, maskval = 1; + long shift, len; + + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { + mask = (unsigned long *) &adapter->ring_feature[RING_F_RSS].mask; + len = sizeof(adapter->ring_feature[RING_F_RSS].mask) * 8; + } else { + mask = &maskval; + len = 1; + } + shift = find_first_bit(mask, len); + queue0 = index << shift; + rx_ring = &adapter->rx_ring[queue0]; + + srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); + + srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; + srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; + + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + srrctl |= ((IXGBE_RX_HDR_SIZE << + IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & + IXGBE_SRRCTL_BSIZEHDR_MASK); + } else { + srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + + if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) + srrctl |= IXGBE_RXBUFFER_2048 >> + IXGBE_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= rx_ring->rx_buf_len >> + IXGBE_SRRCTL_BSIZEPKT_SHIFT; + } + IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); +} -#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /** * ixgbe_get_skb_hdr - helper function for LRO header processing * @skb: pointer to sk_buff to be added to LRO packet @@ -1441,6 +1482,9 @@ static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph, return 0; } +#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ + (((S) & (PAGE_SIZE - 1)) ? 
1 : 0)) + /** * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset * @adapter: board private structure @@ -1460,7 +1504,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) 0x6A3E67EA, 0x14364D17, 0x3BED200D}; u32 fctrl, hlreg0; u32 pages; - u32 reta = 0, mrqc, srrctl; + u32 reta = 0, mrqc; + u32 rdrxctl; int rx_buf_len; /* Decide whether to use packet split mode or not */ @@ -1493,27 +1538,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) pages = PAGE_USE_COUNT(adapter->netdev->mtu); - srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0)); - srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; - srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; - - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; - srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; - srrctl |= ((IXGBE_RX_HDR_SIZE << - IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & - IXGBE_SRRCTL_BSIZEHDR_MASK); - } else { - srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; - - if (rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) - srrctl |= - IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; - else - srrctl |= rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; - } - IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl); - rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); /* disable receives while setting up the descriptors */ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); @@ -1542,8 +1566,24 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) adapter->rx_ring[i].lro_mgr.dev = adapter->netdev; adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; + + ixgbe_configure_srrctl(adapter, j); } + /* + * For VMDq support of different descriptor types or + * buffer sizes through the use of multiple SRRCTL + * registers, RDRXCTL.MVMEN must be set to 1 + * + * also, the manual doesn't mention it clearly but DCA hints + * will only use queue 0's tags unless this bit is set. Side + * effects of setting this bit are only that SRRCTL must be + * fully programmed [0..15] + */ + rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + rdrxctl |= IXGBE_RDRXCTL_MVMEN; + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { /* Fill out redirection table */ diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 85eb03cce25..3e9c483ad8e 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -356,12 +356,10 @@ #define IXGBE_ANLP2 0x042B4 #define IXGBE_ATLASCTL 0x04800 -/* RSCCTL Bit Masks */ -#define IXGBE_RSCCTL_RSCEN 0x01 -#define IXGBE_RSCCTL_MAXDESC_1 0x00 -#define IXGBE_RSCCTL_MAXDESC_4 0x04 -#define IXGBE_RSCCTL_MAXDESC_8 0x08 -#define IXGBE_RSCCTL_MAXDESC_16 0x0C +/* RDRXCTL Bit Masks */ +#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */ +#define IXGBE_RDRXCTL_MVMEN 0x00000020 +#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ /* CTRL Bit Masks */ #define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ -- cgit v1.2.3 From 4eeae6fd3f6a7f4cee0e75cf8b0d30b265339ae8 Mon Sep 17 00:00:00 2001 From: PJ Waskiewicz Date: Tue, 26 Aug 2008 04:27:30 -0700 Subject: ixgbe: use different context for tso and offload Change TSO offloads to use a different context than VLAN insertion and Tx checksumming. Hardware has separate registers internally for storing these so use them. 
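As a rough illustration of the index split described above, the standalone sketch below shows how a context-index bit could be folded into the descriptor fields so that TSO lands on hardware context 1 while checksum-only offload stays on context 0. The shift value and helper name are assumptions chosen for a self-contained example, not the driver's own definitions; the real field placement is given by IXGBE_ADVTXD_IDX_SHIFT in the diff that follows.

        /* Illustrative sketch only; the shift below is an assumed stand-in
         * for IXGBE_ADVTXD_IDX_SHIFT. */
        #include <stdio.h>

        #define ADVTXD_IDX_SHIFT 4   /* assumed position of the context index field */

        static unsigned int select_tx_context(int is_tso, unsigned int field)
        {
                /* TSO uses context 1, checksum-only offload stays on context 0 */
                if (is_tso)
                        field |= (1u << ADVTXD_IDX_SHIFT);
                return field;
        }

        int main(void)
        {
                printf("tso field:  0x%08x\n", select_tx_context(1, 0));
                printf("csum field: 0x%08x\n", select_tx_context(0, 0));
                return 0;
        }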
Signed-off-by: Peter P Waskiewicz Jr Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 28d3321b0dd..198b9d9a3d4 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3126,6 +3126,8 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, mss_l4len_idx |= (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT); mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT); + /* use index 1 for TSO */ + mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); tx_buffer_info->time_stamp = jiffies; @@ -3198,6 +3200,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, } context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); + /* use index zero for tx checksum offload */ context_desc->mss_l4len_idx = 0; tx_buffer_info->time_stamp = jiffies; @@ -3306,6 +3309,8 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, olinfo_status |= IXGBE_TXD_POPTS_TXSM << IXGBE_ADVTXD_POPTS_SHIFT; + /* use index 1 context for tso */ + olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); if (tx_flags & IXGBE_TX_FLAGS_IPV4) olinfo_status |= IXGBE_TXD_POPTS_IXSM << IXGBE_ADVTXD_POPTS_SHIFT; -- cgit v1.2.3 From 3be1adfb912867e244729c3826b457ee76b8f737 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 30 Aug 2008 00:29:10 -0700 Subject: ixgbe: change config srrctl to only program one register per VMDq/RSS id This change makes it so only one srrctl register is programmed per VMDq id, and if VMDq is not enabled, one register per RSS queue. Currently this function works correctly for the multiqueue RSS and single-queue cases, but had any advanced features such as VMDq or DCB been enabled, this function would have caused issues because it was not correct.
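A minimal sketch of the intended mapping, assuming a plain RSS-only configuration: the ring index is masked with the RSS feature mask to pick both the ring whose buffer settings are used ("queue0") and the SRRCTL register that gets programmed. The mask value here is made up for illustration; the actual branch logic, including the VMDq case, is in the diff below.

        /* Sketch only; rss_mask is an example value, not read from hardware. */
        #include <stdio.h>

        int main(void)
        {
                unsigned int rss_mask = 0x7;    /* example: 8 RSS queues */

                for (unsigned int index = 0; index < 8; index++) {
                        /* ring whose buffer-size settings are used */
                        unsigned int queue0 = index & rss_mask;
                        printf("ring %u -> queue0 %u, program SRRCTL[%u]\n",
                               index, queue0, queue0);
                }
                return 0;
        }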
Signed-off-by: Alexander Duyck Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 198b9d9a3d4..99e0b34416e 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1417,18 +1417,23 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) struct ixgbe_ring *rx_ring; u32 srrctl; int queue0; - unsigned long *mask, maskval = 1; - long shift, len; - - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - mask = (unsigned long *) &adapter->ring_feature[RING_F_RSS].mask; - len = sizeof(adapter->ring_feature[RING_F_RSS].mask) * 8; + unsigned long mask; + + /* program one srrctl register per VMDq index */ + if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { + long shift, len; + mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask; + len = sizeof(adapter->ring_feature[RING_F_VMDQ].mask) * 8; + shift = find_first_bit(&mask, len); + queue0 = index & mask; + index = (index & mask) >> shift; + /* program one srrctl per RSS queue since RDRXCTL.MVMEN is enabled */ } else { - mask = &maskval; - len = 1; + mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask; + queue0 = index & mask; + index = index & mask; } - shift = find_first_bit(mask, len); - queue0 = index << shift; + rx_ring = &adapter->rx_ring[queue0]; srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); -- cgit v1.2.3 From af72166f31662850e10d1d1c734654efb2ae4357 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:54:23 -0700 Subject: ixgbe: fix bug where using wake queue instead of start Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 362541aa946..c67211c91cb 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3377,7 +3377,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev, return -EBUSY; /* A reprieve! - use start_queue because it doesn't call schedule */ - netif_wake_subqueue(netdev, tx_ring->queue_index); + netif_start_subqueue(netdev, tx_ring->queue_index); ++adapter->restart_queue; return 0; } -- cgit v1.2.3 From a1f96ee7cce0dce583ff2f32de6376495ef51e4d Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:54:48 -0700 Subject: ixgbe: fix dca defines to not have spaces Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index c67211c91cb..904819586e2 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -80,7 +80,7 @@ static struct pci_device_id ixgbe_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); -#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, void *p); static struct notifier_block dca_notifier = { @@ -309,7 +309,7 @@ done_cleaning: return (total_packets ? 
true : false); } -#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring) { @@ -934,7 +934,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); for (i = 0; i < q_vector->txr_count; i++) { tx_ring = &(adapter->tx_ring[r_idx]); -#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ixgbe_update_tx_dca(adapter, tx_ring); #endif @@ -999,7 +999,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); rx_ring = &(adapter->rx_ring[r_idx]); -#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ixgbe_update_rx_dca(adapter, rx_ring); #endif @@ -2102,7 +2102,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); -#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; dca_remove_requester(&adapter->pdev->dev); @@ -2114,7 +2114,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_clean_all_tx_rings(adapter); ixgbe_clean_all_rx_rings(adapter); -#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) /* since we reset the hardware DCA settings were cleared */ if (dca_add_requester(&adapter->pdev->dev) == 0) { adapter->flags |= IXGBE_FLAG_DCA_ENABLED; @@ -2178,7 +2178,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) struct ixgbe_adapter *adapter = q_vector->adapter; int tx_cleaned = 0, work_done = 0; -#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { ixgbe_update_tx_dca(adapter, adapter->tx_ring); ixgbe_update_rx_dca(adapter, adapter->rx_ring); @@ -3754,7 +3754,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (err) goto err_register; -#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IXGBE_FLAG_DCA_ENABLED; /* always use CB2 mode, difference is masked @@ -3804,7 +3804,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) flush_scheduled_work(); -#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; dca_remove_requester(&pdev->dev); @@ -3937,7 +3937,7 @@ static int __init ixgbe_init_module(void) printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright); -#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) dca_register_notify(&dca_notifier); #endif @@ -3954,13 +3954,13 @@ module_init(ixgbe_init_module); **/ static void __exit ixgbe_exit_module(void) { -#if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) dca_unregister_notify(&dca_notifier); #endif pci_unregister_driver(&ixgbe_driver); } -#if defined(CONFIG_DCA) || defined 
(CONFIG_DCA_MODULE) +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, void *p) { -- cgit v1.2.3 From ce94bf469edf84228771b58489944cf654aeb496 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:55:14 -0700 Subject: ixgbe: fix multicast address update after the most recent patches, the driver was not using the correct iterator for updating the receive address registers (RAR) Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_common.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index 9c0d0a1964e..f5b2617111a 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -896,6 +896,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) { u32 rar_entries = hw->mac.num_rar_entries; + u32 rar; hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", mc_addr[0], mc_addr[1], mc_addr[2], @@ -906,8 +907,8 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) * else put it in the MTA */ if (hw->addr_ctrl.rar_used_count < rar_entries) { - ixgbe_set_rar(hw, hw->addr_ctrl.rar_used_count, - mc_addr, 0, IXGBE_RAH_AV); + rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1; + ixgbe_set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV); hw_dbg(hw, "Added a multicast address to RAR[%d]\n", hw->addr_ctrl.rar_used_count); hw->addr_ctrl.rar_used_count++; -- cgit v1.2.3 From cf8280ee7be3aaf44d32e389f15c725b850e5e32 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:55:32 -0700 Subject: ixgbe: Update watchdog thread to accomodate longerlink_up events This patch updates the link_up code and watchdog thread so that link_up doesn't cause stack overflows due to long waits in interrupt context. 
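The poll-with-timeout pattern that the patch adds to check_link can be sketched in isolation as follows. The register read is replaced by a placeholder and the retry count mirrors the 90 x 100 ms (9 second) loop bounded by IXGBE_LINK_UP_TIME in the diff, so this is only an outline of the control flow, not the hardware access itself.

        /* Outline of the wait-to-complete link poll; read_link_bit() is a
         * placeholder for reading the LINKS register. */
        #include <stdbool.h>
        #include <stdio.h>

        #define LINK_UP_TRIES 90          /* 90 polls x 100 ms = 9 seconds */

        static bool read_link_bit(void)   /* stand-in for the LINKS register read */
        {
                static int calls;
                return ++calls > 3;       /* pretend link comes up on the 4th poll */
        }

        static bool wait_for_link(bool wait_to_complete)
        {
                if (!wait_to_complete)
                        return read_link_bit();

                for (int i = 0; i < LINK_UP_TRIES; i++) {
                        if (read_link_bit())
                                return true;
                        /* the real code sleeps here: msleep(100) */
                }
                return false;
        }

        int main(void)
        {
                printf("link up: %d\n", wait_for_link(true));
                return 0;
        }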
Signed-off-by: Jesse Brandeburg Signed-off-by: Peter P Waskiewicz Jr Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe.h | 6 ++ drivers/net/ixgbe/ixgbe_82598.c | 29 +++++++-- drivers/net/ixgbe/ixgbe_ethtool.c | 2 +- drivers/net/ixgbe/ixgbe_main.c | 129 +++++++++++++++++++++++++------------- drivers/net/ixgbe/ixgbe_type.h | 3 +- 5 files changed, 118 insertions(+), 51 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 90b53830196..2b827a67c9c 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -309,6 +309,12 @@ struct ixgbe_adapter { u64 lro_aggregated; u64 lro_flushed; u64 lro_no_desc; + + u32 link_speed; + bool link_up; + unsigned long link_check_timeout; + + struct work_struct watchdog_task; }; enum ixbge_state_t { diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index ba09063260d..1e014bcc72d 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -47,7 +47,8 @@ static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw); static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, - bool *link_up); + bool *link_up, + bool link_up_wait_to_complete); static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, u32 speed, bool autoneg, bool autoneg_wait_to_complete); @@ -277,20 +278,36 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * @speed: pointer to link speed * @link_up: true is link is up, false otherwise + * @link_up_wait_to_complete: bool used to wait for link up or not * * Reads the links register to determine if link is up and the current speed **/ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, - bool *link_up) + bool *link_up, + bool link_up_wait_to_complete) { u32 links_reg; + u32 i; links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); - if (links_reg & IXGBE_LINKS_UP) - *link_up = true; - else - *link_up = false; + if (link_up_wait_to_complete) { + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + if (links_reg & IXGBE_LINKS_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + } + } else { + if (links_reg & IXGBE_LINKS_UP) + *link_up = true; + else + *link_up = false; + } if (links_reg & IXGBE_LINKS_SPEED) *speed = IXGBE_LINK_SPEED_10GB_FULL; diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 61c000e2309..8f0e3f93e6b 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -130,7 +130,7 @@ static int ixgbe_get_settings(struct net_device *netdev, ecmd->port = PORT_FIBRE; } - adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up); + adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up, false); if (link_up) { ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 
SPEED_10000 : SPEED_1000; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 904819586e2..036393e5383 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -902,6 +902,20 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) return; } + +static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + adapter->lsc_int++; + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); + schedule_work(&adapter->watchdog_task); + } +} + static irqreturn_t ixgbe_msix_lsc(int irq, void *data) { struct net_device *netdev = data; @@ -909,11 +923,8 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) struct ixgbe_hw *hw = &adapter->hw; u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); - if (eicr & IXGBE_EICR_LSC) { - adapter->lsc_int++; - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies); - } + if (eicr & IXGBE_EICR_LSC) + ixgbe_check_lsc(adapter); if (!test_bit(__IXGBE_DOWN, &adapter->state)) IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); @@ -1237,12 +1248,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data) if (!eicr) return IRQ_NONE; /* Not our interrupt */ - if (eicr & IXGBE_EICR_LSC) { - adapter->lsc_int++; - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies); - } - + if (eicr & IXGBE_EICR_LSC) + ixgbe_check_lsc(adapter); if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) { adapter->tx_ring[0].total_packets = 0; @@ -1897,6 +1904,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; mod_timer(&adapter->watchdog_timer, jiffies); return 0; } @@ -2098,6 +2107,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_napi_disable_all(adapter); del_timer_sync(&adapter->watchdog_timer); + cancel_work_sync(&adapter->watchdog_task); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); @@ -3010,27 +3020,74 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) static void ixgbe_watchdog(unsigned long data) { struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - struct net_device *netdev = adapter->netdev; - bool link_up; - u32 link_speed = 0; + struct ixgbe_hw *hw = &adapter->hw; + + /* Do the watchdog outside of interrupt context due to the lovely + * delays that some of the newer hardware requires */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + /* Cause software interrupt to ensure rx rings are cleaned */ + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + u32 eics = + (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1; + IXGBE_WRITE_REG(hw, IXGBE_EICS, eics); + } else { + /* For legacy and MSI interrupts don't set any bits that + * are enabled for EIAM, because this operation would + * set *both* EIMS and EICS for any bit in EIAM */ + IXGBE_WRITE_REG(hw, IXGBE_EICS, + (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); + } + /* Reset the timer */ + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); + } - adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up); + schedule_work(&adapter->watchdog_task); +} + +/** + * ixgbe_watchdog_task - worker thread to bring link up + * @work: pointer to 
work_struct containing our data + **/ +static void ixgbe_watchdog_task(struct work_struct *work) +{ + struct ixgbe_adapter *adapter = container_of(work, + struct ixgbe_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + + adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; + + if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up || + time_after(jiffies, (adapter->link_check_timeout + + IXGBE_TRY_LINK_TIMEOUT))) { + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; + } + adapter->link_up = link_up; + adapter->link_speed = link_speed; + } if (link_up) { if (!netif_carrier_ok(netdev)) { - u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); - u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS); + u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); #define FLOW_RX (frctl & IXGBE_FCTRL_RFCE) #define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X) DPRINTK(LINK, INFO, "NIC Link is Up %s, " - "Flow Control: %s\n", - (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? - "10 Gbps" : - (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? - "1 Gbps" : "unknown speed")), - ((FLOW_RX && FLOW_TX) ? "RX/TX" : - (FLOW_RX ? "RX" : - (FLOW_TX ? "TX" : "None")))); + "Flow Control: %s\n", + (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? + "10 Gbps" : + (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? + "1 Gbps" : "unknown speed")), + ((FLOW_RX && FLOW_TX) ? "RX/TX" : + (FLOW_RX ? "RX" : + (FLOW_TX ? "TX" : "None")))); netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); @@ -3039,6 +3096,8 @@ static void ixgbe_watchdog(unsigned long data) adapter->detect_tx_hung = true; } } else { + adapter->link_up = false; + adapter->link_speed = 0; if (netif_carrier_ok(netdev)) { DPRINTK(LINK, INFO, "NIC Link is Down\n"); netif_carrier_off(netdev); @@ -3047,24 +3106,7 @@ static void ixgbe_watchdog(unsigned long data) } ixgbe_update_stats(adapter); - - if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - /* Cause software interrupt to ensure rx rings are cleaned */ - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - u32 eics = - (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics); - } else { - /* for legacy and MSI interrupts don't set any bits that - * are enabled for EIAM, because this operation would - * set *both* EIMS and EICS for any bit in EIAM */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, - (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); - } - /* Reset the timer */ - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + 2 * HZ)); - } + adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; } static int ixgbe_tso(struct ixgbe_adapter *adapter, @@ -3707,6 +3749,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, adapter->watchdog_timer.data = (unsigned long)adapter; INIT_WORK(&adapter->reset_task, ixgbe_reset_task); + INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task); err = ixgbe_init_interrupt_scheme(adapter); if (err) diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 3e9c483ad8e..172f766acde 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -703,6 +703,7 @@ #define IXGBE_LINKS_TL_FAULT 0x00001000 #define IXGBE_LINKS_SIGNAL 0x00000F00 +#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ #define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ 
/* SW Semaphore Register bitmasks */ @@ -1249,7 +1250,7 @@ struct ixgbe_mac_operations { s32 (*reset)(struct ixgbe_hw *); enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); s32 (*setup_link)(struct ixgbe_hw *); - s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *); + s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *, bool); s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool); s32 (*get_link_settings)(struct ixgbe_hw *, u32 *, bool *); }; -- cgit v1.2.3 From 41fb924866128fbb3fe15aafe7becc7d5ce4504f Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:55:58 -0700 Subject: ixgbe: link change interrupt was not causing link event Upon review a buglet was found where link change was not causing an immediate link change event as it should. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 036393e5383..11bf86b9362 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -758,9 +758,9 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); - /* set up to autoclear timer, lsc, and the vectors */ + /* set up to autoclear timer, and the vectors */ mask = IXGBE_EIMS_ENABLE_MASK; - mask &= ~IXGBE_EIMS_OTHER; + mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); } -- cgit v1.2.3 From f47cf66e9cc778d21533c681e89b4034ed2d8666 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:56:14 -0700 Subject: ixgbe: fix bug with shared interrupts fix ixgbe bug reported with shared legacy interrupts Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 11bf86b9362..cfaa3ca28f5 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1245,8 +1245,13 @@ static irqreturn_t ixgbe_intr(int irq, void *data) /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read * therefore no explict interrupt disable is necessary */ eicr = IXGBE_READ_REG(hw, IXGBE_EICR); - if (!eicr) + if (!eicr) { + /* shared interrupt alert! 
+ * make sure interrupts are enabled because the read will + * have disabled interrupts due to EIAM */ + ixgbe_irq_enable(adapter); return IRQ_NONE; /* Not our interrupt */ + } if (eicr & IXGBE_EICR_LSC) ixgbe_check_lsc(adapter); -- cgit v1.2.3 From 42c783c5b1b755ae2ab2dec720ad58ffc2257a60 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:56:28 -0700 Subject: ixgbe: limit small mtu to minimum for ipv4 support Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index cfaa3ca28f5..2c8895e9dae 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -2839,8 +2839,8 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) struct ixgbe_adapter *adapter = netdev_priv(netdev); int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) || - (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) return -EINVAL; DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", -- cgit v1.2.3 From 98c00a1c5de23295aebe7e46a61d85e05ee82855 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:56:41 -0700 Subject: ixgbe: fix ethtool register dump 1) reading some of the registers in our hardware causes them to clear, so don't read ICR in the ethtool register dump function. 2) several register iterators were not iterating Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_ethtool.c | 23 ++++++++++------------- drivers/net/ixgbe/ixgbe_type.h | 10 ++-------- 2 files changed, 12 insertions(+), 21 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 8f0e3f93e6b..64460b4c50c 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -315,7 +315,9 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); /* Interrupt */ - regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICR); + /* don't read EICR because it can clear interrupt causes, instead + * read EICS which is a shadow but doesn't clear EICR */ + regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); @@ -419,7 +421,6 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT); - /* DCE */ regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); @@ -539,21 +540,17 @@ static void ixgbe_get_regs(struct net_device *netdev, /* Diagnostic */ regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); for (i = 0; i < 8; i++) - regs_buff[1072] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); + regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); - regs_buff[1081] = IXGBE_READ_REG(hw, IXGBE_RIC_DW0); - regs_buff[1082] = IXGBE_READ_REG(hw, IXGBE_RIC_DW1); - regs_buff[1083] = IXGBE_READ_REG(hw, IXGBE_RIC_DW2); - regs_buff[1084] = 
IXGBE_READ_REG(hw, IXGBE_RIC_DW3); + for (i = 0; i < 4; i++) + regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); for (i = 0; i < 8; i++) - regs_buff[1087] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); + regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); - regs_buff[1096] = IXGBE_READ_REG(hw, IXGBE_TIC_DW0); - regs_buff[1097] = IXGBE_READ_REG(hw, IXGBE_TIC_DW1); - regs_buff[1098] = IXGBE_READ_REG(hw, IXGBE_TIC_DW2); - regs_buff[1099] = IXGBE_READ_REG(hw, IXGBE_TIC_DW3); + for (i = 0; i < 4; i++) + regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); @@ -566,7 +563,7 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); for (i = 0; i < 8; i++) - regs_buff[1111] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); + regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 172f766acde..196841b344d 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -278,18 +278,12 @@ #define IXGBE_RDSTATCTL 0x02C20 #define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ #define IXGBE_RDHMPN 0x02F08 -#define IXGBE_RIC_DW0 0x02F10 -#define IXGBE_RIC_DW1 0x02F14 -#define IXGBE_RIC_DW2 0x02F18 -#define IXGBE_RIC_DW3 0x02F1C +#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) #define IXGBE_RDPROBE 0x02F20 #define IXGBE_TDSTATCTL 0x07C20 #define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ #define IXGBE_TDHMPN 0x07F08 -#define IXGBE_TIC_DW0 0x07F10 -#define IXGBE_TIC_DW1 0x07F14 -#define IXGBE_TIC_DW2 0x07F18 -#define IXGBE_TIC_DW3 0x07F1C +#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) #define IXGBE_TDPROBE 0x07F20 #define IXGBE_TXBUFCTRL 0x0C600 #define IXGBE_TXBUFDATA0 0x0C610 -- cgit v1.2.3 From 05857980cf15d96dd9eb47dd444b0634f436ba34 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Thu, 11 Sep 2008 19:57:00 -0700 Subject: ixgbe: fix pci_resource allocation as in other drivers Form: Jesse Brandeburg Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 2c8895e9dae..0269115593e 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3605,7 +3605,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, struct ixgbe_adapter *adapter = NULL; struct ixgbe_hw *hw; const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; - unsigned long mmio_start, mmio_len; static int cards_found; int i, err, pci_using_dac; u16 link_status, link_speed, link_width; @@ -3657,10 +3656,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, hw->back = adapter; adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - mmio_start = pci_resource_start(pdev, 0); - mmio_len = pci_resource_len(pdev, 0); - - 
hw->hw_addr = ioremap(mmio_start, mmio_len); + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); if (!hw->hw_addr) { err = -EIO; goto err_ioremap; @@ -3690,9 +3687,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, #endif strcpy(netdev->name, pci_name(pdev)); - netdev->mem_start = mmio_start; - netdev->mem_end = mmio_start + mmio_len; - adapter->bd_number = cards_found; /* PCI config space info */ -- cgit v1.2.3 From 3d3d6d3cc2ad1e77516f3ad3f79d5988ebc361cf Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:57:17 -0700 Subject: ixgbe: fix ethtool disable csum for ipv6 ethtool was not disabling the correct netif flags when setting checksum disable. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 64460b4c50c..2506f9eae58 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -241,7 +241,7 @@ static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) if (data) netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); else - netdev->features &= ~NETIF_F_IP_CSUM; + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); return 0; } -- cgit v1.2.3 From 30efa5a363d18f1c284455879cb67fb1bf547bdc Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:58:14 -0700 Subject: ixgbe: fix initial interrupt throttle settings ixgbe was incorrectly setting the throttle rate setting for all tx queues and the driver has been refreshed to better handle a dynamic interrupt mode as well as multiple queues. 
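To make the new coalescing translation easier to follow, here is a self-contained sketch of how rx_coalesce_usecs maps onto eitr_param and itr_setting in the ethtool path of this patch: 0 disables throttling, 1 selects dynamic mode, and anything larger becomes a fixed interrupts-per-second target. The constants mirror the values in the diff; the surrounding structure and function are illustrative only.

        /* Sketch of the ethtool coalescing translation; not driver code. */
        #include <stdio.h>

        struct itr_cfg { unsigned int eitr_param; unsigned int itr_setting; };

        static struct itr_cfg usecs_to_itr(unsigned int rx_usecs)
        {
                struct itr_cfg c;

                if (rx_usecs > 1) {
                        c.eitr_param  = 1000000 / rx_usecs;  /* ints/second */
                        c.itr_setting = c.eitr_param & ~1u;  /* fixed rate, clear low bit */
                } else if (rx_usecs == 1) {
                        c.eitr_param  = 20000;               /* dynamic ITR default */
                        c.itr_setting = 1;
                } else {
                        c.eitr_param  = 3000000;             /* throttling effectively off */
                        c.itr_setting = 0;
                }
                return c;
        }

        int main(void)
        {
                struct itr_cfg c = usecs_to_itr(125);
                printf("eitr_param=%u itr_setting=%u\n", c.eitr_param, c.itr_setting);
                return 0;
        }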
Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe.h | 23 ++++------- drivers/net/ixgbe/ixgbe_ethtool.c | 86 +++++++++++++++++++++------------------ drivers/net/ixgbe/ixgbe_main.c | 80 ++++++++++++++++++++---------------- 3 files changed, 100 insertions(+), 89 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 2b827a67c9c..5ba03845c3e 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -62,11 +62,6 @@ #define IXGBE_MAX_RXQ 1 #define IXGBE_MIN_RXQ 1 -#define IXGBE_DEFAULT_ITR_RX_USECS 125 /* 8k irqs/sec */ -#define IXGBE_DEFAULT_ITR_TX_USECS 250 /* 4k irqs/sec */ -#define IXGBE_MIN_ITR_USECS 100 /* 500k irqs/sec */ -#define IXGBE_MAX_ITR_USECS 10000 /* 100 irqs/sec */ - /* flow control */ #define IXGBE_DEFAULT_FCRTL 0x10000 #define IXGBE_MIN_FCRTL 0x40 @@ -161,10 +156,7 @@ struct ixgbe_ring { * vector array, can also be used for finding the bit in EICR * and friends that represents the vector for this ring */ - u32 eims_value; - u16 itr_register; - char name[IFNAMSIZ + 5]; u16 work_limit; /* max work per interrupt */ u16 rx_buf_len; }; @@ -191,8 +183,8 @@ struct ixgbe_q_vector { DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */ u8 rxr_count; /* Rx ring count assigned to this vector */ u8 txr_count; /* Tx ring count assigned to this vector */ - u8 tx_eitr; - u8 rx_eitr; + u8 tx_itr; + u8 rx_itr; u32 eitr; }; @@ -240,7 +232,9 @@ struct ixgbe_adapter { /* TX */ struct ixgbe_ring *tx_ring; /* One per active queue */ + int num_tx_queues; u64 restart_queue; + u64 hw_csum_tx_good; u64 lsc_int; u64 hw_tso_ctxt; u64 hw_tso6_ctxt; @@ -249,12 +243,10 @@ struct ixgbe_adapter { /* RX */ struct ixgbe_ring *rx_ring; /* One per active queue */ - u64 hw_csum_tx_good; + int num_rx_queues; u64 hw_csum_rx_error; u64 hw_csum_rx_good; u64 non_eop_descs; - int num_tx_queues; - int num_rx_queues; int num_msix_vectors; struct ixgbe_ring_feature ring_feature[3]; struct msix_entry *msix_entries; @@ -301,14 +293,15 @@ struct ixgbe_adapter { struct ixgbe_hw_stats stats; /* Interrupt Throttle Rate */ - u32 rx_eitr; - u32 tx_eitr; + u32 eitr_param; unsigned long state; u64 tx_busy; u64 lro_aggregated; u64 lro_flushed; u64 lro_no_desc; + unsigned int tx_ring_count; + unsigned int rx_ring_count; u32 link_speed; bool link_up; diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 2506f9eae58..f18e3daaf4f 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -880,17 +880,23 @@ static int ixgbe_get_coalesce(struct net_device *netdev, { struct ixgbe_adapter *adapter = netdev_priv(netdev); - if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS) - ec->rx_coalesce_usecs = adapter->rx_eitr; - else - ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr; - - if (adapter->tx_eitr < IXGBE_MIN_ITR_USECS) - ec->tx_coalesce_usecs = adapter->tx_eitr; - else - ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr; - ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit; + + /* only valid if in constant ITR mode */ + switch (adapter->itr_setting) { + case 0: + /* throttling disabled */ + ec->rx_coalesce_usecs = 0; + break; + case 1: + /* dynamic ITR mode */ + ec->rx_coalesce_usecs = 1; + break; + default: + /* fixed interrupt rate mode */ + ec->rx_coalesce_usecs = 1000000/adapter->eitr_param; + break; + } return 0; } @@ -898,38 +904,40 @@ static int ixgbe_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { struct 
ixgbe_adapter *adapter = netdev_priv(netdev); - - if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) || - ((ec->rx_coalesce_usecs != 0) && - (ec->rx_coalesce_usecs != 1) && - (ec->rx_coalesce_usecs != 3) && - (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS))) - return -EINVAL; - if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) || - ((ec->tx_coalesce_usecs != 0) && - (ec->tx_coalesce_usecs != 1) && - (ec->tx_coalesce_usecs != 3) && - (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS))) - return -EINVAL; - - /* convert to rate of irq's per second */ - if (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS) - adapter->rx_eitr = ec->rx_coalesce_usecs; - else - adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs); - - if (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS) - adapter->tx_eitr = ec->rx_coalesce_usecs; - else - adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs); + struct ixgbe_hw *hw = &adapter->hw; + int i; if (ec->tx_max_coalesced_frames_irq) - adapter->tx_ring[0].work_limit = - ec->tx_max_coalesced_frames_irq; + adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq; + + if (ec->rx_coalesce_usecs > 1) { + /* store the value in ints/second */ + adapter->eitr_param = 1000000/ec->rx_coalesce_usecs; + + /* static value of interrupt rate */ + adapter->itr_setting = adapter->eitr_param; + /* clear the lower bit */ + adapter->itr_setting &= ~1; + } else if (ec->rx_coalesce_usecs == 1) { + /* 1 means dynamic mode */ + adapter->eitr_param = 20000; + adapter->itr_setting = 1; + } else { + /* any other value means disable eitr, which is best + * served by setting the interrupt rate very high */ + adapter->eitr_param = 3000000; + adapter->itr_setting = 0; + } - if (netif_running(netdev)) { - ixgbe_down(adapter); - ixgbe_up(adapter); + for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { + struct ixgbe_q_vector *q_vector = &adapter->q_vector[i]; + if (q_vector->txr_count && !q_vector->rxr_count) + q_vector->eitr = (adapter->eitr_param >> 1); + else + /* rx only or mixed */ + q_vector->eitr = adapter->eitr_param; + IXGBE_WRITE_REG(hw, IXGBE_EITR(i), + EITR_INTS_PER_SEC_TO_REG(q_vector->eitr)); } return 0; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 0269115593e..ef965c99684 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -744,12 +744,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) r_idx + 1); } - /* if this is a tx only vector use half the irq (tx) rate */ + /* if this is a tx only vector halve the interrupt rate */ if (q_vector->txr_count && !q_vector->rxr_count) - q_vector->eitr = adapter->tx_eitr; + q_vector->eitr = (adapter->eitr_param >> 1); else - /* rx only or mixed */ - q_vector->eitr = adapter->rx_eitr; + /* rx only */ + q_vector->eitr = adapter->eitr_param; IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), EITR_INTS_PER_SEC_TO_REG(q_vector->eitr)); @@ -845,13 +845,13 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) for (i = 0; i < q_vector->txr_count; i++) { tx_ring = &(adapter->tx_ring[r_idx]); ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, - q_vector->tx_eitr, + q_vector->tx_itr, tx_ring->total_packets, tx_ring->total_bytes); /* if the result for this queue would decrease interrupt * rate for this vector then use that result */ - q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ? - q_vector->tx_eitr - 1 : ret_itr); + q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ? 
+ q_vector->tx_itr - 1 : ret_itr); r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, r_idx + 1); } @@ -860,18 +860,18 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) for (i = 0; i < q_vector->rxr_count; i++) { rx_ring = &(adapter->rx_ring[r_idx]); ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, - q_vector->rx_eitr, + q_vector->rx_itr, rx_ring->total_packets, rx_ring->total_bytes); /* if the result for this queue would decrease interrupt * rate for this vector then use that result */ - q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ? - q_vector->rx_eitr - 1 : ret_itr); + q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ? + q_vector->rx_itr - 1 : ret_itr); r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, r_idx + 1); } - current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr); + current_itr = max(q_vector->rx_itr, q_vector->tx_itr); switch (current_itr) { /* counts and packets in update_itr are dependent on these numbers */ @@ -970,16 +970,24 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_ring *rx_ring; int r_idx; + int i; r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); + for (i = 0; i < q_vector->rxr_count; i++) { + rx_ring = &(adapter->rx_ring[r_idx]); + rx_ring->total_bytes = 0; + rx_ring->total_packets = 0; + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, + r_idx + 1); + } + if (!q_vector->rxr_count) return IRQ_HANDLED; + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); rx_ring = &(adapter->rx_ring[r_idx]); /* disable interrupts on this vector only */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx); - rx_ring->total_bytes = 0; - rx_ring->total_packets = 0; netif_rx_schedule(adapter->netdev, &q_vector->napi); return IRQ_HANDLED; @@ -1020,7 +1028,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) /* If all Rx work done, exit the polling mode */ if (work_done < budget) { netif_rx_complete(adapter->netdev, napi); - if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS) + if (adapter->itr_setting & 3) ixgbe_set_itr_msix(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx); @@ -1187,16 +1195,16 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter) struct ixgbe_ring *rx_ring = &adapter->rx_ring[0]; struct ixgbe_ring *tx_ring = &adapter->tx_ring[0]; - q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr, - q_vector->tx_eitr, - tx_ring->total_packets, - tx_ring->total_bytes); - q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr, - q_vector->rx_eitr, - rx_ring->total_packets, - rx_ring->total_bytes); + q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, + q_vector->tx_itr, + tx_ring->total_packets, + tx_ring->total_bytes); + q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr, + q_vector->rx_itr, + rx_ring->total_packets, + rx_ring->total_bytes); - current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr); + current_itr = max(q_vector->rx_itr, q_vector->tx_itr); switch (current_itr) { /* counts and packets in update_itr are dependent on these numbers */ @@ -1371,7 +1379,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; IXGBE_WRITE_REG(hw, IXGBE_EITR(0), - EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr)); + EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param)); ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0); ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0); @@ 
-2209,7 +2217,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) /* If budget not fully consumed, exit the polling mode */ if (work_done < budget) { netif_rx_complete(adapter->netdev, napi); - if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS) + if (adapter->itr_setting & 3) ixgbe_set_itr(adapter); if (!test_bit(__IXGBE_DOWN, &adapter->state)) ixgbe_irq_enable(adapter); @@ -2420,12 +2428,6 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter int err = 0; int vector, v_budget; - /* - * Set the default interrupt throttle rate. - */ - adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS); - adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS); - /* * It's easy to be greedy for MSI-X vectors, but it really * doesn't do us much good if we have a lot more vectors @@ -2567,10 +2569,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->ring_feature[RING_F_RSS].indices = rss; adapter->flags |= IXGBE_FLAG_RSS_ENABLED; - /* Enable Dynamic interrupt throttling by default */ - adapter->rx_eitr = 1; - adapter->tx_eitr = 1; - /* default flow control settings */ hw->fc.original_type = ixgbe_fc_none; hw->fc.type = ixgbe_fc_none; @@ -2591,6 +2589,18 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) return -EIO; } + /* enable itr by default in dynamic mode */ + adapter->itr_setting = 1; + adapter->eitr_param = 20000; + + /* set defaults for eitr in MegaBytes */ + adapter->eitr_low = 10; + adapter->eitr_high = 20; + + /* set default ring sizes */ + adapter->tx_ring_count = IXGBE_DEFAULT_TXD; + adapter->rx_ring_count = IXGBE_DEFAULT_RXD; + /* initialize eeprom parameters */ if (ixgbe_init_eeprom(hw)) { dev_err(&pdev->dev, "EEPROM initialization failed\n"); -- cgit v1.2.3 From ff819cfb5d95c4945811f5e33aa57274885c7527 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:58:29 -0700 Subject: ixgbe: fix bug with lots of tx queues when using more than 8 tx queues you can overrun the 8 bit v_idx field, so change it to 16 bits to represent the maximum number of queues (one for each bit) Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 5ba03845c3e..064af675a94 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -152,9 +152,9 @@ struct ixgbe_ring { struct net_lro_mgr lro_mgr; bool lro_used; struct ixgbe_queue_stats stats; - u8 v_idx; /* maps directly to the index for this ring in the hardware - * vector array, can also be used for finding the bit in EICR - * and friends that represents the vector for this ring */ + u16 v_idx; /* maps directly to the index for this ring in the hardware + * vector array, can also be used for finding the bit in EICR + * and friends that represents the vector for this ring */ u16 work_limit; /* max work per interrupt */ -- cgit v1.2.3 From 762f4c57105853d1cbad3b96ef18aa23beff3db2 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:58:43 -0700 Subject: ixgbe: recycle pages in packet split mode most of the time we only need 1500 bytes for a packet which means we don't need a whole 4k page for each packet. Share the allocation by using a reference count to the page and giving half to two receive descriptors. 
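A minimal userspace sketch of the sharing scheme just described may help; it is an illustration only, assuming a plain refcounted buffer is a fair stand-in for a kernel page. The offset is XOR-ed between 0 and half the buffer so two descriptors each get 2 KB, and the buffer is kept for reuse only while the ring still holds the sole reference. struct fake_page, use_half() and give_half_to_consumer() are hypothetical names, not driver or kernel API.

#include <stdio.h>
#include <stdlib.h>

#define FAKE_PAGE_SIZE 4096u

struct fake_page {
	int refs;                       /* models page_count() */
	unsigned char data[FAKE_PAGE_SIZE];
};

struct rx_buf {
	struct fake_page *page;         /* models the per-descriptor page pointer */
	unsigned int page_offset;       /* 0 or FAKE_PAGE_SIZE / 2 */
};

static void put_fake_page(struct fake_page *p)
{
	if (--p->refs == 0)
		free(p);
}

/* Hand the next 2 KB half to a receive descriptor. */
static unsigned char *use_half(struct rx_buf *buf)
{
	if (!buf->page) {
		buf->page = calloc(1, sizeof(*buf->page));
		if (!buf->page)
			return NULL;
		buf->page->refs = 1;            /* the ring's own reference */
		buf->page_offset = 0;
	} else {
		/* Re-using a page we still own: flip to the other half. */
		buf->page_offset ^= FAKE_PAGE_SIZE / 2;
	}
	return buf->page->data + buf->page_offset;
}

/* The received half now belongs to a consumer (the skb, in the driver). */
static void give_half_to_consumer(struct rx_buf *buf)
{
	if (buf->page->refs != 1) {
		/* Both halves are out: hand our reference to the consumer
		 * and force a fresh allocation next time. */
		buf->page = NULL;
	} else {
		/* Keep one reference so the other half can be reused. */
		buf->page->refs++;
	}
}

int main(void)
{
	struct rx_buf buf = { 0 };
	struct fake_page *consumer_ref[2];

	if (!use_half(&buf))            /* first half, offset 0 */
		return 1;
	consumer_ref[0] = buf.page;
	give_half_to_consumer(&buf);    /* page kept, refs == 2 */

	use_half(&buf);                 /* same page, offset 2048 */
	consumer_ref[1] = buf.page;
	give_half_to_consumer(&buf);    /* page released, buf.page == NULL */

	printf("page reused for second descriptor: %s\n",
	       consumer_ref[0] == consumer_ref[1] ? "yes" : "no");

	put_fake_page(consumer_ref[0]); /* consumers drop their halves */
	put_fake_page(consumer_ref[1]); /* last put frees the page */
	return 0;
}

In effect every page is handed out twice before another allocation and DMA mapping is needed.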
This can enable us to use packet split mode all the time due to the performance increase of allocating half the pages. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe.h | 1 + drivers/net/ixgbe/ixgbe_main.c | 49 ++++++++++++++++++++++++++---------------- 2 files changed, 32 insertions(+), 18 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 064af675a94..71ddac6ac4f 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -114,6 +114,7 @@ struct ixgbe_rx_buffer { dma_addr_t dma; struct page *page; dma_addr_t page_dma; + unsigned int page_offset; }; struct ixgbe_queue_stats { diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index ef965c99684..5858ab2b48f 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -493,16 +493,24 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, while (cleaned_count--) { rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); - if (!bi->page && + if (!bi->page_dma && (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { - bi->page = alloc_page(GFP_ATOMIC); if (!bi->page) { - adapter->alloc_rx_page_failed++; - goto no_buffers; + bi->page = alloc_page(GFP_ATOMIC); + if (!bi->page) { + adapter->alloc_rx_page_failed++; + goto no_buffers; + } + bi->page_offset = 0; + } else { + /* use a half page if we're re-using */ + bi->page_offset ^= (PAGE_SIZE / 2); } - bi->page_dma = pci_map_page(pdev, bi->page, 0, - PAGE_SIZE, - PCI_DMA_FROMDEVICE); + + bi->page_dma = pci_map_page(pdev, bi->page, + bi->page_offset, + (PAGE_SIZE / 2), + PCI_DMA_FROMDEVICE); } if (!bi->skb) { @@ -596,7 +604,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> - IXGBE_RXDADV_HDRBUFLEN_SHIFT; + IXGBE_RXDADV_HDRBUFLEN_SHIFT; if (hdr_info & IXGBE_RXDADV_SPH) adapter->rx_hdr_split++; if (len > IXGBE_RX_HDR_SIZE) @@ -620,11 +628,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, if (upper_len) { pci_unmap_page(pdev, rx_buffer_info->page_dma, - PAGE_SIZE, PCI_DMA_FROMDEVICE); + PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); rx_buffer_info->page_dma = 0; skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_buffer_info->page, 0, upper_len); - rx_buffer_info->page = NULL; + rx_buffer_info->page, + rx_buffer_info->page_offset, + upper_len); + + if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) || + (page_count(rx_buffer_info->page) != 1)) + rx_buffer_info->page = NULL; + else + get_page(rx_buffer_info->page); skb->len += upper_len; skb->data_len += upper_len; @@ -647,6 +662,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, rx_buffer_info->skb = next_buffer->skb; rx_buffer_info->dma = next_buffer->dma; next_buffer->skb = skb; + next_buffer->dma = 0; adapter->non_eop_descs++; goto next_desc; } @@ -1534,10 +1550,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) int rx_buf_len; /* Decide whether to use packet split mode or not */ - if (netdev->mtu > ETH_DATA_LEN) - adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; - else - adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; + adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; /* Set the RX buffer length according to the mode */ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { @@ -2018,12 +2031,12 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, } if 
(!rx_buffer_info->page) continue; - pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2, + PCI_DMA_FROMDEVICE); rx_buffer_info->page_dma = 0; - put_page(rx_buffer_info->page); rx_buffer_info->page = NULL; + rx_buffer_info->page_offset = 0; } size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; -- cgit v1.2.3 From b95f5fcb8ba6073a652927d232a7a7cb552afe62 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:58:59 -0700 Subject: ixgbe: add device support for XF LR adapters Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_82598.c | 1 + drivers/net/ixgbe/ixgbe_main.c | 2 ++ drivers/net/ixgbe/ixgbe_type.h | 1 + 3 files changed, 4 insertions(+) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index 1e014bcc72d..07261406cd6 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -196,6 +196,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82598AF_SINGLE_PORT: case IXGBE_DEV_ID_82598EB_CX4: case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: + case IXGBE_DEV_ID_82598EB_XF_LR: media_type = ixgbe_media_type_fiber; break; case IXGBE_DEV_ID_82598AT_DUAL_PORT: diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 5858ab2b48f..3b0481a9979 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -74,6 +74,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = { board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), + board_82598 }, /* required last entry */ {0, } diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 196841b344d..7057aa3f393 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -40,6 +40,7 @@ #define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8 #define IXGBE_DEV_ID_82598EB_CX4 0x10DD #define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC +#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 /* General Registers */ #define IXGBE_CTRL 0x00000 -- cgit v1.2.3 From c431f97ef96026e6da7032a871a0789cf5a2eaea Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:59:16 -0700 Subject: ixgbe: fix ring reallocation in ethtool changing ring sizes in ethtool needs to be robust. If an allocation fails the driver must continue operation, with the previous settings. 
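The replacement code below achieves this with a swap-on-success pattern: build every new ring in a temporary array and free the old resources only after all allocations have succeeded. Here is a self-contained userspace sketch of that pattern, offered purely as an illustration; struct resize_ring, setup_ring() and free_ring() are simplified hypothetical stand-ins for the ixgbe ring helpers.

#include <stdlib.h>
#include <string.h>

struct resize_ring {
	unsigned int count;             /* requested descriptor count */
	void *desc;                     /* stand-in for the descriptor memory */
};

static int setup_ring(struct resize_ring *r)
{
	r->desc = calloc(r->count, 16); /* pretend a descriptor is 16 bytes */
	return r->desc ? 0 : -1;
}

static void free_ring(struct resize_ring *r)
{
	free(r->desc);
	r->desc = NULL;
}

/*
 * Resize every ring to new_count.  The old rings are left untouched unless
 * every new allocation succeeds, so a failed resize keeps the device usable
 * with its previous settings.
 */
static int resize_rings(struct resize_ring *rings, int num, unsigned int new_count)
{
	struct resize_ring *temp;
	int i;

	temp = malloc(num * sizeof(*temp));
	if (!temp)
		return -1;
	memcpy(temp, rings, num * sizeof(*temp));

	for (i = 0; i < num; i++) {
		temp[i].count = new_count;
		if (setup_ring(&temp[i]) != 0) {
			while (i--)
				free_ring(&temp[i]);    /* unwind partial work */
			free(temp);
			return -1;                      /* old rings still valid */
		}
	}

	/* Everything allocated: now it is safe to drop the old resources
	 * and publish the new ones. */
	for (i = 0; i < num; i++)
		free_ring(&rings[i]);
	memcpy(rings, temp, num * sizeof(*temp));
	free(temp);
	return 0;
}

int main(void)
{
	struct resize_ring rings[2] = { { .count = 512 }, { .count = 512 } };
	int i, err;

	for (i = 0; i < 2; i++)
		if (setup_ring(&rings[i]) != 0)
			return 1;

	err = resize_rings(rings, 2, 1024); /* grow both rings, or keep 512 */

	for (i = 0; i < 2; i++)
		free_ring(&rings[i]);
	return err ? 1 : 0;
}

The hunks that follow implement the same shape, with ixgbe_setup_tx_resources()/ixgbe_setup_rx_resources() doing the allocation and the newly exported ixgbe_free_tx_resources()/ixgbe_free_rx_resources() handling both the unwind on failure and the release of the old rings on success.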
Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe.h | 4 ++ drivers/net/ixgbe/ixgbe_ethtool.c | 95 +++++++++++++++++++-------------------- drivers/net/ixgbe/ixgbe_main.c | 10 ++--- 3 files changed, 56 insertions(+), 53 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 71ddac6ac4f..27db64f5c86 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -336,5 +336,9 @@ extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, struct ixgbe_ring *rxdr); extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, struct ixgbe_ring *txdr); +extern void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rxdr); +extern void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *txdr); #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index f18e3daaf4f..928b97cc170 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -654,12 +654,9 @@ static int ixgbe_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_tx_buffer *old_buf; - struct ixgbe_rx_buffer *old_rx_buf; - void *old_desc; + struct ixgbe_ring *temp_ring; int i, err; - u32 new_rx_count, new_tx_count, old_size; - dma_addr_t old_dma; + u32 new_rx_count, new_tx_count; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; @@ -678,6 +675,15 @@ static int ixgbe_set_ringparam(struct net_device *netdev, return 0; } + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(adapter->num_tx_queues * + sizeof(struct ixgbe_ring)); + else + temp_ring = vmalloc(adapter->num_rx_queues * + sizeof(struct ixgbe_ring)); + if (!temp_ring) + return -ENOMEM; + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) msleep(1); @@ -690,66 +696,59 @@ static int ixgbe_set_ringparam(struct net_device *netdev, * to the tx and rx ring structs. 
*/ if (new_tx_count != adapter->tx_ring->count) { + memcpy(temp_ring, adapter->tx_ring, + adapter->num_tx_queues * sizeof(struct ixgbe_ring)); + for (i = 0; i < adapter->num_tx_queues; i++) { - /* Save existing descriptor ring */ - old_buf = adapter->tx_ring[i].tx_buffer_info; - old_desc = adapter->tx_ring[i].desc; - old_size = adapter->tx_ring[i].size; - old_dma = adapter->tx_ring[i].dma; - /* Try to allocate a new one */ - adapter->tx_ring[i].tx_buffer_info = NULL; - adapter->tx_ring[i].desc = NULL; - adapter->tx_ring[i].count = new_tx_count; - err = ixgbe_setup_tx_resources(adapter, - &adapter->tx_ring[i]); + temp_ring[i].count = new_tx_count; + err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]); if (err) { - /* Restore the old one so at least - the adapter still works, even if - we failed the request */ - adapter->tx_ring[i].tx_buffer_info = old_buf; - adapter->tx_ring[i].desc = old_desc; - adapter->tx_ring[i].size = old_size; - adapter->tx_ring[i].dma = old_dma; + while (i) { + i--; + ixgbe_free_tx_resources(adapter, &temp_ring[i]); + } goto err_setup; } - /* Free the old buffer manually */ - vfree(old_buf); - pci_free_consistent(adapter->pdev, old_size, - old_desc, old_dma); } + + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]); + + memcpy(adapter->tx_ring, temp_ring, + adapter->num_tx_queues * sizeof(struct ixgbe_ring)); + + adapter->tx_ring_count = new_tx_count; } if (new_rx_count != adapter->rx_ring->count) { - for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(temp_ring, adapter->rx_ring, + adapter->num_rx_queues * sizeof(struct ixgbe_ring)); - old_rx_buf = adapter->rx_ring[i].rx_buffer_info; - old_desc = adapter->rx_ring[i].desc; - old_size = adapter->rx_ring[i].size; - old_dma = adapter->rx_ring[i].dma; - - adapter->rx_ring[i].rx_buffer_info = NULL; - adapter->rx_ring[i].desc = NULL; - adapter->rx_ring[i].dma = 0; - adapter->rx_ring[i].count = new_rx_count; - err = ixgbe_setup_rx_resources(adapter, - &adapter->rx_ring[i]); + for (i = 0; i < adapter->num_rx_queues; i++) { + temp_ring[i].count = new_rx_count; + err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]); if (err) { - adapter->rx_ring[i].rx_buffer_info = old_rx_buf; - adapter->rx_ring[i].desc = old_desc; - adapter->rx_ring[i].size = old_size; - adapter->rx_ring[i].dma = old_dma; + while (i) { + i--; + ixgbe_free_rx_resources(adapter, &temp_ring[i]); + } goto err_setup; } - - vfree(old_rx_buf); - pci_free_consistent(adapter->pdev, old_size, old_desc, - old_dma); } + + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]); + + memcpy(adapter->rx_ring, temp_ring, + adapter->num_rx_queues * sizeof(struct ixgbe_ring)); + + adapter->rx_ring_count = new_rx_count; } + /* success! 
*/ err = 0; err_setup: - if (netif_running(adapter->netdev)) + if (netif_running(netdev)) ixgbe_up(adapter); clear_bit(__IXGBE_RESETTING, &adapter->state); diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 3b0481a9979..cde5d5a5a9a 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -2731,8 +2731,8 @@ alloc_failed: * * Free all transmit software resources **/ -static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) +void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) { struct pci_dev *pdev = adapter->pdev; @@ -2761,14 +2761,14 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) } /** - * ixgbe_free_rx_resources - Free Rx Resources + * ixgbe_ree_rx_resources - Free Rx Resources * @adapter: board private structure * @rx_ring: ring to clean the resources from * * Free all receive software resources **/ -static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring) +void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring) { struct pci_dev *pdev = adapter->pdev; -- cgit v1.2.3 From f6af803f0b7c8e46d72156b042e105b4d481b6c3 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:59:29 -0700 Subject: ixgbe: lro stats were not counted on first ethtool -Scall Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_ethtool.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 928b97cc170..4701abf3a59 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -774,6 +774,14 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, int j, k; int i; u64 aggregated = 0, flushed = 0, no_desc = 0; + for (i = 0; i < adapter->num_rx_queues; i++) { + aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated; + flushed += adapter->rx_ring[i].lro_mgr.stats.flushed; + no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc; + } + adapter->lro_aggregated = aggregated; + adapter->lro_flushed = flushed; + adapter->lro_no_desc = no_desc; ixgbe_update_stats(adapter); for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { @@ -788,17 +796,11 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, i += k; } for (j = 0; j < adapter->num_rx_queues; j++) { - aggregated += adapter->rx_ring[j].lro_mgr.stats.aggregated; - flushed += adapter->rx_ring[j].lro_mgr.stats.flushed; - no_desc += adapter->rx_ring[j].lro_mgr.stats.no_desc; queue_stat = (u64 *)&adapter->rx_ring[j].stats; for (k = 0; k < stat_count; k++) data[i + k] = queue_stat[k]; i += k; } - adapter->lro_aggregated = aggregated; - adapter->lro_flushed = flushed; - adapter->lro_no_desc = no_desc; } static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, -- cgit v1.2.3 From f08482766b7e3c0b2aaac4b68b30f33a91703aa3 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:59:42 -0700 Subject: ixgbe: add clean rx many routine in some configurations there can be more than one rx queue per vector in msi-x mode. Add functionality to be able to clean this without changing the performance path single-rx-queue cleanup. 
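The shape of the new routine is easier to see in isolation, so here is a simplified userspace model (not the driver code): the poll budget is divided evenly across the rx queues owned by one vector, never dropping below 1, and each serviced ring's v_idx bit is OR-ed into one mask so all of them can be re-armed with a single interrupt-enable write once the vector goes idle. struct model_queue, poll_queue() and poll_many() are hypothetical names.

#include <stdio.h>

struct model_queue {
	unsigned int v_idx;     /* bit this ring owns in the (modelled) EIMS */
	int backlog;            /* packets currently waiting on this ring */
};

/* Clean at most 'budget' packets from one queue, return how many were done. */
static int poll_queue(struct model_queue *q, int budget)
{
	int done = q->backlog < budget ? q->backlog : budget;

	q->backlog -= done;
	return done;
}

static int poll_many(struct model_queue *queues, int nqueues, int budget)
{
	unsigned int enable_mask = 0;
	int work_done = 0;
	int per_queue, i;

	/* Split the budget fairly, but never let it reach zero or a busy
	 * queue could never report progress. */
	per_queue = budget / (nqueues ? nqueues : 1);
	if (per_queue < 1)
		per_queue = 1;

	for (i = 0; i < nqueues; i++) {
		work_done += poll_queue(&queues[i], per_queue);
		enable_mask |= queues[i].v_idx;
	}

	if (work_done == 0)
		printf("idle: leave polling, re-enable IRQ bits 0x%x\n",
		       enable_mask);
	return work_done;
}

int main(void)
{
	struct model_queue queues[] = {
		{ .v_idx = 1u << 0, .backlog = 5 },
		{ .v_idx = 1u << 1, .backlog = 20 },
	};

	printf("first pass cleaned %d packets\n", poll_many(queues, 2, 64));
	printf("second pass cleaned %d packets\n", poll_many(queues, 2, 64));
	return 0;
}

The existing single-queue ixgbe_clean_rxonly() fast path is untouched; the patch below only switches napi->poll to ixgbe_clean_rxonly_many() when a vector actually owns more than one rx queue.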
Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 62 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 60 insertions(+), 2 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index cde5d5a5a9a..e18afa4e195 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1024,13 +1024,15 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) * @napi: napi struct with our devices info in it * @budget: amount of work driver is allowed to do this pass, in packets * + * This function is optimized for cleaning one queue only on a single + * q_vector!!! **/ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) { struct ixgbe_q_vector *q_vector = container_of(napi, struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *rx_ring; + struct ixgbe_ring *rx_ring = NULL; int work_done = 0; long r_idx; @@ -1055,6 +1057,56 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) return work_done; } +/** + * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean more than one rx queue associated with a + * q_vector. + **/ +static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) +{ + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + struct net_device *netdev = adapter->netdev; + struct ixgbe_ring *rx_ring = NULL; + int work_done = 0, i; + long r_idx; + u16 enable_mask = 0; + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling */ + budget /= (q_vector->rxr_count ?: 1); + budget = max(budget, 1); + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); + for (i = 0; i < q_vector->rxr_count; i++) { + rx_ring = &(adapter->rx_ring[r_idx]); +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_rx_dca(adapter, rx_ring); +#endif + ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget); + enable_mask |= rx_ring->v_idx; + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, + r_idx + 1); + } + + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); + rx_ring = &(adapter->rx_ring[r_idx]); + /* If all Rx work done, exit the polling mode */ + if ((work_done == 0) || !netif_running(netdev)) { + netif_rx_complete(netdev, napi); + if (adapter->itr_setting & 3) + ixgbe_set_itr_msix(q_vector); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask); + return 0; + } + + return work_done; +} static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, int r_idx) { @@ -1813,10 +1865,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) q_vectors = 1; for (q_idx = 0; q_idx < q_vectors; q_idx++) { + struct napi_struct *napi; q_vector = &adapter->q_vector[q_idx]; if (!q_vector->rxr_count) continue; - napi_enable(&q_vector->napi); + napi = &q_vector->napi; + if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) && + (q_vector->rxr_count > 1)) + napi->poll = &ixgbe_clean_rxonly_many; + + napi_enable(napi); } } -- cgit v1.2.3 From 
c44ade9ef8ffd73cb8b026065ade78bc0040f0b4 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 19:59:59 -0700 Subject: ixgbe: update to latest common code module This is a massive update that includes infrastructure for further patches where we will add support for more phy types and eeprom types. This code is shared as much as possible with other drivers, so the code may seem a little obtuse at times but wherever possible we keep to the linux style and methods. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe.h | 2 - drivers/net/ixgbe/ixgbe_82598.c | 600 +++++++++++++++++++++---- drivers/net/ixgbe/ixgbe_common.c | 914 +++++++++++++++++++++++++------------- drivers/net/ixgbe/ixgbe_common.h | 59 +-- drivers/net/ixgbe/ixgbe_ethtool.c | 20 +- drivers/net/ixgbe/ixgbe_main.c | 105 +++-- drivers/net/ixgbe/ixgbe_phy.c | 241 ++++------ drivers/net/ixgbe/ixgbe_phy.h | 60 ++- drivers/net/ixgbe/ixgbe_type.h | 513 +++++++++++++-------- 9 files changed, 1700 insertions(+), 814 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 27db64f5c86..2388af24672 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -41,8 +41,6 @@ #include #endif -#define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args) - #define PFX "ixgbe: " #define DPRINTK(nlevel, klevel, fmt, args...) \ ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index 07261406cd6..a08a267f166 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -39,68 +39,80 @@ #define IXGBE_82598_MC_TBL_SIZE 128 #define IXGBE_82598_VFT_TBL_SIZE 128 -static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw); -static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, - bool *autoneg); -static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, - u32 *speed, bool *autoneg); +static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); +static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num); static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw); -static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, - bool *link_up, - bool link_up_wait_to_complete); -static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, u32 speed, - bool autoneg, - bool autoneg_wait_to_complete); +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); +static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete); static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw); -static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, - bool autoneg, - bool autoneg_wait_to_complete); +static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete); static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); +static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on); +static s32 
ixgbe_clear_vfta_82598(struct ixgbe_hw *hw); +static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index); +static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index); +static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val); +static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val); +static s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw); +static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw); - +/** + */ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) { - hw->mac.num_rx_queues = IXGBE_82598_MAX_RX_QUEUES; - hw->mac.num_tx_queues = IXGBE_82598_MAX_TX_QUEUES; - hw->mac.mcft_size = IXGBE_82598_MC_TBL_SIZE; - hw->mac.vft_size = IXGBE_82598_VFT_TBL_SIZE; - hw->mac.num_rar_entries = IXGBE_82598_RAR_ENTRIES; - - /* PHY ops are filled in by default properly for Fiber only */ - if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { - hw->mac.ops.setup_link = &ixgbe_setup_copper_link_82598; - hw->mac.ops.setup_link_speed = &ixgbe_setup_copper_link_speed_82598; - hw->mac.ops.get_link_settings = - &ixgbe_get_copper_link_settings_82598; - - /* Call PHY identify routine to get the phy type */ - ixgbe_identify_phy(hw); - - switch (hw->phy.type) { - case ixgbe_phy_tn: - hw->phy.ops.setup_link = &ixgbe_setup_tnx_phy_link; - hw->phy.ops.check_link = &ixgbe_check_tnx_phy_link; - hw->phy.ops.setup_link_speed = - &ixgbe_setup_tnx_phy_link_speed; - break; - default: - break; - } + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + + /* Call PHY identify routine to get the phy type */ + ixgbe_identify_phy_generic(hw); + + /* PHY Init */ + switch (phy->type) { + default: + break; + } + + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = &ixgbe_setup_copper_link_82598; + mac->ops.setup_link_speed = + &ixgbe_setup_copper_link_speed_82598; + mac->ops.get_link_capabilities = + &ixgbe_get_copper_link_capabilities_82598; } + mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; + mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; + mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; + return 0; } /** - * ixgbe_get_link_settings_82598 - Determines default link settings + * ixgbe_get_link_capabilities_82598 - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed * @autoneg: boolean auto-negotiation value * - * Determines the default link settings by reading the AUTOC register. + * Determines the link capabilities by reading the AUTOC register. **/ -static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, +static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *autoneg) { s32 status = 0; @@ -150,15 +162,16 @@ static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, } /** - * ixgbe_get_copper_link_settings_82598 - Determines default link settings + * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed * @autoneg: boolean auto-negotiation value * - * Determines the default link settings by reading the AUTOC register. + * Determines the link capabilities by reading the AUTOC register. 
**/ -static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, - u32 *speed, bool *autoneg) +s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) { s32 status = IXGBE_ERR_LINK_SETUP; u16 speed_ability; @@ -166,7 +179,7 @@ static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, *speed = 0; *autoneg = true; - status = ixgbe_read_phy_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &speed_ability); @@ -199,9 +212,6 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82598EB_XF_LR: media_type = ixgbe_media_type_fiber; break; - case IXGBE_DEV_ID_82598AT_DUAL_PORT: - media_type = ixgbe_media_type_copper; - break; default: media_type = ixgbe_media_type_unknown; break; @@ -210,6 +220,122 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) return media_type; } +/** + * ixgbe_setup_fc_82598 - Configure flow control settings + * @hw: pointer to hardware structure + * @packetbuf_num: packet buffer number (0-7) + * + * Configures the flow control settings based on SW configuration. This + * function is used for 802.3x flow control configuration only. + **/ +s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num) +{ + u32 frctl_reg; + u32 rmcs_reg; + + if (packetbuf_num < 0 || packetbuf_num > 7) { + hw_dbg(hw, "Invalid packet buffer number [%d], expected range is" + " 0-7\n", packetbuf_num); + } + + frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); + + rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); + + /* + * 10 gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.type == ixgbe_fc_default) + hw->fc.type = ixgbe_fc_full; + + /* + * We want to save off the original Flow Control configuration just in + * case we get disconnected and then reconnected into a different hub + * or switch with different Flow Control capabilities. + */ + hw->fc.original_type = hw->fc.type; + + /* + * The possible values of the "flow_control" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames but not + * send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we do not + * support receiving pause frames) + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.type) { + case ixgbe_fc_none: + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled, + * and Tx Flow control is disabled. + */ + frctl_reg |= IXGBE_FCTRL_RFCE; + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + case ixgbe_fc_full: + /* + * Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + frctl_reg |= IXGBE_FCTRL_RFCE; + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + default: + /* We should never get here. The value should be 0-3. */ + hw_dbg(hw, "Flow control param set incorrectly\n"); + break; + } + + /* Enable 802.3x based flow control settings. 
*/ + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg); + IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); + + /* + * Check for invalid software configuration, zeros are completely + * invalid for all parameters used past this point, and if we enable + * flow control with zero water marks, we blast flow control packets. + */ + if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { + hw_dbg(hw, "Flow control structure initialized incorrectly\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + /* + * We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + if (hw->fc.type & ixgbe_fc_tx_pause) { + if (hw->fc.send_xon) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), + (hw->fc.low_water | IXGBE_FCRTL_XONE)); + } else { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), + hw->fc.low_water); + } + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), + (hw->fc.high_water)|IXGBE_FCRTH_FCEN); + } + + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time); + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); + + return 0; +} + /** * ixgbe_setup_mac_link_82598 - Configures MAC link settings * @hw: pointer to hardware structure @@ -254,8 +380,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw) } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; - hw_dbg(hw, - "Autonegotiation did not complete.\n"); + hw_dbg(hw, "Autonegotiation did not complete.\n"); } } } @@ -265,8 +390,8 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw) * case we get disconnected and then reconnected into a different hub * or switch with different Flow Control capabilities. */ - hw->fc.type = hw->fc.original_type; - ixgbe_setup_fc(hw, 0); + hw->fc.original_type = hw->fc.type; + ixgbe_setup_fc_82598(hw, 0); /* Add delay to filter out noises during initial link setup */ msleep(50); @@ -283,15 +408,13 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw) * * Reads the links register to determine if link is up and the current speed **/ -static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, - bool *link_up, - bool link_up_wait_to_complete) +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) { u32 links_reg; u32 i; links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); - if (link_up_wait_to_complete) { for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { if (links_reg & IXGBE_LINKS_UP) { @@ -318,6 +441,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, return 0; } + /** * ixgbe_setup_mac_link_speed_82598 - Set MAC link speed * @hw: pointer to hardware structure @@ -328,18 +452,18 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, * Set the link speed in the AUTOC register and restarts link. **/ static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, - u32 speed, bool autoneg, + ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete) { s32 status = 0; /* If speed is 10G, then check for CX4 or XAUI. 
*/ if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && - (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) + (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) { hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; - else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) + } else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) { hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN; - else if (autoneg) { + } else if (autoneg) { /* BX mode - Autonegotiate 1G */ if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD)) hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN; @@ -358,7 +482,7 @@ static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, * ixgbe_hw This will write the AUTOC register based on the new * stored values */ - hw->mac.ops.setup_link(hw); + ixgbe_setup_mac_link_82598(hw); } return status; @@ -376,18 +500,17 @@ static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, **/ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw) { - s32 status = 0; + s32 status; /* Restart autonegotiation on PHY */ - if (hw->phy.ops.setup_link) - status = hw->phy.ops.setup_link(hw); + status = hw->phy.ops.setup_link(hw); - /* Set MAC to KX/KX4 autoneg, which defaultis to Parallel detection */ + /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */ hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX); hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN; /* Set up MAC */ - hw->mac.ops.setup_link(hw); + ixgbe_setup_mac_link_82598(hw); return status; } @@ -401,14 +524,14 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw) * * Sets the link speed in the AUTOC register in the MAC and restarts link. **/ -static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, +static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete) { - s32 status = 0; + s32 status; /* Setup the PHY according to input speed */ - if (hw->phy.ops.setup_link_speed) status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, autoneg_wait_to_complete); @@ -417,7 +540,7 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN; /* Set up MAC */ - hw->mac.ops.setup_link(hw); + ixgbe_setup_mac_link_82598(hw); return status; } @@ -426,7 +549,7 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, * ixgbe_reset_hw_82598 - Performs hardware reset * @hw: pointer to hardware structure * - * Resets the hardware by reseting the transmit and receive units, masks and + * Resets the hardware by resetting the transmit and receive units, masks and * clears all interrupts, performing a PHY reset, and performing a link (MAC) * reset. **/ @@ -440,35 +563,44 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) u8 analog_val; /* Call adapter stop to disable tx/rx and clear interrupts */ - ixgbe_stop_adapter(hw); + hw->mac.ops.stop_adapter(hw); /* - * Power up the Atlas TX lanes if they are currently powered down. - * Atlas TX lanes are powered down for MAC loopback tests, but + * Power up the Atlas Tx lanes if they are currently powered down. + * Atlas Tx lanes are powered down for MAC loopback tests, but * they are not automatically restored on reset. 
*/ - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { - /* Enable TX Atlas so packets can be transmitted again */ - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); + /* Enable Tx Atlas so packets can be transmitted again */ + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, + &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; - ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val); + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, + analog_val); - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val); + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, + &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; - ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val); + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, + analog_val); - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val); + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, + &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; - ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val); + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, + analog_val); - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val); + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, + &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; - ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val); + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, + analog_val); } /* Reset PHY */ - ixgbe_reset_phy(hw); + if (hw->phy.reset_disable == false) + hw->phy.ops.reset(hw); /* * Prevent the PCI-E bus from from hanging by disabling PCI-E master @@ -527,23 +659,305 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) } /* Store the permanent mac address */ - ixgbe_get_mac_addr(hw, hw->mac.perm_addr); + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); return status; } +/** + * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq set index + **/ +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 rar_high; + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + rar_high &= ~IXGBE_RAH_VIND_MASK; + rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); + return 0; +} + +/** + * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq clear index (not used in 82598, but elsewhere) + **/ +static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + if (rar < rar_entries) { + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + if (rar_high & IXGBE_RAH_VIND_MASK) { + rar_high &= ~IXGBE_RAH_VIND_MASK; + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); + } + } else { + hw_dbg(hw, "RAR index %d is out of range.\n", rar); + } + + return 0; +} + +/** + * ixgbe_set_vfta_82598 - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFTA + * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * + * Turn on/off specified VLAN in the VLAN filter table. 
+ **/ +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + u32 regindex; + u32 bitindex; + u32 bits; + u32 vftabyte; + + if (vlan > 4095) + return IXGBE_ERR_PARAM; + + /* Determine 32-bit word position in array */ + regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ + + /* Determine the location of the (VMD) queue index */ + vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ + bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ + + /* Set the nibble for VMD queue index */ + bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); + bits &= (~(0x0F << bitindex)); + bits |= (vind << bitindex); + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); + + /* Determine the location of the bit for this VLAN id */ + bitindex = vlan & 0x1F; /* lower five bits */ + + bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); + if (vlan_on) + /* Turn on this VLAN id */ + bits |= (1 << bitindex); + else + /* Turn off this VLAN id */ + bits &= ~(1 << bitindex); + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); + + return 0; +} + +/** + * ixgbe_clear_vfta_82598 - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) +{ + u32 offset; + u32 vlanbyte; + + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + + for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), + 0); + + return 0; +} + +/** + * ixgbe_blink_led_start_82598 - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink + **/ +static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index) +{ + ixgbe_link_speed speed = 0; + bool link_up = 0; + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + /* + * Link must be up to auto-blink the LEDs on the 82598EB MAC; + * force it if link is down. + */ + hw->mac.ops.check_link(hw, &speed, &link_up, false); + + if (!link_up) { + autoc_reg |= IXGBE_AUTOC_FLU; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + msleep(10); + } + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index. + * @hw: pointer to hardware structure + * @index: led number to stop blinking + **/ +static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index) +{ + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + autoc_reg &= ~IXGBE_AUTOC_FLU; + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg &= ~IXGBE_LED_BLINK(index); + led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs read operation to Atlas analog register specified. 
+ **/ +s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + u32 atlas_ctl; + + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, + IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); + IXGBE_WRITE_FLUSH(hw); + udelay(10); + atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + *val = (u8)atlas_ctl; + + return 0; +} + +/** + * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register + * @hw: pointer to hardware structure + * @reg: atlas register to write + * @val: value to write + * + * Performs write operation to Atlas analog register specified. + **/ +s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + u32 atlas_ctl; + + atlas_ctl = (reg << 8) | val; + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); + IXGBE_WRITE_FLUSH(hw); + udelay(10); + + return 0; +} + +/** + * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. + **/ +s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) +{ + s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598EB_CX4: + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; + break; + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + break; + case IXGBE_DEV_ID_82598EB_XF_LR: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + + default: + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + break; + } + + return physical_layer; +} + static struct ixgbe_mac_operations mac_ops_82598 = { - .reset = &ixgbe_reset_hw_82598, + .init_hw = &ixgbe_init_hw_generic, + .reset_hw = &ixgbe_reset_hw_82598, + .start_hw = &ixgbe_start_hw_generic, + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, .get_media_type = &ixgbe_get_media_type_82598, + .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598, + .get_mac_addr = &ixgbe_get_mac_addr_generic, + .stop_adapter = &ixgbe_stop_adapter_generic, + .read_analog_reg8 = &ixgbe_read_analog_reg8_82598, + .write_analog_reg8 = &ixgbe_write_analog_reg8_82598, .setup_link = &ixgbe_setup_mac_link_82598, - .check_link = &ixgbe_check_mac_link_82598, .setup_link_speed = &ixgbe_setup_mac_link_speed_82598, - .get_link_settings = &ixgbe_get_link_settings_82598, + .check_link = &ixgbe_check_mac_link_82598, + .get_link_capabilities = &ixgbe_get_link_capabilities_82598, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, + .blink_led_start = &ixgbe_blink_led_start_82598, + .blink_led_stop = &ixgbe_blink_led_stop_82598, + .set_rar = &ixgbe_set_rar_generic, + .clear_rar = &ixgbe_clear_rar_generic, + .set_vmdq = &ixgbe_set_vmdq_82598, + .clear_vmdq = &ixgbe_clear_vmdq_82598, + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, + .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic, + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, + .enable_mc = &ixgbe_enable_mc_generic, + .disable_mc = &ixgbe_disable_mc_generic, + .clear_vfta = &ixgbe_clear_vfta_82598, + .set_vfta = &ixgbe_set_vfta_82598, + .setup_fc = &ixgbe_setup_fc_82598, +}; + +static struct ixgbe_eeprom_operations eeprom_ops_82598 = { + .init_params = &ixgbe_init_eeprom_params_generic, + .read = &ixgbe_read_eeprom_generic, + .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, + .update_checksum = &ixgbe_update_eeprom_checksum_generic, +}; + +static struct ixgbe_phy_operations phy_ops_82598 = { + 
.identify = &ixgbe_identify_phy_generic, + /* .identify_sfp = &ixgbe_identify_sfp_module_generic, */ + .reset = &ixgbe_reset_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, + .setup_link = &ixgbe_setup_phy_link_generic, + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, }; struct ixgbe_info ixgbe_82598_info = { .mac = ixgbe_mac_82598EB, .get_invariants = &ixgbe_get_invariants_82598, .mac_ops = &mac_ops_82598, + .eeprom_ops = &eeprom_ops_82598, + .phy_ops = &phy_ops_82598, }; diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index f5b2617111a..a11ff0db9d2 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -33,20 +33,28 @@ #include "ixgbe_common.h" #include "ixgbe_phy.h" -static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw); - static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw); +static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); +static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + u16 count); +static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +static void ixgbe_release_eeprom(struct ixgbe_hw *hw); static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw); -static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); -static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw); +static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index); +static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index); static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr); +static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); /** - * ixgbe_start_hw - Prepare hardware for TX/RX + * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure * * Starts the hardware by filling the bus info structure and media type, clears @@ -54,7 +62,7 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr); * table, VLAN filter table, calls routine to set up link and flow control * settings, and leaves transmit and receive units disabled and uninitialized **/ -s32 ixgbe_start_hw(struct ixgbe_hw *hw) +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) { u32 ctrl_ext; @@ -62,22 +70,22 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw) hw->phy.media_type = hw->mac.ops.get_media_type(hw); /* Identify the PHY */ - ixgbe_identify_phy(hw); + hw->phy.ops.identify(hw); /* * Store MAC address from RAR0, clear receive address registers, and * clear the multicast table */ - ixgbe_init_rx_addrs(hw); + hw->mac.ops.init_rx_addrs(hw); /* Clear the VLAN filter table */ - ixgbe_clear_vfta(hw); + hw->mac.ops.clear_vfta(hw); /* Set up link */ hw->mac.ops.setup_link(hw); /* Clear statistics registers */ - ixgbe_clear_hw_cntrs(hw); + hw->mac.ops.clear_hw_cntrs(hw); /* Set No Snoop Disable */ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); @@ -92,34 +100,34 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw) } /** - * ixgbe_init_hw - Generic hardware initialization + * ixgbe_init_hw_generic - Generic hardware initialization * @hw: pointer to hardware structure * - * Initialize the hardware by reseting the hardware, 
filling the bus info + * Initialize the hardware by resetting the hardware, filling the bus info * structure and media type, clears all on chip counters, initializes receive * address registers, multicast table, VLAN filter table, calls routine to set * up link and flow control settings, and leaves transmit and receive units * disabled and uninitialized **/ -s32 ixgbe_init_hw(struct ixgbe_hw *hw) +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) { /* Reset the hardware */ - hw->mac.ops.reset(hw); + hw->mac.ops.reset_hw(hw); /* Start the HW */ - ixgbe_start_hw(hw); + hw->mac.ops.start_hw(hw); return 0; } /** - * ixgbe_clear_hw_cntrs - Generic clear hardware counters + * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters * @hw: pointer to hardware structure * * Clears all hardware statistics counters by reading them from the hardware * Statistics counters are clear on read. **/ -static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) { u16 i = 0; @@ -191,7 +199,36 @@ static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) } /** - * ixgbe_get_mac_addr - Generic get MAC address + * ixgbe_read_pba_num_generic - Reads part number from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number from the EEPROM + * + * Reads the part number from the EEPROM. + **/ +s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) +{ + s32 ret_val; + u16 data; + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + *pba_num = (u32)(data << 16); + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + *pba_num |= data; + + return 0; +} + +/** + * ixgbe_get_mac_addr_generic - Generic get MAC address * @hw: pointer to hardware structure * @mac_addr: Adapter MAC address * @@ -199,7 +236,7 @@ static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) * A reset of the adapter must be performed prior to calling this function * in order for the MAC address to have been loaded from the EEPROM into RAR0 **/ -s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) { u32 rar_high; u32 rar_low; @@ -217,30 +254,8 @@ s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) return 0; } -s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num) -{ - s32 ret_val; - u16 data; - - ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM0_PTR, &data); - if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); - return ret_val; - } - *part_num = (u32)(data << 16); - - ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM1_PTR, &data); - if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); - return ret_val; - } - *part_num |= data; - - return 0; -} - /** - * ixgbe_stop_adapter - Generic stop TX/RX units + * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units * @hw: pointer to hardware structure * * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, @@ -248,7 +263,7 @@ s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num) * the shared code and drivers to determine if the adapter is in a stopped * state and should not touch the hardware. 
**/ -s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) { u32 number_of_queues; u32 reg_val; @@ -264,6 +279,7 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); reg_val &= ~(IXGBE_RXCTRL_RXEN); IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); + IXGBE_WRITE_FLUSH(hw); msleep(2); /* Clear interrupt mask to stop from interrupts being generated */ @@ -273,7 +289,7 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) IXGBE_READ_REG(hw, IXGBE_EICR); /* Disable the transmit unit. Each queue must be disabled. */ - number_of_queues = hw->mac.num_tx_queues; + number_of_queues = hw->mac.max_tx_queues; for (i = 0; i < number_of_queues; i++) { reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); if (reg_val & IXGBE_TXDCTL_ENABLE) { @@ -282,15 +298,22 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) } } + /* + * Prevent the PCI-E bus from from hanging by disabling PCI-E master + * access and verify no pending requests + */ + if (ixgbe_disable_pcie_master(hw) != 0) + hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); + return 0; } /** - * ixgbe_led_on - Turns on the software controllable LEDs. + * ixgbe_led_on_generic - Turns on the software controllable LEDs. * @hw: pointer to hardware structure * @index: led number to turn on **/ -s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) { u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); @@ -304,11 +327,11 @@ s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) } /** - * ixgbe_led_off - Turns off the software controllable LEDs. + * ixgbe_led_off_generic - Turns off the software controllable LEDs. * @hw: pointer to hardware structure * @index: led number to turn off **/ -s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) { u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); @@ -321,15 +344,14 @@ s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) return 0; } - /** - * ixgbe_init_eeprom - Initialize EEPROM params + * ixgbe_init_eeprom_params_generic - Initialize EEPROM params * @hw: pointer to hardware structure * * Initializes the EEPROM parameters ixgbe_eeprom_info within the * ixgbe_hw struct in order to set up EEPROM access. **/ -s32 ixgbe_init_eeprom(struct ixgbe_hw *hw) +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; u32 eec; @@ -337,6 +359,9 @@ s32 ixgbe_init_eeprom(struct ixgbe_hw *hw) if (eeprom->type == ixgbe_eeprom_uninitialized) { eeprom->type = ixgbe_eeprom_none; + /* Set default semaphore delay to 10ms which is a well + * tested value */ + eeprom->semaphore_delay = 10; /* * Check for EEPROM present first. 
@@ -369,18 +394,85 @@ s32 ixgbe_init_eeprom(struct ixgbe_hw *hw) } /** - * ixgbe_read_eeprom - Read EEPROM word using EERD + * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit value from EEPROM + * + * Reads 16 bit value from EEPROM through bit-bang method + **/ +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status; + u16 word_in; + u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; + + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + /* Prepare the EEPROM for reading */ + status = ixgbe_acquire_eeprom(hw); + + if (status == 0) { + if (ixgbe_ready_eeprom(hw) != 0) { + ixgbe_release_eeprom(hw); + status = IXGBE_ERR_EEPROM; + } + } + + if (status == 0) { + ixgbe_standby_eeprom(hw); + + /* + * Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((hw->eeprom.address_bits == 8) && (offset >= 128)) + read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + ixgbe_shift_out_eeprom_bits(hw, read_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2), + hw->eeprom.address_bits); + + /* Read the data. */ + word_in = ixgbe_shift_in_eeprom_bits(hw, 16); + *data = (word_in >> 8) | (word_in << 8); + + /* End this read operation */ + ixgbe_release_eeprom(hw); + } + +out: + return status; +} + +/** + * ixgbe_read_eeprom_generic - Read EEPROM word using EERD * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM using the EERD register. **/ -s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) +s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) { u32 eerd; s32 status; + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) + IXGBE_EEPROM_READ_REG_START; @@ -393,6 +485,7 @@ s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) else hw_dbg(hw, "Eeprom read timed out\n"); +out: return status; } @@ -419,6 +512,58 @@ static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw) return status; } +/** + * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang + * @hw: pointer to hardware structure + * + * Prepares EEPROM for access using bit-bang method. This function should + * be called before issuing a command to the EEPROM. 
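Three small details of the bit-bang read above are easy to miss: 8-bit-address SPI parts fold address bit 8 (A8) into the opcode for word offsets of 128 and up, the word offset is doubled into a byte address before being shifted out, and the 16 bits that come back arrive byte-swapped. A standalone sketch of just that arithmetic; the excerpt only names the opcode macros, so the numeric values below are typical SPI values and should be treated as assumptions, and the offset and raw word are invented:

#include <stdio.h>
#include <stdint.h>

#define EEPROM_READ_OPCODE_SPI  0x03    /* assumed value of IXGBE_EEPROM_READ_OPCODE_SPI */
#define EEPROM_A8_OPCODE_SPI    0x08    /* assumed value of IXGBE_EEPROM_A8_OPCODE_SPI */

int main(void)
{
    uint16_t offset = 0x90;         /* invented word offset (>= 128) */
    uint8_t address_bits = 8;       /* small SPI part with 8 address bits */
    uint8_t read_opcode = EEPROM_READ_OPCODE_SPI;

    /* offsets of 128 words and above embed address bit 8 in the opcode */
    if (address_bits == 8 && offset >= 128)
        read_opcode |= EEPROM_A8_OPCODE_SPI;

    /* the device is byte addressed, so the word offset is doubled */
    uint16_t byte_addr = (uint16_t)(offset * 2);

    /* the 16 bits shifted in arrive high/low byte swapped */
    uint16_t word_in = 0x34A2;      /* invented raw value from the device */
    uint16_t data = (uint16_t)((word_in >> 8) | (word_in << 8));

    printf("opcode=0x%02x byte_addr=0x%04x data=0x%04x\n",
           (unsigned)read_opcode, (unsigned)byte_addr, (unsigned)data);
    return 0;
}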
+ **/ +static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) +{ + s32 status = 0; + u32 eec; + u32 i; + + if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) + status = IXGBE_ERR_SWFW_SYNC; + + if (status == 0) { + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + /* Request EEPROM Access */ + eec |= IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + + for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + if (eec & IXGBE_EEC_GNT) + break; + udelay(5); + } + + /* Release if grant not acquired */ + if (!(eec & IXGBE_EEC_GNT)) { + eec &= ~IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + hw_dbg(hw, "Could not acquire EEPROM grant\n"); + + ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + status = IXGBE_ERR_EEPROM; + } + } + + /* Setup EEPROM for Read/Write */ + if (status == 0) { + /* Clear CS and SK */ + eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + udelay(1); + } + return status; +} + /** * ixgbe_get_eeprom_semaphore - Get hardware semaphore * @hw: pointer to hardware structure @@ -502,6 +647,217 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) IXGBE_WRITE_FLUSH(hw); } +/** + * ixgbe_ready_eeprom - Polls for EEPROM ready + * @hw: pointer to hardware structure + **/ +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) +{ + s32 status = 0; + u16 i; + u8 spi_stat_reg; + + /* + * Read "Status Register" repeatedly until the LSB is cleared. The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { + ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, + IXGBE_EEPROM_OPCODE_BITS); + spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); + if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) + break; + + udelay(5); + ixgbe_standby_eeprom(hw); + }; + + /* + * On some parts, SPI write time could vary from 0-20mSec on 3.3V + * devices (and only 0-5mSec on 5V devices) + */ + if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { + hw_dbg(hw, "SPI EEPROM Status error\n"); + status = IXGBE_ERR_EEPROM; + } + + return status; +} + +/** + * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: pointer to hardware structure + **/ +static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) +{ + u32 eec; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + /* Toggle CS to flush commands */ + eec |= IXGBE_EEC_CS; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + udelay(1); + eec &= ~IXGBE_EEC_CS; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + udelay(1); +} + +/** + * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. + * @hw: pointer to hardware structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + **/ +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + u16 count) +{ + u32 eec; + u32 mask; + u32 i; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + /* + * Mask is used to shift "count" bits of "data" out to the EEPROM + * one bit at a time. Determine the starting bit based on count + */ + mask = 0x01 << (count - 1); + + for (i = 0; i < count; i++) { + /* + * A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK + * bit controls the clock input to the EEPROM). 
A "0" is + * shifted out to the EEPROM by setting "DI" to "0" and then + * raising and then lowering the clock. + */ + if (data & mask) + eec |= IXGBE_EEC_DI; + else + eec &= ~IXGBE_EEC_DI; + + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + + udelay(1); + + ixgbe_raise_eeprom_clk(hw, &eec); + ixgbe_lower_eeprom_clk(hw, &eec); + + /* + * Shift mask to signify next bit of data to shift in to the + * EEPROM + */ + mask = mask >> 1; + }; + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eec &= ~IXGBE_EEC_DI; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM + * @hw: pointer to hardware structure + **/ +static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) +{ + u32 eec; + u32 i; + u16 data = 0; + + /* + * In order to read a register from the EEPROM, we need to shift + * 'count' bits in from the EEPROM. Bits are "shifted in" by raising + * the clock input to the EEPROM (setting the SK bit), and then reading + * the value of the "DO" bit. During this "shifting in" process the + * "DI" bit should always be clear. + */ + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); + + for (i = 0; i < count; i++) { + data = data << 1; + ixgbe_raise_eeprom_clk(hw, &eec); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + eec &= ~(IXGBE_EEC_DI); + if (eec & IXGBE_EEC_DO) + data |= 1; + + ixgbe_lower_eeprom_clk(hw, &eec); + } + + return data; +} + +/** + * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. + * @hw: pointer to hardware structure + * @eec: EEC register's current value + **/ +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +{ + /* + * Raise the clock input to the EEPROM + * (setting the SK bit), then delay + */ + *eec = *eec | IXGBE_EEC_SK; + IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); + IXGBE_WRITE_FLUSH(hw); + udelay(1); +} + +/** + * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. 
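The two shift helpers above are MSB-first serial transfers: shift-out walks a mask from bit (count - 1) down to 0 and drives DI accordingly, while shift-in clocks the line and accumulates DO into the low bit. A quick host-side simulation of that bit ordering, wiring shift-out straight into shift-in through a small bit buffer instead of the EEC register:

#include <stdio.h>
#include <stdint.h>

static uint8_t wire[32];    /* pretend serial line: one entry per clock */

/* MSB-first, like ixgbe_shift_out_eeprom_bits() */
static void shift_out(uint16_t data, uint16_t count)
{
    uint32_t mask = 1u << (count - 1);
    for (uint16_t i = 0; i < count; i++) {
        wire[i] = (data & mask) ? 1 : 0;    /* DI level for this clock */
        mask >>= 1;
    }
}

/* MSB-first accumulate, like ixgbe_shift_in_eeprom_bits() */
static uint16_t shift_in(uint16_t count)
{
    uint16_t data = 0;
    for (uint16_t i = 0; i < count; i++) {
        data <<= 1;
        if (wire[i])        /* DO sampled after raising the clock */
            data |= 1;
    }
    return data;
}

int main(void)
{
    shift_out(0xBEEF, 16);
    printf("round trip: 0x%04x\n", shift_in(16));   /* prints 0xbeef */
    return 0;
}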
+ * @hw: pointer to hardware structure + * @eecd: EECD's current value + **/ +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +{ + /* + * Lower the clock input to the EEPROM (clearing the SK bit), then + * delay + */ + *eec = *eec & ~IXGBE_EEC_SK; + IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); + IXGBE_WRITE_FLUSH(hw); + udelay(1); +} + +/** + * ixgbe_release_eeprom - Release EEPROM, release semaphores + * @hw: pointer to hardware structure + **/ +static void ixgbe_release_eeprom(struct ixgbe_hw *hw) +{ + u32 eec; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + eec |= IXGBE_EEC_CS; /* Pull CS high */ + eec &= ~IXGBE_EEC_SK; /* Lower SCK */ + + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + + udelay(1); + + /* Stop requesting EEPROM access */ + eec &= ~IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + + ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); +} + /** * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum * @hw: pointer to hardware structure @@ -517,7 +873,7 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) /* Include 0x0-0x3F in the checksum */ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { - if (ixgbe_read_eeprom(hw, i, &word) != 0) { + if (hw->eeprom.ops.read(hw, i, &word) != 0) { hw_dbg(hw, "EEPROM read failed\n"); break; } @@ -526,15 +882,15 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) /* Include all data from pointers except for the fw pointer */ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { - ixgbe_read_eeprom(hw, i, &pointer); + hw->eeprom.ops.read(hw, i, &pointer); /* Make sure the pointer seems valid */ if (pointer != 0xFFFF && pointer != 0) { - ixgbe_read_eeprom(hw, pointer, &length); + hw->eeprom.ops.read(hw, pointer, &length); if (length != 0xFFFF && length != 0) { for (j = pointer+1; j <= pointer+length; j++) { - ixgbe_read_eeprom(hw, j, &word); + hw->eeprom.ops.read(hw, j, &word); checksum += word; } } @@ -547,14 +903,15 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) } /** - * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum + * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum * @hw: pointer to hardware structure * @checksum_val: calculated checksum * * Performs checksum calculation and validates the EEPROM checksum. If the * caller does not need checksum_val, the value can be NULL. **/ -s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, + u16 *checksum_val) { s32 status; u16 checksum; @@ -565,12 +922,12 @@ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) * not continue or we could be in for a very long wait while every * EEPROM read fails */ - status = ixgbe_read_eeprom(hw, 0, &checksum); + status = hw->eeprom.ops.read(hw, 0, &checksum); if (status == 0) { checksum = ixgbe_calc_eeprom_checksum(hw); - ixgbe_read_eeprom(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); /* * Verify read checksum from EEPROM is the same as @@ -589,6 +946,33 @@ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) return status; } +/** + * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum + * @hw: pointer to hardware structure + **/ +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum; + + /* + * Read the first word from the EEPROM. 
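The checksum helpers above split cleanly into calculate, validate and update steps: validate recomputes the sum and compares it with the word stored at IXGBE_EEPROM_CHECKSUM, update recomputes and writes it back. The toy below illustrates only that split over a tiny fake image; the real calculation also walks the pointer regions and uses a device-specific folding constant that the excerpt does not show, so this is not the device's exact formula:

#include <stdio.h>
#include <stdint.h>

#define FAKE_EEPROM_WORDS   8   /* toy image size */
#define FAKE_CHECKSUM_SLOT  7   /* stand-in for IXGBE_EEPROM_CHECKSUM */

static uint16_t eeprom[FAKE_EEPROM_WORDS] = {
    0x1122, 0x3344, 0x5566, 0x0001, 0x0002, 0x0003, 0x0004, 0x0000,
};

/* toy version: just sum the data words */
static uint16_t calc_checksum(void)
{
    uint16_t sum = 0;
    for (int i = 0; i < FAKE_CHECKSUM_SLOT; i++)
        sum = (uint16_t)(sum + eeprom[i]);
    return sum;
}

static int validate_checksum(void)
{
    return calc_checksum() == eeprom[FAKE_CHECKSUM_SLOT] ? 0 : -1;
}

static void update_checksum(void)
{
    eeprom[FAKE_CHECKSUM_SLOT] = calc_checksum();   /* like writing the new sum back */
}

int main(void)
{
    printf("before update: %s\n", validate_checksum() ? "invalid" : "valid");
    update_checksum();
    printf("after update:  %s\n", validate_checksum() ? "invalid" : "valid");
    return 0;
}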
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + + if (status == 0) { + checksum = ixgbe_calc_eeprom_checksum(hw); + status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, + checksum); + } else { + hw_dbg(hw, "EEPROM read failed\n"); + } + + return status; +} + /** * ixgbe_validate_mac_addr - Validate MAC address * @mac_addr: pointer to MAC address. @@ -607,58 +991,137 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr) status = IXGBE_ERR_INVALID_MAC_ADDR; /* Reject the zero address */ else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && - mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) status = IXGBE_ERR_INVALID_MAC_ADDR; return status; } /** - * ixgbe_set_rar - Set RX address register + * ixgbe_set_rar_generic - Set Rx address register * @hw: pointer to hardware structure - * @addr: Address to put into receive address register * @index: Receive address register to write - * @vind: Vind to set RAR to + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" or "pool" index * @enable_addr: set flag that address is active * * Puts an ethernet address into a receive address register. **/ -s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, - u32 enable_addr) +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr) { u32 rar_low, rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + /* setup VMDq pool selection before this RAR gets enabled */ + hw->mac.ops.set_vmdq(hw, index, vmdq); + /* Make sure we are using a valid rar index range */ + if (index < rar_entries) { /* - * HW expects these in little endian so we reverse the byte order from - * network order (big endian) to little endian + * HW expects these in little endian so we reverse the byte + * order from network order (big endian) to little endian */ rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); - - rar_high = ((u32)addr[4] | - ((u32)addr[5] << 8) | - ((vind << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK)); + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); + rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); if (enable_addr != 0) rar_high |= IXGBE_RAH_AV; IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + } else { + hw_dbg(hw, "RAR index %d is out of range.\n", index); + } + + return 0; +} + +/** + * ixgbe_clear_rar_generic - Remove Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * + * Clears an ethernet address from a receive address register. + **/ +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (index < rar_entries) { + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. 
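ixgbe_set_rar_generic() above packs the six address bytes little-endian: bytes 0-3 go into RAL and bytes 4-5 into the low half of RAH, while the remaining RAH bits (VMDq selection, address-valid) are preserved. A host-side sketch of that packing and of recovering the address again, roughly the unpacking get_mac_addr performs on RAR0; the register contents and the valid-bit value are simulated/assumed:

#include <stdio.h>
#include <stdint.h>

#define RAH_AV  0x80000000u /* address-valid bit; value assumed for illustration */

int main(void)
{
    uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };  /* example MAC */
    uint32_t rar_high = 0x00040000u;    /* pretend some upper RAH bits were already set */

    /* pack bytes 0..3 into RAL, low byte first (little endian) */
    uint32_t rar_low = (uint32_t)addr[0] |
                       ((uint32_t)addr[1] << 8) |
                       ((uint32_t)addr[2] << 16) |
                       ((uint32_t)addr[3] << 24);

    /* keep everything in RAH except the low 16 bits and the valid bit */
    rar_high &= ~(0x0000FFFFu | RAH_AV);
    rar_high |= (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
    rar_high |= RAH_AV;     /* enable_addr != 0 */

    /* unpack again to show the byte order round-trips */
    uint8_t out[6];
    for (int i = 0; i < 4; i++)
        out[i] = (uint8_t)(rar_low >> (8 * i));
    for (int i = 0; i < 2; i++)
        out[4 + i] = (uint8_t)(rar_high >> (8 * i));

    printf("RAL=0x%08x RAH=0x%08x -> %02x:%02x:%02x:%02x:%02x:%02x\n",
           (unsigned)rar_low, (unsigned)rar_high,
           out[0], out[1], out[2], out[3], out[4], out[5]);
    return 0;
}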
+ */ + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); + + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + } else { + hw_dbg(hw, "RAR index %d is out of range.\n", index); + } + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); return 0; } /** - * ixgbe_init_rx_addrs - Initializes receive address filters. + * ixgbe_enable_rar - Enable Rx address register + * @hw: pointer to hardware structure + * @index: index into the RAR table + * + * Enables the select receive address register. + **/ +static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index) +{ + u32 rar_high; + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); + rar_high |= IXGBE_RAH_AV; + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); +} + +/** + * ixgbe_disable_rar - Disable Rx address register + * @hw: pointer to hardware structure + * @index: index into the RAR table + * + * Disables the select receive address register. + **/ +static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index) +{ + u32 rar_high; + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); + rar_high &= (~IXGBE_RAH_AV); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); +} + +/** + * ixgbe_init_rx_addrs_generic - Initializes receive address filters. * @hw: pointer to hardware structure * * Places the MAC address in receive address register 0 and clears the rest - * of the receive addresss registers. Clears the multicast table. Assumes + * of the receive address registers. Clears the multicast table. Assumes * the receiver is in reset when the routine is called. **/ -static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) { u32 i; u32 rar_entries = hw->mac.num_rar_entries; @@ -671,7 +1134,7 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) if (ixgbe_validate_mac_addr(hw->mac.addr) == IXGBE_ERR_INVALID_MAC_ADDR) { /* Get the MAC address from the RAR0 for later reference */ - ixgbe_get_mac_addr(hw, hw->mac.addr); + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ", hw->mac.addr[0], hw->mac.addr[1], @@ -687,13 +1150,14 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], hw->mac.addr[4], hw->mac.addr[5]); - ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); } + hw->addr_ctrl.overflow_promisc = 0; hw->addr_ctrl.rar_used_count = 1; /* Zero out the other receive addresses. */ - hw_dbg(hw, "Clearing RAR[1-15]\n"); + hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1); for (i = 1; i < rar_entries; i++) { IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); @@ -708,6 +1172,9 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) for (i = 0; i < hw->mac.mcft_size; i++) IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); + if (hw->mac.ops.init_uta_tables) + hw->mac.ops.init_uta_tables(hw); + return 0; } @@ -718,7 +1185,7 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) * * Adds it to unused receive address register or goes into promiscuous mode. 
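As the comment above says, a secondary unicast address either takes an unused receive address register or pushes the device toward promiscuous mode. A small sketch of the bookkeeping such a helper needs, with the hardware writes stubbed out; the table size and counter names mirror the driver but the flow is simplified:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define RAR_ENTRIES 16  /* as IXGBE_82598_RAR_ENTRIES */

struct addr_filter {
    unsigned int rar_used_count;        /* RAR0 holds the primary MAC */
    unsigned int mc_addr_in_rar_count;  /* multicast entries occupy the top end */
    bool overflow_promisc;
};

static void add_uc_addr(struct addr_filter *a, const uint8_t *addr)
{
    if (a->rar_used_count < RAR_ENTRIES) {
        unsigned int rar = a->rar_used_count - a->mc_addr_in_rar_count;
        printf("program RAR[%u] with %02x:..:%02x\n", rar, addr[0], addr[5]);
        a->rar_used_count++;
    } else {
        a->overflow_promisc = true;     /* caller will enable unicast promisc */
    }
}

int main(void)
{
    struct addr_filter a = { .rar_used_count = 1 }; /* RAR0 already used */
    uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

    for (int i = 0; i < 20; i++) {      /* more addresses than RAR entries */
        mac[5] = (uint8_t)i;
        add_uc_addr(&a, mac);
    }
    printf("used=%u overflow_promisc=%d\n", a.rar_used_count, a.overflow_promisc);
    return 0;
}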
**/ -void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr) +static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) { u32 rar_entries = hw->mac.num_rar_entries; u32 rar; @@ -733,7 +1200,7 @@ void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr) if (hw->addr_ctrl.rar_used_count < rar_entries) { rar = hw->addr_ctrl.rar_used_count - hw->addr_ctrl.mc_addr_in_rar_count; - ixgbe_set_rar(hw, rar, addr, 0, IXGBE_RAH_AV); + hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar); hw->addr_ctrl.rar_used_count++; } else { @@ -744,7 +1211,7 @@ void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr) } /** - * ixgbe_update_uc_addr_list - Updates MAC list of secondary addresses + * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses * @hw: pointer to hardware structure * @addr_list: the list of new addresses * @addr_count: number of addresses @@ -757,7 +1224,7 @@ void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr) * Drivers using secondary unicast addresses must set user_set_promisc when * manually putting the device into promiscuous mode. **/ -s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, u32 addr_count, ixgbe_mc_addr_itr next) { u8 *addr; @@ -787,7 +1254,7 @@ s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, for (i = 0; i < addr_count; i++) { hw_dbg(hw, " Adding the secondary addresses:\n"); addr = next(hw, &addr_list, &vmdq); - ixgbe_add_uc_addr(hw, addr); + ixgbe_add_uc_addr(hw, addr, vmdq); } if (hw->addr_ctrl.overflow_promisc) { @@ -808,7 +1275,7 @@ s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, } } - hw_dbg(hw, "ixgbe_update_uc_addr_list Complete\n"); + hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n"); return 0; } @@ -821,7 +1288,7 @@ s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, * bit-vector to set in the multicast table. The hardware uses 12 bits, from * incoming rx multicast addresses, to determine the bit-vector to check in * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set - * by the MO field of the MCSTCTRL. The MO field is set during initalization + * by the MO field of the MCSTCTRL. The MO field is set during initialization * to mc_filter_type. **/ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) @@ -907,10 +1374,10 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) * else put it in the MTA */ if (hw->addr_ctrl.rar_used_count < rar_entries) { + /* use RAR from the end up for multicast */ rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1; - ixgbe_set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV); - hw_dbg(hw, "Added a multicast address to RAR[%d]\n", - hw->addr_ctrl.rar_used_count); + hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV); + hw_dbg(hw, "Added a multicast address to RAR[%d]\n", rar); hw->addr_ctrl.rar_used_count++; hw->addr_ctrl.mc_addr_in_rar_count++; } else { @@ -921,18 +1388,18 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) } /** - * ixgbe_update_mc_addr_list - Updates MAC list of multicast addresses + * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses * @hw: pointer to hardware structure * @mc_addr_list: the list of new multicast addresses * @mc_addr_count: number of addresses * @next: iterator function to walk the multicast address list * * The given list replaces any existing list. 
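For multicast addresses that do not land in a RAR, the comment above says the hardware hashes on 12 bits of the address, with the MO field of MCSTCTRL selecting which 12, and the resulting vector indexes a bit in the 128-entry MTA. The excerpt does not show ixgbe_mta_vector() itself, so the bit selection below is only a plausible sketch of filter type 0 (top 12 bits of the address); the vector >> 5 / vector & 0x1F split is the usual layout for a 128 x 32-bit table:

#include <stdio.h>
#include <stdint.h>

#define MCFT_SIZE 128   /* as IXGBE_82598_MC_TBL_SIZE */

static uint32_t mta[MCFT_SIZE]; /* simulated multicast table array */

/* Hypothetical vector for mc_filter_type 0: the top 12 bits of the address,
 * which live in mc_addr[4..5] since bytes are transmitted LSB first. */
static uint16_t mta_vector(const uint8_t *mc_addr)
{
    uint16_t vector = (uint16_t)((mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4));
    return vector & 0x0FFF;     /* 12 significant bits */
}

static void set_mta(const uint8_t *mc_addr)
{
    uint16_t vector = mta_vector(mc_addr);
    uint32_t reg = (vector >> 5) & 0x7F;    /* which 32-bit MTA register */
    uint32_t bit = vector & 0x1F;           /* which bit inside it */
    mta[reg] |= 1u << bit;
    printf("addr ..:%02x:%02x -> vector 0x%03x -> MTA[%u] bit %u\n",
           mc_addr[4], mc_addr[5], vector, (unsigned)reg, (unsigned)bit);
}

int main(void)
{
    uint8_t mc1[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };   /* 224.0.0.1 */
    uint8_t mc2[6] = { 0x01, 0x00, 0x5e, 0x7f, 0xff, 0xfa };   /* 239.255.255.250 */
    set_mta(mc1);
    set_mta(mc2);
    return 0;
}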
Clears the MC addrs from receive - * address registers and the multicast table. Uses unsed receive address + * address registers and the multicast table. Uses unused receive address * registers for the first multicast addresses, and hashes the rest into the * multicast table. **/ -s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, u32 mc_addr_count, ixgbe_mc_addr_itr next) { u32 i; @@ -949,7 +1416,8 @@ s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, hw->addr_ctrl.mta_in_use = 0; /* Zero out the other receive addresses. */ - hw_dbg(hw, "Clearing RAR[1-15]\n"); + hw_dbg(hw, "Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count, + rar_entries - 1); for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) { IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); @@ -971,188 +1439,53 @@ s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); - hw_dbg(hw, "ixgbe_update_mc_addr_list Complete\n"); + hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); return 0; } /** - * ixgbe_clear_vfta - Clear VLAN filter table + * ixgbe_enable_mc_generic - Enable multicast address in RAR * @hw: pointer to hardware structure * - * Clears the VLAN filer table, and the VMDq index associated with the filter + * Enables multicast address in RAR and the use of the multicast hash table. **/ -static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) { - u32 offset; - u32 vlanbyte; + u32 i; + u32 rar_entries = hw->mac.num_rar_entries; + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; - for (offset = 0; offset < hw->mac.vft_size; offset++) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + if (a->mc_addr_in_rar_count > 0) + for (i = (rar_entries - a->mc_addr_in_rar_count); + i < rar_entries; i++) + ixgbe_enable_rar(hw, i); - for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) - for (offset = 0; offset < hw->mac.vft_size; offset++) - IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), - 0); + if (a->mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | + hw->mac.mc_filter_type); return 0; } /** - * ixgbe_set_vfta - Set VLAN filter table + * ixgbe_disable_mc_generic - Disable multicast address in RAR * @hw: pointer to hardware structure - * @vlan: VLAN id to write to VLAN filter - * @vind: VMDq output index that maps queue to VLAN id in VFTA - * @vlan_on: boolean flag to turn on/off VLAN in VFTA * - * Turn on/off specified VLAN in the VLAN filter table. + * Disables multicast address in RAR and the use of the multicast hash table. 
**/ -s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on) +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) { - u32 VftaIndex; - u32 BitOffset; - u32 VftaReg; - u32 VftaByte; - - /* Determine 32-bit word position in array */ - VftaIndex = (vlan >> 5) & 0x7F; /* upper seven bits */ - - /* Determine the location of the (VMD) queue index */ - VftaByte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ - BitOffset = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ - - /* Set the nibble for VMD queue index */ - VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex)); - VftaReg &= (~(0x0F << BitOffset)); - VftaReg |= (vind << BitOffset); - IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex), VftaReg); - - /* Determine the location of the bit for this VLAN id */ - BitOffset = vlan & 0x1F; /* lower five bits */ - - VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTA(VftaIndex)); - if (vlan_on) - /* Turn on this VLAN id */ - VftaReg |= (1 << BitOffset); - else - /* Turn off this VLAN id */ - VftaReg &= ~(1 << BitOffset); - IXGBE_WRITE_REG(hw, IXGBE_VFTA(VftaIndex), VftaReg); - - return 0; -} - -/** - * ixgbe_setup_fc - Configure flow control settings - * @hw: pointer to hardware structure - * @packetbuf_num: packet buffer number (0-7) - * - * Configures the flow control settings based on SW configuration. - * This function is used for 802.3x flow control configuration only. - **/ -s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) -{ - u32 frctl_reg; - u32 rmcs_reg; - - if (packetbuf_num < 0 || packetbuf_num > 7) - hw_dbg(hw, "Invalid packet buffer number [%d], expected range " - "is 0-7\n", packetbuf_num); - - frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); - frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); - - rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); - rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); - - /* - * 10 gig parts do not have a word in the EEPROM to determine the - * default flow control setting, so we explicitly set it to full. - */ - if (hw->fc.type == ixgbe_fc_default) - hw->fc.type = ixgbe_fc_full; - - /* - * We want to save off the original Flow Control configuration just in - * case we get disconnected and then reconnected into a different hub - * or switch with different Flow Control capabilities. - */ - hw->fc.type = hw->fc.original_type; - - /* - * The possible values of the "flow_control" parameter are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames but not - * send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but we do not - * support receiving pause frames) - * 3: Both Rx and TX flow control (symmetric) are enabled. - * other: Invalid. - */ - switch (hw->fc.type) { - case ixgbe_fc_none: - break; - case ixgbe_fc_rx_pause: - /* - * RX Flow control is enabled, - * and TX Flow control is disabled. - */ - frctl_reg |= IXGBE_FCTRL_RFCE; - break; - case ixgbe_fc_tx_pause: - /* - * TX Flow control is enabled, and RX Flow control is disabled, - * by a software over-ride. - */ - rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; - break; - case ixgbe_fc_full: - /* - * Flow control (both RX and TX) is enabled by a software - * over-ride. - */ - frctl_reg |= IXGBE_FCTRL_RFCE; - rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; - break; - default: - /* We should never get here. The value should be 0-3. */ - hw_dbg(hw, "Flow control param set incorrectly\n"); - break; - } - - /* Enable 802.3x based flow control settings. 
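The set_vfta implementation being removed above is a bitmap plus a nibble table: the VLAN id's upper bits select a 32-bit VFTA register and its lower five bits select the valid bit, while a 4-bit VMDq index is stored in the parallel VFTAVIND array (byte column from bits 4:3, nibble offset from bits 2:0). A quick arithmetic-only sketch of that indexing, with invented VLAN and VMDq values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t vlan = 100;    /* invented VLAN id */
    uint32_t vind = 3;      /* invented VMDq index (4 bits) */

    /* which 32-bit VFTA register and which bit marks the VLAN as active */
    uint32_t vfta_index = (vlan >> 5) & 0x7F;
    uint32_t valid_bit  = vlan & 0x1F;

    /* where the 4-bit VMDq index for this VLAN lives in the VFTAVIND array */
    uint32_t vfta_byte  = (vlan >> 3) & 0x03;   /* byte column */
    uint32_t nibble_off = (vlan & 0x7) << 2;    /* bit offset of the nibble */

    uint32_t vind_reg = 0;                      /* simulated VFTAVIND word */
    vind_reg &= ~(0x0Fu << nibble_off);
    vind_reg |= vind << nibble_off;

    printf("VLAN %u -> VFTA[%u] bit %u, VFTAVIND(byte %u) nibble offset %u, word 0x%08x\n",
           (unsigned)vlan, (unsigned)vfta_index, (unsigned)valid_bit,
           (unsigned)vfta_byte, (unsigned)nibble_off, (unsigned)vind_reg);
    return 0;
}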
*/ - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg); - IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); - - /* - * Check for invalid software configuration, zeros are completely - * invalid for all parameters used past this point, and if we enable - * flow control with zero water marks, we blast flow control packets. - */ - if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { - hw_dbg(hw, "Flow control structure initialized incorrectly\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; - } + u32 i; + u32 rar_entries = hw->mac.num_rar_entries; + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; - /* - * We need to set up the Receive Threshold high and low water - * marks as well as (optionally) enabling the transmission of - * XON frames. - */ - if (hw->fc.type & ixgbe_fc_tx_pause) { - if (hw->fc.send_xon) { - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), - (hw->fc.low_water | IXGBE_FCRTL_XONE)); - } else { - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), - hw->fc.low_water); - } - IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), - (hw->fc.high_water)|IXGBE_FCRTH_FCEN); - } + if (a->mc_addr_in_rar_count > 0) + for (i = (rar_entries - a->mc_addr_in_rar_count); + i < rar_entries; i++) + ixgbe_disable_rar(hw, i); - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time); - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); + if (a->mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); return 0; } @@ -1168,13 +1501,24 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) **/ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) { - u32 ctrl; - s32 i; + u32 i; + u32 reg_val; + u32 number_of_queues; s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - ctrl |= IXGBE_CTRL_GIO_DIS; - IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + /* Disable the receive unit by stopping each queue */ + number_of_queues = hw->mac.max_rx_queues; + for (i = 0; i < number_of_queues; i++) { + reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + if (reg_val & IXGBE_RXDCTL_ENABLE) { + reg_val &= ~IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); + } + } + + reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL); + reg_val |= IXGBE_CTRL_GIO_DIS; + IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val); for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) { @@ -1189,11 +1533,11 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) /** - * ixgbe_acquire_swfw_sync - Aquire SWFW semaphore + * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore * @hw: pointer to hardware structure - * @mask: Mask to specify wich semaphore to acquire + * @mask: Mask to specify which semaphore to acquire * - * Aquires the SWFW semaphore throught the GSSR register for the specified + * Acquires the SWFW semaphore thought the GSSR register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) @@ -1235,9 +1579,9 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) /** * ixgbe_release_swfw_sync - Release SWFW semaphore * @hw: pointer to hardware structure - * @mask: Mask to specify wich semaphore to release + * @mask: Mask to specify which semaphore to release * - * Releases the SWFW semaphore throught the GSSR register for the specified + * Releases the SWFW semaphore thought the GSSR register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) @@ -1254,45 +1598,3 @@ void 
ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) ixgbe_release_eeprom_semaphore(hw); } -/** - * ixgbe_read_analog_reg8 - Reads 8 bit Atlas analog register - * @hw: pointer to hardware structure - * @reg: analog register to read - * @val: read value - * - * Performs write operation to analog register specified. - **/ -s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val) -{ - u32 atlas_ctl; - - IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, - IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); - IXGBE_WRITE_FLUSH(hw); - udelay(10); - atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); - *val = (u8)atlas_ctl; - - return 0; -} - -/** - * ixgbe_write_analog_reg8 - Writes 8 bit Atlas analog register - * @hw: pointer to hardware structure - * @reg: atlas register to write - * @val: value to write - * - * Performs write operation to Atlas analog register specified. - **/ -s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val) -{ - u32 atlas_ctl; - - atlas_ctl = (reg << 8) | val; - IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); - IXGBE_WRITE_FLUSH(hw); - udelay(10); - - return 0; -} - diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index c75ecba9ccd..13ed8d2ff4a 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -31,36 +31,45 @@ #include "ixgbe_type.h" -s32 ixgbe_init_hw(struct ixgbe_hw *hw); -s32 ixgbe_start_hw(struct ixgbe_hw *hw); -s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr); -s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); -s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num); - -s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index); - -s32 ixgbe_init_eeprom(struct ixgbe_hw *hw); -s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data); -s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val); - -s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, - u32 enable_addr); -s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, ixgbe_mc_addr_itr next); -s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *uc_addr_list, - u32 mc_addr_count, ixgbe_mc_addr_itr next); -s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); -s32 ixgbe_validate_mac_addr(u8 *mac_addr); - -s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packtetbuf_num); +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num); +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); + +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 *data); +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, + u16 *checksum_val); +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); + +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw 
*hw); +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, + ixgbe_mc_addr_itr func); +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func); +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_validate_mac_addr(u8 *mac_addr); s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); -s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); -s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val); #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 4701abf3a59..ff4fac34a17 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -128,9 +128,10 @@ static int ixgbe_get_settings(struct net_device *netdev, ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); ecmd->port = PORT_FIBRE; + ecmd->autoneg = AUTONEG_DISABLE; } - adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up, false); + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (link_up) { ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? SPEED_10000 : SPEED_1000; @@ -327,7 +328,7 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); - regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL); + regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); /* Flow Control */ @@ -373,7 +374,7 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); for (i = 0; i < 16; i++) regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); - regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE); + regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); @@ -605,8 +606,8 @@ static int ixgbe_get_eeprom(struct net_device *netdev, return -ENOMEM; for (i = 0; i < eeprom_len; i++) { - if ((ret_val = ixgbe_read_eeprom(hw, first_word + i, - &eeprom_buff[i]))) + if ((ret_val = hw->eeprom.ops.read(hw, first_word + i, + &eeprom_buff[i]))) break; } @@ -807,7 +808,7 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - u8 *p = data; + char *p = (char *)data; int i; switch (stringset) { @@ -857,16 +858,17 @@ static int ixgbe_nway_reset(struct net_device *netdev) static int ixgbe_phys_id(struct net_device *netdev, u32 data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL); + struct ixgbe_hw *hw = &adapter->hw; + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); u32 i; if (!data || data > 300) data = 300; for (i = 0; i < (data * 1000); i += 400) { - ixgbe_led_on(&adapter->hw, IXGBE_LED_ON); + hw->mac.ops.led_on(hw, IXGBE_LED_ON); msleep_interruptible(200); - ixgbe_led_off(&adapter->hw, IXGBE_LED_ON); + 
hw->mac.ops.led_off(hw, IXGBE_LED_ON); msleep_interruptible(200); } diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index e18afa4e195..df093ec830d 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1749,14 +1749,16 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev, static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; /* add VID to filter table */ - ixgbe_set_vfta(&adapter->hw, vid, 0, true); + hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true); } static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; if (!test_bit(__IXGBE_DOWN, &adapter->state)) ixgbe_irq_disable(adapter); @@ -1767,7 +1769,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) ixgbe_irq_enable(adapter); /* remove VID from filter table */ - ixgbe_set_vfta(&adapter->hw, vid, 0, false); + hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false); } static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) @@ -1843,15 +1845,15 @@ static void ixgbe_set_rx_mode(struct net_device *netdev) addr_count = netdev->uc_count; if (addr_count) addr_list = netdev->uc_list->dmi_addr; - ixgbe_update_uc_addr_list(hw, addr_list, addr_count, - ixgbe_addr_list_itr); + hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count, + ixgbe_addr_list_itr); /* reprogram multicast list */ addr_count = netdev->mc_count; if (addr_count) addr_list = netdev->mc_list->dmi_addr; - ixgbe_update_mc_addr_list(hw, addr_list, addr_count, - ixgbe_addr_list_itr); + hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, + ixgbe_addr_list_itr); } static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) @@ -2016,11 +2018,12 @@ int ixgbe_up(struct ixgbe_adapter *adapter) void ixgbe_reset(struct ixgbe_adapter *adapter) { - if (ixgbe_init_hw(&adapter->hw)) - DPRINTK(PROBE, ERR, "Hardware Error\n"); + struct ixgbe_hw *hw = &adapter->hw; + if (hw->mac.ops.init_hw(hw)) + dev_err(&adapter->pdev->dev, "Hardware Error\n"); /* reprogram the RAR[0] in case user changed it. 
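The set_rx_mode path above hands the shared code a raw pointer to the first address plus a count and an iterator callback, and update_uc/mc_addr_list simply calls the iterator once per entry to get the next MAC and its VMDq pool. A simplified host-side sketch of that iterator shape; the real callback also receives the hw pointer and walks the netdev dmi list via its link field, which is flattened here into a toy node type:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* toy stand-in for a netdev address list node (addr first on purpose) */
struct dev_addr {
    uint8_t addr[6];
    struct dev_addr *next;
};

/* same shape as ixgbe_mc_addr_itr, minus the hw argument */
typedef uint8_t *(*addr_itr)(uint8_t **cursor, uint32_t *vmdq);

static uint8_t *list_itr(uint8_t **cursor, uint32_t *vmdq)
{
    struct dev_addr *entry = (struct dev_addr *)*cursor;
    *cursor = (uint8_t *)entry->next;   /* advance for the next call */
    *vmdq = 0;                          /* no VMDq pool in this sketch */
    return entry->addr;
}

static void update_addr_list(uint8_t *addr_list, uint32_t count, addr_itr next)
{
    for (uint32_t i = 0; i < count; i++) {
        uint32_t vmdq;
        uint8_t *mac = next(&addr_list, &vmdq);
        printf("program entry %u: %02x:%02x:%02x:%02x:%02x:%02x\n",
               (unsigned)i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    }
}

int main(void)
{
    struct dev_addr a2 = { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 }, NULL };
    struct dev_addr a1 = { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }, &a2 };

    update_addr_list((uint8_t *)&a1, 2, list_itr);
    return 0;
}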
*/ - ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); } @@ -2637,6 +2640,14 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) struct pci_dev *pdev = adapter->pdev; unsigned int rss; + /* PCI config space info */ + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + /* Set capability flags */ rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); adapter->ring_feature[RING_F_RSS].indices = rss; @@ -2652,15 +2663,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) /* select 10G link by default */ hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; - if (hw->mac.ops.reset(hw)) { - dev_err(&pdev->dev, "HW Init failed\n"); - return -EIO; - } - if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true, - false)) { - dev_err(&pdev->dev, "Link Speed setup failed\n"); - return -EIO; - } /* enable itr by default in dynamic mode */ adapter->itr_setting = 1; @@ -2675,7 +2677,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->rx_ring_count = IXGBE_DEFAULT_RXD; /* initialize eeprom parameters */ - if (ixgbe_init_eeprom(hw)) { + if (ixgbe_init_eeprom_params_generic(hw)) { dev_err(&pdev->dev, "EEPROM initialization failed\n"); return -EIO; } @@ -3622,7 +3624,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p) memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); - ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); + adapter->hw.mac.ops.set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); return 0; } @@ -3645,6 +3647,22 @@ static void ixgbe_netpoll(struct net_device *netdev) } #endif +/** + * ixgbe_link_config - set up initial link with default speed and duplex + * @hw: pointer to private hardware struct + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_link_config(struct ixgbe_hw *hw) +{ + u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL; + + /* must always autoneg for both 1G and 10G link */ + hw->mac.autoneg = true; + + return hw->mac.ops.setup_link_speed(hw, autoneg, true, true); +} + /** * ixgbe_napi_add_all - prep napi structs for use * @adapter: private struct @@ -3691,7 +3709,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, static int cards_found; int i, err, pci_using_dac; u16 link_status, link_speed, link_width; - u32 part_num; + u32 part_num, eec; err = pci_enable_device(pdev); if (err) @@ -3705,8 +3723,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (err) { err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); + dev_err(&pdev->dev, "No usable DMA configuration, " + "aborting\n"); goto err_dma; } } @@ -3772,17 +3790,21 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, adapter->bd_number = cards_found; - /* PCI config space info */ - hw->vendor_id = pdev->vendor; - hw->device_id = pdev->device; - hw->revision_id = pdev->revision; - hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_device_id = pdev->subsystem_device; - /* Setup hw api */ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); hw->mac.type = ii->mac; + /* EEPROM */ + memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); + eec = 
IXGBE_READ_REG(hw, IXGBE_EEC); + /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ + if (!(eec & (1 << 8))) + hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; + + /* PHY */ + memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); + /* phy->sfp_type = ixgbe_sfp_type_unknown; */ + err = ii->get_invariants(hw); if (err) goto err_hw_init; @@ -3792,6 +3814,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (err) goto err_sw_init; + /* reset_hw fills in the perm_addr as well */ + err = hw->mac.ops.reset_hw(hw); + if (err) { + dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); + goto err_sw_init; + } + netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | @@ -3812,7 +3841,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, netdev->features |= NETIF_F_HIGHDMA; /* make sure the EEPROM is good */ - if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { + if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); err = -EIO; goto err_eeprom; @@ -3821,7 +3850,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); - if (ixgbe_validate_mac_addr(netdev->dev_addr)) { + if (ixgbe_validate_mac_addr(netdev->perm_addr)) { + dev_err(&pdev->dev, "invalid MAC address\n"); err = -EIO; goto err_eeprom; } @@ -3853,7 +3883,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, "Unknown"), netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); - ixgbe_read_part_num(hw, &part_num); + ixgbe_read_pba_num_generic(hw, &part_num); dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", hw->mac.type, hw->phy.type, (part_num >> 8), (part_num & 0xff)); @@ -3867,7 +3897,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, } /* reset the hardware with the new settings */ - ixgbe_start_hw(hw); + hw->mac.ops.start_hw(hw); + + /* link_config depends on start_hw being called at least once */ + err = ixgbe_link_config(hw); + if (err) { + dev_err(&pdev->dev, "setup_link_speed FAILED %d\n", err); + goto err_register; + } netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 8002931ae82..63a70176241 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -33,32 +33,36 @@ #include "ixgbe_common.h" #include "ixgbe_phy.h" +static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); -static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); -static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data); /** - * ixgbe_identify_phy - Get physical layer module + * ixgbe_identify_phy_generic - Get physical layer module * @hw: pointer to hardware structure * * Determines the physical layer module found on the current adapter. 
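One small probe-time detail above: after copying the default eeprom ops from the info table, the driver reads EEC and, if bit 8 is not set, swaps the read method to the bit-bang routine. The same capability-check-then-function-pointer pattern in a standalone sketch; the bit position follows the excerpt, the register value is simulated, and both read functions are stubs:

#include <stdio.h>
#include <stdint.h>

typedef int (*eeprom_read_fn)(uint16_t offset, uint16_t *data);

static int read_eerd(uint16_t offset, uint16_t *data)
{
    (void)offset; *data = 0xABCD;   /* pretend EERD-based read */
    printf("read via EERD\n");
    return 0;
}

static int read_bit_bang(uint16_t offset, uint16_t *data)
{
    (void)offset; *data = 0xABCD;   /* pretend bit-banged SPI read */
    printf("read via bit-bang\n");
    return 0;
}

int main(void)
{
    uint32_t eec = 0x00000000;          /* simulated EEC; flip bit 8 to change paths */
    eeprom_read_fn read = read_eerd;    /* default op, as copied from the info table */

    /* same shape as the probe check: bit 8 clear -> fall back to bit-bang */
    if (!(eec & (1u << 8)))
        read = read_bit_bang;

    uint16_t word;
    read(0, &word);
    printf("word 0 = 0x%04x\n", word);
    return 0;
}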
**/ -s32 ixgbe_identify_phy(struct ixgbe_hw *hw) +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_PHY_ADDR_INVALID; u32 phy_addr; - for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { - if (ixgbe_validate_phy_addr(hw, phy_addr)) { - hw->phy.addr = phy_addr; - ixgbe_get_phy_id(hw); - hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); - status = 0; - break; + if (hw->phy.type == ixgbe_phy_unknown) { + for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { + if (ixgbe_validate_phy_addr(hw, phy_addr)) { + hw->phy.addr = phy_addr; + ixgbe_get_phy_id(hw); + hw->phy.type = + ixgbe_get_phy_type_from_id(hw->phy.id); + status = 0; + break; + } } + } else { + status = 0; } + return status; } @@ -73,10 +77,8 @@ static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr) bool valid = false; hw->phy.addr = phy_addr; - ixgbe_read_phy_reg(hw, - IXGBE_MDIO_PHY_ID_HIGH, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - &phy_id); + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id); if (phy_id != 0xFFFF && phy_id != 0x0) valid = true; @@ -95,21 +97,18 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) u16 phy_id_high = 0; u16 phy_id_low = 0; - status = ixgbe_read_phy_reg(hw, - IXGBE_MDIO_PHY_ID_HIGH, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - &phy_id_high); + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_high); if (status == 0) { hw->phy.id = (u32)(phy_id_high << 16); - status = ixgbe_read_phy_reg(hw, - IXGBE_MDIO_PHY_ID_LOW, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - &phy_id_low); + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_low); hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); } - return status; } @@ -123,9 +122,6 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) enum ixgbe_phy_type phy_type; switch (phy_id) { - case TN1010_PHY_ID: - phy_type = ixgbe_phy_tn; - break; case QT2022_PHY_ID: phy_type = ixgbe_phy_qt; break; @@ -138,32 +134,31 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) } /** - * ixgbe_reset_phy - Performs a PHY reset + * ixgbe_reset_phy_generic - Performs a PHY reset * @hw: pointer to hardware structure **/ -s32 ixgbe_reset_phy(struct ixgbe_hw *hw) +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) { /* * Perform soft PHY reset to the PHY_XS. 
* This will cause a soft reset to the PHY */ - return ixgbe_write_phy_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, - IXGBE_MDIO_PHY_XS_DEV_TYPE, - IXGBE_MDIO_PHY_XS_RESET); + return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + IXGBE_MDIO_PHY_XS_RESET); } /** - * ixgbe_read_phy_reg - Reads a value from a specified PHY register + * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register * @hw: pointer to hardware structure * @reg_addr: 32 bit address of PHY register to read * @phy_data: Pointer to read data from PHY register **/ -s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data) +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) { u32 command; u32 i; - u32 timeout = 10; u32 data; s32 status = 0; u16 gssr; @@ -179,9 +174,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, if (status == 0) { /* Setup and write the address cycle command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); @@ -190,7 +185,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, * The MDI Command bit will clear when the operation is * complete */ - for (i = 0; i < timeout; i++) { + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { udelay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); @@ -210,9 +205,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, * command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); @@ -221,7 +216,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, * completed. 
The MDI Command bit will clear when the * operation is complete */ - for (i = 0; i < timeout; i++) { + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { udelay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); @@ -231,8 +226,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, - "PHY read command didn't complete\n"); + hw_dbg(hw, "PHY read command didn't complete\n"); status = IXGBE_ERR_PHY; } else { /* @@ -247,22 +241,22 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, ixgbe_release_swfw_sync(hw, gssr); } + return status; } /** - * ixgbe_write_phy_reg - Writes a value to specified PHY register + * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: 5 bit device type * @phy_data: Data to write to the PHY register **/ -static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data) +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) { u32 command; u32 i; - u32 timeout = 10; s32 status = 0; u16 gssr; @@ -280,9 +274,9 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, /* Setup and write the address cycle command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); @@ -291,19 +285,19 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, * The MDI Command bit will clear when the operation is * complete */ - for (i = 0; i < timeout; i++) { + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { udelay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { - hw_dbg(hw, "PHY address cmd didn't complete\n"); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) break; - } } - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY address cmd didn't complete\n"); status = IXGBE_ERR_PHY; + } if (status == 0) { /* @@ -311,9 +305,9 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, * command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); @@ -322,20 +316,19 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, * completed. 
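Both MDIO paths above repeat the same idiom twice: write a command to MSCA (an address cycle, then a read or write cycle) and poll until the MDI command bit clears, giving up after IXGBE_MDIO_COMMAND_TIMEOUT polls of 10 us each. A generic sketch of that poll-until-clear-or-timeout pattern against a simulated register; the bit value and timeout count are placeholders, and the fake register "completes" after a few polls:

#include <stdio.h>
#include <stdint.h>

#define MDI_COMMAND_BIT       0x40000000u /* stand-in for IXGBE_MSCA_MDI_COMMAND */
#define MDIO_COMMAND_TIMEOUT  100         /* stand-in poll count */

static uint32_t fake_msca;      /* simulated MSCA register */
static unsigned int busy_polls;

/* pretend the hardware needs a few polls before it clears the busy bit */
static uint32_t read_msca(void)
{
    if (busy_polls && --busy_polls == 0)
        fake_msca &= ~MDI_COMMAND_BIT;
    return fake_msca;
}

static int issue_and_wait(uint32_t command)
{
    fake_msca = command | MDI_COMMAND_BIT;  /* "write" the command */
    busy_polls = 7;

    for (unsigned int i = 0; i < MDIO_COMMAND_TIMEOUT; i++) {
        /* the driver udelay(10)s here before re-reading */
        if (!(read_msca() & MDI_COMMAND_BIT))
            return 0;           /* operation complete */
    }
    return -1;                  /* command didn't complete */
}

int main(void)
{
    printf("address cycle: %s\n", issue_and_wait(0x1) ? "timed out" : "done");
    printf("data cycle:    %s\n", issue_and_wait(0x2) ? "timed out" : "done");
    return 0;
}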
The MDI Command bit will clear when the * operation is complete */ - for (i = 0; i < timeout; i++) { + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { udelay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { - hw_dbg(hw, "PHY write command did not " - "complete.\n"); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) break; - } } - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY address cmd didn't complete\n"); status = IXGBE_ERR_PHY; + } } ixgbe_release_swfw_sync(hw, gssr); @@ -345,67 +338,54 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, } /** - * ixgbe_setup_tnx_phy_link - Set and restart autoneg + * ixgbe_setup_phy_link_generic - Set and restart autoneg * @hw: pointer to hardware structure * * Restart autonegotiation and PHY and waits for completion. **/ -s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw) +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) { s32 status = IXGBE_NOT_IMPLEMENTED; u32 time_out; u32 max_time_out = 10; - u16 autoneg_speed_selection_register = 0x10; - u16 autoneg_restart_mask = 0x0200; - u16 autoneg_complete_mask = 0x0020; - u16 autoneg_reg = 0; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; /* * Set advertisement settings in PHY based on autoneg_advertised * settings. If autoneg_advertised = 0, then advertise default values - * txn devices cannot be "forced" to a autoneg 10G and fail. But can + * tnx devices cannot be "forced" to a autoneg 10G and fail. But can * for a 1G. */ - ixgbe_read_phy_reg(hw, - autoneg_speed_selection_register, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_reg); + hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL) autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */ else autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */ - ixgbe_write_phy_reg(hw, - autoneg_speed_selection_register, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - autoneg_reg); - + hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); /* Restart PHY autonegotiation and wait for completion */ - ixgbe_read_phy_reg(hw, - IXGBE_MDIO_AUTO_NEG_CONTROL, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_reg); + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); - autoneg_reg |= autoneg_restart_mask; + autoneg_reg |= IXGBE_MII_RESTART; - ixgbe_write_phy_reg(hw, - IXGBE_MDIO_AUTO_NEG_CONTROL, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - autoneg_reg); + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); /* Wait for autonegotiation to finish */ for (time_out = 0; time_out < max_time_out; time_out++) { udelay(10); /* Restart PHY autonegotiation and wait for completion */ - status = ixgbe_read_phy_reg(hw, - IXGBE_MDIO_AUTO_NEG_STATUS, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_reg); + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); - autoneg_reg &= autoneg_complete_mask; - if (autoneg_reg == autoneg_complete_mask) { + autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE; + if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) { status = 0; break; } @@ -418,64 +398,17 @@ s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw) } /** - * ixgbe_check_tnx_phy_link - Determine link and speed status - * @hw: pointer to hardware structure - * - * Reads the VS1 register to determine if link 
is up and the current speed for - * the PHY. - **/ -s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, - bool *link_up) -{ - s32 status = 0; - u32 time_out; - u32 max_time_out = 10; - u16 phy_link = 0; - u16 phy_speed = 0; - u16 phy_data = 0; - - /* Initialize speed and link to default case */ - *link_up = false; - *speed = IXGBE_LINK_SPEED_10GB_FULL; - - /* - * Check current speed and link status of the PHY register. - * This is a vendor specific register and may have to - * be changed for other copper PHYs. - */ - for (time_out = 0; time_out < max_time_out; time_out++) { - udelay(10); - if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { - *link_up = true; - if (phy_speed == - IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) - *speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - } else { - status = ixgbe_read_phy_reg(hw, - IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - &phy_data); - phy_link = phy_data & - IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; - phy_speed = phy_data & - IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; - } - } - - return status; -} - -/** - * ixgbe_setup_tnx_phy_link_speed - Sets the auto advertised capabilities + * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities * @hw: pointer to hardware structure * @speed: new link speed * @autoneg: true if autonegotiation enabled **/ -s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, - bool autoneg, - bool autoneg_wait_to_complete) +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete) { + /* * Clear autoneg_advertised and set new values based on input link * speed. @@ -484,11 +417,13 @@ s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, if (speed & IXGBE_LINK_SPEED_10GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed & IXGBE_LINK_SPEED_1GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; /* Setup link based on the new speed settings */ - ixgbe_setup_tnx_phy_link(hw); + hw->phy.ops.setup_link(hw); return 0; } + diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h index aa3ea72e678..f88c9131a01 100644 --- a/drivers/net/ixgbe/ixgbe_phy.h +++ b/drivers/net/ixgbe/ixgbe_phy.h @@ -30,20 +30,52 @@ #define _IXGBE_PHY_H_ #include "ixgbe_type.h" +#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 -s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); -s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up); -s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg, - bool autoneg_wait_to_complete); -s32 ixgbe_identify_phy(struct ixgbe_hw *hw); -s32 ixgbe_reset_phy(struct ixgbe_hw *hw); -s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data); - -/* PHY specific */ -s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw); -s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up); -s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg, - bool autoneg_wait_to_complete); +/* EEPROM byte offsets */ +#define IXGBE_SFF_IDENTIFIER 0x0 +#define IXGBE_SFF_IDENTIFIER_SFP 0x3 +#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 +#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 +#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 +#define IXGBE_SFF_1GBE_COMP_CODES 0x6 +#define IXGBE_SFF_10GBE_COMP_CODES 0x3 +#define IXGBE_SFF_TRANSMISSION_MEDIA 0x9 + +/* Bitmasks */ +#define IXGBE_SFF_TWIN_AX_CAPABLE 0x80 +#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 +#define 
IXGBE_SFF_10GBASESR_CAPABLE 0x10 +#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define IXGBE_I2C_EEPROM_READ_MASK 0x100 +#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 +#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 +#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 +#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 + +/* Bit-shift macros */ +#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 12 +#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 8 +#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 4 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 +#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 +#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 + + +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete); #endif /* _IXGBE_PHY_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 7057aa3f393..c76e30b94d8 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -37,7 +37,6 @@ /* Device IDs */ #define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 -#define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8 #define IXGBE_DEV_ID_82598EB_CX4 0x10DD #define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC #define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 @@ -71,11 +70,11 @@ #define IXGBE_EIMC 0x00888 #define IXGBE_EIAC 0x00810 #define IXGBE_EIAM 0x00890 -#define IXGBE_EITR(_i) (0x00820 + ((_i) * 4)) /* 0x820-0x86c */ -#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ +#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : (0x012300 + ((_i) * 4))) +#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ #define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ #define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ -#define IXGBE_PBACL 0x11068 +#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4))) #define IXGBE_GPIE 0x00898 /* Flow Control Registers */ @@ -87,20 +86,33 @@ #define IXGBE_TFCS 0x0CE00 /* Receive DMA Registers */ -#define IXGBE_RDBAL(_i) (0x01000 + ((_i) * 0x40)) /* 64 of each (0-63)*/ -#define IXGBE_RDBAH(_i) (0x01004 + ((_i) * 0x40)) -#define IXGBE_RDLEN(_i) (0x01008 + ((_i) * 0x40)) -#define IXGBE_RDH(_i) (0x01010 + ((_i) * 0x40)) -#define IXGBE_RDT(_i) (0x01018 + ((_i) * 0x40)) -#define IXGBE_RXDCTL(_i) (0x01028 + ((_i) * 0x40)) -#define IXGBE_RSCCTL(_i) (0x0102C + ((_i) * 0x40)) -#define IXGBE_SRRCTL(_i) (0x02100 + ((_i) * 4)) - /* array of 16 (0x02100-0x0213C) */ -#define IXGBE_DCA_RXCTRL(_i) (0x02200 + ((_i) * 4)) - /* array of 16 (0x02200-0x0223C) */ -#define IXGBE_RDRXCTL 0x02F00 +#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : (0x0D000 + ((_i - 64) * 0x40))) +#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : (0x0D004 + ((_i - 64) * 0x40))) +#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : (0x0D008 + ((_i - 64) * 0x40))) +#define IXGBE_RDH(_i) (((_i) < 64) ? 
(0x01010 + ((_i) * 0x40)) : (0x0D010 + ((_i - 64) * 0x40))) +#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : (0x0D018 + ((_i - 64) * 0x40))) +#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : (0x0D028 + ((_i - 64) * 0x40))) +/* + * Split and Replication Receive Control Registers + * 00-15 : 0x02100 + n*4 + * 16-64 : 0x01014 + n*0x40 + * 64-127: 0x0D014 + (n-64)*0x40 + */ +#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ + (0x0D014 + ((_i - 64) * 0x40)))) +/* + * Rx DCA Control Register: + * 00-15 : 0x02200 + n*4 + * 16-64 : 0x0100C + n*0x40 + * 64-127: 0x0D00C + (n-64)*0x40 + */ +#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ + (0x0D00C + ((_i - 64) * 0x40)))) +#define IXGBE_RDRXCTL 0x02F00 #define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) - /* 8 of these 0x03C00 - 0x03C1C */ + /* 8 of these 0x03C00 - 0x03C1C */ #define IXGBE_RXCTRL 0x03000 #define IXGBE_DROPEN 0x03D04 #define IXGBE_RXPBSIZE_SHIFT 10 @@ -108,29 +120,32 @@ /* Receive Registers */ #define IXGBE_RXCSUM 0x05000 #define IXGBE_RFCTL 0x05008 +#define IXGBE_DRECCCTL 0x02F08 +#define IXGBE_DRECCCTL_DISABLE 0 +/* Multicast Table Array - 128 entries */ #define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) - /* Multicast Table Array - 128 entries */ -#define IXGBE_RAL(_i) (0x05400 + ((_i) * 8)) /* 16 of these (0-15) */ -#define IXGBE_RAH(_i) (0x05404 + ((_i) * 8)) /* 16 of these (0-15) */ -#define IXGBE_PSRTYPE 0x05480 - /* 0x5480-0x54BC Packet split receive type */ +#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : (0x0A200 + ((_i) * 8))) +#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : (0x0A204 + ((_i) * 8))) +/* Packet split receive type */ +#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? 
(0x05480 + ((_i) * 4)) : (0x0EA00 + ((_i) * 4))) +/* array of 4096 1-bit vlan filters */ #define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) - /* array of 4096 1-bit vlan filters */ +/*array of 4096 4-bit vlan vmdq indices */ #define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) - /*array of 4096 4-bit vlan vmdq indicies */ #define IXGBE_FCTRL 0x05080 #define IXGBE_VLNCTRL 0x05088 #define IXGBE_MCSTCTRL 0x05090 #define IXGBE_MRQC 0x05818 -#define IXGBE_VMD_CTL 0x0581C #define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ #define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ #define IXGBE_IMIRVP 0x05AC0 +#define IXGBE_VMD_CTL 0x0581C #define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ + /* Transmit DMA registers */ -#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40))/* 32 of these (0-31)*/ +#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/ #define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) #define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) #define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) @@ -139,11 +154,10 @@ #define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) #define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) #define IXGBE_DTXCTL 0x07E00 -#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) - /* there are 16 of these (0-15) */ + +#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ #define IXGBE_TIPG 0x0CB00 -#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) *0x04)) - /* there are 8 of these */ +#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ #define IXGBE_MNGTXMAP 0x0CD10 #define IXGBE_TIPG_FIBER_DEFAULT 3 #define IXGBE_TXPBSIZE_SHIFT 10 @@ -155,6 +169,7 @@ #define IXGBE_IPAV 0x05838 #define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ #define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ + #define IXGBE_WUPL 0x05900 #define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ #define IXGBE_FHFT 0x09000 /* Flex host filter table 9000-93FC */ @@ -171,6 +186,8 @@ #define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ #define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ + + /* Stats registers */ #define IXGBE_CRCERRS 0x04000 #define IXGBE_ILLERRC 0x04004 @@ -225,7 +242,7 @@ #define IXGBE_XEC 0x04120 #define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */ -#define IXGBE_TQSMR(_i) (0x07300 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_TQSMR(_i) (((_i) <= 7) ? 
(0x07300 + ((_i) * 4)) : (0x08600 + ((_i) * 4))) #define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ #define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ @@ -276,17 +293,17 @@ #define IXGBE_DCA_CTRL 0x11074 /* Diagnostic Registers */ -#define IXGBE_RDSTATCTL 0x02C20 -#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ -#define IXGBE_RDHMPN 0x02F08 +#define IXGBE_RDSTATCTL 0x02C20 +#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ +#define IXGBE_RDHMPN 0x02F08 #define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) -#define IXGBE_RDPROBE 0x02F20 -#define IXGBE_TDSTATCTL 0x07C20 -#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ -#define IXGBE_TDHMPN 0x07F08 +#define IXGBE_RDPROBE 0x02F20 +#define IXGBE_TDSTATCTL 0x07C20 +#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ +#define IXGBE_TDHMPN 0x07F08 #define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) -#define IXGBE_TDPROBE 0x07F20 -#define IXGBE_TXBUFCTRL 0x0C600 +#define IXGBE_TDPROBE 0x07F20 +#define IXGBE_TXBUFCTRL 0x0C600 #define IXGBE_TXBUFDATA0 0x0C610 #define IXGBE_TXBUFDATA1 0x0C614 #define IXGBE_TXBUFDATA2 0x0C618 @@ -387,7 +404,7 @@ #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ #define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ -#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* TX Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ #define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ /* MSCA Bit Masks */ @@ -411,10 +428,10 @@ #define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */ /* MSRWD bit masks */ -#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF -#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 -#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 -#define IXGBE_MSRWD_READ_DATA_SHIFT 16 +#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF +#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 +#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 +#define IXGBE_MSRWD_READ_DATA_SHIFT 16 /* Atlas registers */ #define IXGBE_ATLAS_PDN_LPBK 0x24 @@ -429,6 +446,7 @@ #define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 #define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 + /* Device Type definitions for new protocol MDIO commands */ #define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 #define IXGBE_MDIO_PCS_DEV_TYPE 0x3 @@ -436,6 +454,8 @@ #define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ +#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ + #define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ #define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ #define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ @@ -449,23 +469,39 @@ #define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ #define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ #define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ -#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Abilty Reg */ +#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ #define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ #define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Address Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ + +/* MII clause 22/28 definitions */ +#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 + +#define 
IXGBE_MII_SPEED_SELECTION_REG 0x10 +#define IXGBE_MII_RESTART 0x200 +#define IXGBE_MII_AUTONEG_COMPLETE 0x20 +#define IXGBE_MII_AUTONEG_REG 0x0 + #define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 #define IXGBE_MAX_PHY_ADDR 32 /* PHY IDs*/ -#define TN1010_PHY_ID 0x00A19410 #define QT2022_PHY_ID 0x0043A400 +/* PHY Types */ +#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 + /* General purpose Interrupt Enable */ -#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ -#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ -#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ -#define IXGBE_GPIE_EIAME 0x40000000 -#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 +#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ +#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ +#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ +#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ +#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ +#define IXGBE_GPIE_EIAME 0x40000000 +#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 /* Transmit Flow Control status */ #define IXGBE_TFCS_TXOFF 0x00000001 @@ -526,7 +562,7 @@ #define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ /* RMCS Bit Masks */ -#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recylce Mode enable */ +#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */ /* Receive Arbitration Control: 0 Round Robin, 1 DFP */ #define IXGBE_RMCS_RAC 0x00000004 #define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ @@ -534,12 +570,15 @@ #define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */ #define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + /* Interrupt register bitmasks */ /* Extended Interrupt Cause Read */ #define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ #define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ -#define IXGBE_EICR_MNG 0x00400000 /* Managability Event Interrupt */ +#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ +#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ #define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ #define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ #define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ @@ -547,11 +586,12 @@ /* Extended Interrupt Cause Set */ #define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ -#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ -#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ -#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ -#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ +#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ #define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ #define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ @@ -559,7 +599,9 @@ #define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ #define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ #define 
IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ -#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ +#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ #define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ #define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ #define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ @@ -568,18 +610,20 @@ #define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ #define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ #define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ -#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ -#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Error */ +#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ #define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ #define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ -#define IXGBE_EIMS_ENABLE_MASK (\ - IXGBE_EIMS_RTX_QUEUE | \ - IXGBE_EIMS_LSC | \ - IXGBE_EIMS_TCP_TIMER | \ - IXGBE_EIMS_OTHER) +#define IXGBE_EIMS_ENABLE_MASK ( \ + IXGBE_EIMS_RTX_QUEUE | \ + IXGBE_EIMS_LSC | \ + IXGBE_EIMS_TCP_TIMER | \ + IXGBE_EIMS_OTHER) -/* Immediate Interrupt RX (A.K.A. Low Latency Interrupt) */ +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ #define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ #define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ #define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ @@ -616,6 +660,7 @@ #define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ #define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ + #define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ /* STATUS Bit Masks */ @@ -663,16 +708,16 @@ #define IXGBE_AUTOC_AN_RESTART 0x00001000 #define IXGBE_AUTOC_FLU 0x00000001 #define IXGBE_AUTOC_LMS_SHIFT 13 -#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) - -#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200 -#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180 +#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200 +#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180 #define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 #define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 #define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) @@ -753,6 +798,11 @@ #define IXGBE_PBANUM0_PTR 
0x15 #define IXGBE_PBANUM1_PTR 0x16 +/* Legacy EEPROM word offsets */ +#define IXGBE_ISCSI_BOOT_CAPS 0x0033 +#define IXGBE_ISCSI_SETUP_PORT_0 0x0030 +#define IXGBE_ISCSI_SETUP_PORT_1 0x0034 + /* EEPROM Commands - SPI */ #define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ #define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 @@ -760,7 +810,7 @@ #define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ #define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ #define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ -/* EEPROM reset Write Enbale latch */ +/* EEPROM reset Write Enable latch */ #define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 #define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ #define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ @@ -799,22 +849,20 @@ /* Number of 100 microseconds we wait for PCI Express master disable */ #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 -/* PHY Types */ -#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 - /* Check whether address is multicast. This is little-endian specific check.*/ #define IXGBE_IS_MULTICAST(Address) \ - (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) /* Check whether an address is broadcast. */ #define IXGBE_IS_BROADCAST(Address) \ - ((((u8 *)(Address))[0] == ((u8)0xff)) && \ - (((u8 *)(Address))[1] == ((u8)0xff))) + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ + (((u8 *)(Address))[1] == ((u8)0xff))) /* RAH */ #define IXGBE_RAH_VIND_MASK 0x003C0000 #define IXGBE_RAH_VIND_SHIFT 18 #define IXGBE_RAH_AV 0x80000000 +#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF /* Header split receive */ #define IXGBE_RFCTL_ISCSI_DIS 0x00000001 @@ -843,7 +891,7 @@ #define IXGBE_MAX_FRAME_SZ 0x40040000 #define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ -#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq. 
# write-back enable */ +#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ /* Receive Config masks */ #define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ @@ -856,7 +904,7 @@ #define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ #define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ #define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ -/* Receive Priority Flow Control Enbale */ +/* Receive Priority Flow Control Enable */ #define IXGBE_FCTRL_RPFCE 0x00004000 #define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ @@ -886,9 +934,8 @@ /* Receive Descriptor bit definitions */ #define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ #define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ -#define IXGBE_RXD_STAT_IXSM 0x04 /* Ignore checksum */ #define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ #define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ #define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ #define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ @@ -904,7 +951,7 @@ #define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ #define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ #define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ -#define IXGBE_RXDADV_HBO 0x00800000 +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ #define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ #define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ #define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ @@ -918,15 +965,17 @@ #define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ #define IXGBE_RXD_CFI_SHIFT 12 + /* SRRCTL bit definitions */ -#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ -#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 -#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 #define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 #define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF @@ -960,21 +1009,20 @@ #define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ #define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ #define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ - /* Masks to determine if packets should be dropped due to frame errors */ -#define IXGBE_RXD_ERR_FRAME_ERR_MASK (\ - IXGBE_RXD_ERR_CE | \ - IXGBE_RXD_ERR_LE | \ - IXGBE_RXD_ERR_PE | \ - IXGBE_RXD_ERR_OSE | \ - IXGBE_RXD_ERR_USE) - -#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK (\ - IXGBE_RXDADV_ERR_CE | \ - IXGBE_RXDADV_ERR_LE | \ - IXGBE_RXDADV_ERR_PE | \ - IXGBE_RXDADV_ERR_OSE | \ - IXGBE_RXDADV_ERR_USE) +#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXD_ERR_CE | \ + IXGBE_RXD_ERR_LE | \ + IXGBE_RXD_ERR_PE | \ + IXGBE_RXD_ERR_OSE | \ + IXGBE_RXD_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXDADV_ERR_CE | \ + IXGBE_RXDADV_ERR_LE | \ + IXGBE_RXDADV_ERR_PE | \ + IXGBE_RXDADV_ERR_OSE | \ + IXGBE_RXDADV_ERR_USE) /* Multicast bit mask */ #define IXGBE_MCSTCTRL_MFE 0x4 
@@ -990,6 +1038,7 @@ #define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ #define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT + /* Transmit Descriptor - Legacy */ struct ixgbe_legacy_tx_desc { u64 buffer_addr; /* Address of the descriptor's data buffer */ @@ -1004,8 +1053,8 @@ struct ixgbe_legacy_tx_desc { union { __le32 data; struct { - u8 status; /* Descriptor status */ - u8 css; /* Checksum start */ + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ __le16 vlan; } fields; } upper; @@ -1014,7 +1063,7 @@ struct ixgbe_legacy_tx_desc { /* Transmit Descriptor - Advanced */ union ixgbe_adv_tx_desc { struct { - __le64 buffer_addr; /* Address of descriptor's data buf */ + __le64 buffer_addr; /* Address of descriptor's data buf */ __le32 cmd_type_len; __le32 olinfo_status; } read; @@ -1046,8 +1095,8 @@ union ixgbe_adv_rx_desc { union { __le32 data; struct { - __le16 pkt_info; /* RSS type, Packet type */ - __le16 hdr_info; /* Split Header, header len */ + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ } hs_rss; } lo_dword; union { @@ -1075,49 +1124,69 @@ struct ixgbe_adv_tx_context_desc { }; /* Adv Transmit Descriptor Config Masks */ -#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buffer length(bytes) */ +#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ #define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ #define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ #define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ #define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ #define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ -#define IXGBE_ADVTXD_DCMD_RDMA 0x04000000 /* RDMA */ #define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ -#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ #define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ #define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ #define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ #define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ -#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */ +#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ #define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ #define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ #define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ - IXGBE_ADVTXD_POPTS_SHIFT) + IXGBE_ADVTXD_POPTS_SHIFT) #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ - IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_POPTS_EOM 0x00000400 /* Enable L bit-RDMA DDP hdr */ -#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/ -#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ -#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ -#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ -#define IXGBE_ADVTXD_TUCMD_IPV4 
0x00000400 /* IP Packet Type: 1=IPv4 */ -#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ -#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ -#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */ -#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ - + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +/* Autonegotiation advertised speeds */ +typedef u32 ixgbe_autoneg_advertised; /* Link speed */ +typedef u32 ixgbe_link_speed; #define IXGBE_LINK_SPEED_UNKNOWN 0 #define IXGBE_LINK_SPEED_100_FULL 0x0008 #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 #define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) + +/* Physical layer type */ +typedef u32 ixgbe_physical_layer; +#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001 +#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 +#define IXGBE_PHYSICAL_LAYER_100BASE_T 0x0004 +#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020 +#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080 +#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100 +#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 +#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 enum ixgbe_eeprom_type { @@ -1134,16 +1203,38 @@ enum ixgbe_mac_type { enum ixgbe_phy_type { ixgbe_phy_unknown = 0, - ixgbe_phy_tn, ixgbe_phy_qt, - ixgbe_phy_xaui + ixgbe_phy_xaui, + ixgbe_phy_tw_tyco, + ixgbe_phy_tw_unknown, + ixgbe_phy_sfp_avago, + ixgbe_phy_sfp_ftl, + ixgbe_phy_sfp_unknown, + ixgbe_phy_generic +}; + +/* + * SFP+ module type IDs: + * + * ID Module Type + * ============= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + */ +enum ixgbe_sfp_type { + ixgbe_sfp_type_da_cu = 0, + ixgbe_sfp_type_sr = 1, + ixgbe_sfp_type_lr = 2, + ixgbe_sfp_type_unknown = 0xFFFF }; enum ixgbe_media_type { ixgbe_media_type_unknown = 0, ixgbe_media_type_fiber, ixgbe_media_type_copper, - ixgbe_media_type_backplane + ixgbe_media_type_backplane, + ixgbe_media_type_virtual }; /* Flow Control Settings */ @@ -1241,59 +1332,114 @@ struct ixgbe_hw; typedef u8* (*ixgbe_mc_addr_itr) 
(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq); +/* Function pointer table */ +struct ixgbe_eeprom_operations { + s32 (*init_params)(struct ixgbe_hw *); + s32 (*read)(struct ixgbe_hw *, u16, u16 *); + s32 (*write)(struct ixgbe_hw *, u16, u16); + s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); + s32 (*update_checksum)(struct ixgbe_hw *); +}; + struct ixgbe_mac_operations { - s32 (*reset)(struct ixgbe_hw *); + s32 (*init_hw)(struct ixgbe_hw *); + s32 (*reset_hw)(struct ixgbe_hw *); + s32 (*start_hw)(struct ixgbe_hw *); + s32 (*clear_hw_cntrs)(struct ixgbe_hw *); enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); + s32 (*get_supported_physical_layer)(struct ixgbe_hw *); + s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*stop_adapter)(struct ixgbe_hw *); + s32 (*get_bus_info)(struct ixgbe_hw *); + s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); + s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); + + /* Link */ s32 (*setup_link)(struct ixgbe_hw *); - s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *, bool); - s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool); - s32 (*get_link_settings)(struct ixgbe_hw *, u32 *, bool *); + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, + bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); + s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, + bool *); + + /* LED */ + s32 (*led_on)(struct ixgbe_hw *, u32); + s32 (*led_off)(struct ixgbe_hw *, u32); + s32 (*blink_led_start)(struct ixgbe_hw *, u32); + s32 (*blink_led_stop)(struct ixgbe_hw *, u32); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); + s32 (*clear_rar)(struct ixgbe_hw *, u32); + s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*init_rx_addrs)(struct ixgbe_hw *); + s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32, + ixgbe_mc_addr_itr); + s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, + ixgbe_mc_addr_itr); + s32 (*enable_mc)(struct ixgbe_hw *); + s32 (*disable_mc)(struct ixgbe_hw *); + s32 (*clear_vfta)(struct ixgbe_hw *); + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); + s32 (*init_uta_tables)(struct ixgbe_hw *); + + /* Flow Control */ + s32 (*setup_fc)(struct ixgbe_hw *, s32); }; struct ixgbe_phy_operations { + s32 (*identify)(struct ixgbe_hw *); + s32 (*identify_sfp)(struct ixgbe_hw *); + s32 (*reset)(struct ixgbe_hw *); + s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); + s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); s32 (*setup_link)(struct ixgbe_hw *); - s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *); - s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool); -}; - -struct ixgbe_mac_info { - struct ixgbe_mac_operations ops; - enum ixgbe_mac_type type; - u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; - u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; - s32 mc_filter_type; - u32 mcft_size; - u32 vft_size; - u32 num_rar_entries; - u32 num_rx_queues; - u32 num_tx_queues; - u32 link_attach_type; - u32 link_mode_select; - bool link_settings_loaded; + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, + bool); + s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); + s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); + s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); }; struct ixgbe_eeprom_info { - enum ixgbe_eeprom_type type; - u16 word_size; - u16 address_bits; + struct ixgbe_eeprom_operations ops; + 
enum ixgbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; + u16 address_bits; }; -struct ixgbe_phy_info { - struct ixgbe_phy_operations ops; - - enum ixgbe_phy_type type; - u32 addr; - u32 id; - u32 revision; - enum ixgbe_media_type media_type; - u32 autoneg_advertised; - bool autoneg_wait_to_complete; +struct ixgbe_mac_info { + struct ixgbe_mac_operations ops; + enum ixgbe_mac_type type; + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + s32 mc_filter_type; + u32 mcft_size; + u32 vft_size; + u32 num_rar_entries; + u32 max_tx_queues; + u32 max_rx_queues; + u32 link_attach_type; + u32 link_mode_select; + bool link_settings_loaded; + bool autoneg; + bool autoneg_failed; }; -struct ixgbe_info { - enum ixgbe_mac_type mac; - s32 (*get_invariants)(struct ixgbe_hw *); - struct ixgbe_mac_operations *mac_ops; +struct ixgbe_phy_info { + struct ixgbe_phy_operations ops; + enum ixgbe_phy_type type; + u32 addr; + u32 id; + enum ixgbe_sfp_type sfp_type; + u32 revision; + enum ixgbe_media_type media_type; + bool reset_disable; + ixgbe_autoneg_advertised autoneg_advertised; + bool autoneg_wait_to_complete; }; struct ixgbe_hw { @@ -1312,6 +1458,15 @@ struct ixgbe_hw { bool adapter_stopped; }; +struct ixgbe_info { + enum ixgbe_mac_type mac; + s32 (*get_invariants)(struct ixgbe_hw *); + struct ixgbe_mac_operations *mac_ops; + struct ixgbe_eeprom_operations *eeprom_ops; + struct ixgbe_phy_operations *phy_ops; +}; + + /* Error Codes */ #define IXGBE_ERR_EEPROM -1 #define IXGBE_ERR_EEPROM_CHECKSUM -2 @@ -1330,6 +1485,8 @@ struct ixgbe_hw { #define IXGBE_ERR_RESET_FAILED -15 #define IXGBE_ERR_SWFW_SYNC -16 #define IXGBE_ERR_PHY_ADDR_INVALID -17 +#define IXGBE_ERR_I2C -18 +#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF #endif /* _IXGBE_TYPE_H_ */ -- cgit v1.2.3 From 7f8218752a76bb1f70b5e4e918f49bc5bf33275a Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 20:00:16 -0700 Subject: ixgbe: refresh the ixgbe_down function clean up the hardware shutdown sequence to prevent hardware from continuing to send when resetting or unloading. 
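For readability, the teardown ordering this patch establishes is summarized in the condensed C sketch below. It is distilled from the ixgbe_down() hunk that follows rather than copied verbatim; local variable declarations are omitted and the DCA handling at the end of the function is left out.

	/* Condensed sketch of the reworked ixgbe_down() ordering (illustrative only) */
	set_bit(__IXGBE_DOWN, &adapter->state);		/* tell the IRQ handler we are going down */

	/* stop the receiver before anything else */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	/* keep the stack from handing us new frames, then let in-flight DMA drain */
	netif_tx_disable(netdev);
	IXGBE_WRITE_FLUSH(hw);
	msleep(10);
	netif_tx_stop_all_queues(netdev);

	/* quiesce interrupts, NAPI and the watchdog */
	ixgbe_irq_disable(adapter);
	ixgbe_napi_disable_all(adapter);
	del_timer_sync(&adapter->watchdog_timer);
	cancel_work_sync(&adapter->watchdog_task);

	/* only now disable transmit in the hardware, queue by queue */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl & ~IXGBE_TXDCTL_ENABLE);
	}

	netif_carrier_off(netdev);			/* finally report link down */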
Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index df093ec830d..cafb915bc82 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1096,7 +1096,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); rx_ring = &(adapter->rx_ring[r_idx]); /* If all Rx work done, exit the polling mode */ - if ((work_done == 0) || !netif_running(netdev)) { + if (work_done < budget) { netif_rx_complete(netdev, napi); if (adapter->itr_setting & 3) ixgbe_set_itr_msix(q_vector); @@ -2174,32 +2174,41 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) void ixgbe_down(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; u32 rxctrl; + u32 txdctl; + int i, j; /* signal that we are down to the interrupt handler */ set_bit(__IXGBE_DOWN, &adapter->state); /* disable receives */ - rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, - rxctrl & ~IXGBE_RXCTRL_RXEN); + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); netif_tx_disable(netdev); - /* disable transmits in the hardware */ - - /* flush both disables */ - IXGBE_WRITE_FLUSH(&adapter->hw); + IXGBE_WRITE_FLUSH(hw); msleep(10); + netif_tx_stop_all_queues(netdev); + ixgbe_irq_disable(adapter); ixgbe_napi_disable_all(adapter); + del_timer_sync(&adapter->watchdog_timer); cancel_work_sync(&adapter->watchdog_task); + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + j = adapter->tx_ring[i].reg_idx; + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), + (txdctl & ~IXGBE_TXDCTL_ENABLE)); + } + netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { @@ -2219,7 +2228,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) adapter->flags |= IXGBE_FLAG_DCA_ENABLED; /* always use CB2 mode, difference is masked * in the CB driver */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); + IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2); ixgbe_setup_dca(adapter); } #endif -- cgit v1.2.3 From b9804972f52c2e6d60ddfa3a892c3628ed0aa846 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 20:00:29 -0700 Subject: ixgbe: move set_num_queues and call it after msix failure This is partial preparation for a future patch which will extend ixgbe_set_num_queues Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_ethtool.c | 2 +- drivers/net/ixgbe/ixgbe_main.c | 88 +++++++++++++++++++-------------------- 2 files changed, 45 insertions(+), 45 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index ff4fac34a17..020b0c7c195 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -947,7 +947,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev, } -static struct ethtool_ops ixgbe_ethtool_ops = { +static const struct ethtool_ops 
ixgbe_ethtool_ops = { .get_settings = ixgbe_get_settings, .set_settings = ixgbe_set_settings, .get_drvinfo = ixgbe_get_drvinfo, diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index cafb915bc82..257866b06f6 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -2333,6 +2333,46 @@ static void ixgbe_reset_task(struct work_struct *work) ixgbe_reinit_locked(adapter); } +static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) +{ + int nrq = 1, ntq = 1; + int feature_mask = 0, rss_i, rss_m; + + /* Number of supported queues */ + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + rss_i = adapter->ring_feature[RING_F_RSS].indices; + rss_m = 0; + feature_mask |= IXGBE_FLAG_RSS_ENABLED; + + switch (adapter->flags & feature_mask) { + case (IXGBE_FLAG_RSS_ENABLED): + rss_m = 0xF; + nrq = rss_i; + ntq = rss_i; + break; + case 0: + default: + rss_i = 0; + rss_m = 0; + nrq = 1; + ntq = 1; + break; + } + + adapter->ring_feature[RING_F_RSS].indices = rss_i; + adapter->ring_feature[RING_F_RSS].mask = rss_m; + break; + default: + nrq = 1; + ntq = 1; + break; + } + + adapter->num_rx_queues = nrq; + adapter->num_tx_queues = ntq; +} + static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, int vectors) { @@ -2372,54 +2412,13 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, kfree(adapter->msix_entries); adapter->msix_entries = NULL; adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; - adapter->num_tx_queues = 1; - adapter->num_rx_queues = 1; + ixgbe_set_num_queues(adapter); } else { adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ adapter->num_msix_vectors = vectors; } } -static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter) -{ - int nrq, ntq; - int feature_mask = 0, rss_i, rss_m; - - /* Number of supported queues */ - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - rss_i = adapter->ring_feature[RING_F_RSS].indices; - rss_m = 0; - feature_mask |= IXGBE_FLAG_RSS_ENABLED; - - switch (adapter->flags & feature_mask) { - case (IXGBE_FLAG_RSS_ENABLED): - rss_m = 0xF; - nrq = rss_i; - ntq = rss_i; - break; - case 0: - default: - rss_i = 0; - rss_m = 0; - nrq = 1; - ntq = 1; - break; - } - - adapter->ring_feature[RING_F_RSS].indices = rss_i; - adapter->ring_feature[RING_F_RSS].mask = rss_m; - break; - default: - nrq = 1; - ntq = 1; - break; - } - - adapter->num_rx_queues = nrq; - adapter->num_tx_queues = ntq; -} - /** * ixgbe_cache_ring_register - Descriptor ring to register mapping * @adapter: board private structure to initialize @@ -2482,11 +2481,12 @@ static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter) goto err_rx_ring_allocation; for (i = 0; i < adapter->num_tx_queues; i++) { - adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD; + adapter->tx_ring[i].count = adapter->tx_ring_count; adapter->tx_ring[i].queue_index = i; } + for (i = 0; i < adapter->num_rx_queues; i++) { - adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD; + adapter->rx_ring[i].count = adapter->rx_ring_count; adapter->rx_ring[i].queue_index = i; } -- cgit v1.2.3 From 2adc0511a3126e392aa390cd203e8c3d4603e2c6 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 20:03:00 -0700 Subject: ixgbe: cleanup defines Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 
2388af24672..d69215e1b4e 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -56,10 +56,6 @@ #define IXGBE_MAX_RXD 4096 #define IXGBE_MIN_RXD 64 -#define IXGBE_DEFAULT_RXQ 1 -#define IXGBE_MAX_RXQ 1 -#define IXGBE_MIN_RXQ 1 - /* flow control */ #define IXGBE_DEFAULT_FCRTL 0x10000 #define IXGBE_MIN_FCRTL 0x40 @@ -81,9 +77,6 @@ #define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) -/* How many Tx Descriptors do we need to call netif_wake_queue? */ -#define IXGBE_TX_QUEUE_WAKE 16 - /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ -- cgit v1.2.3 From 74ce8dd29be934c30d7874276cdb3dfffbf2bc80 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 20:03:23 -0700 Subject: ixgbe: lower stack space usage some functions were un-necessarily using local variables. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 257866b06f6..a215350c196 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -482,7 +482,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, int cleaned_count) { - struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *bi; @@ -516,7 +515,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, } if (!bi->skb) { - struct sk_buff *skb = netdev_alloc_skb(netdev, bufsz); + struct sk_buff *skb = netdev_alloc_skb(adapter->netdev, + bufsz); if (!skb) { adapter->alloc_rx_buff_failed++; @@ -580,7 +580,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, int *work_done, int work_to_do) { - struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union ixgbe_adv_rx_desc *rx_desc, *next_rxd; struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; @@ -680,9 +679,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, total_rx_bytes += skb->len; total_rx_packets++; - skb->protocol = eth_type_trans(skb, netdev); + skb->protocol = eth_type_trans(skb, adapter->netdev); ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc); - netdev->last_rx = jiffies; + adapter->netdev->last_rx = jiffies; next_desc: rx_desc->wb.upper.status_error = 0; @@ -1070,7 +1069,6 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) struct ixgbe_q_vector *q_vector = container_of(napi, struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = q_vector->adapter; - struct net_device *netdev = adapter->netdev; struct ixgbe_ring *rx_ring = NULL; int work_done = 0, i; long r_idx; @@ -1097,7 +1095,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) rx_ring = &(adapter->rx_ring[r_idx]); /* If all Rx work done, exit the polling mode */ if (work_done < budget) { - netif_rx_complete(netdev, napi); + netif_rx_complete(adapter->netdev, napi); if (adapter->itr_setting & 3) ixgbe_set_itr_msix(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) @@ -2284,7 +2282,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) struct ixgbe_q_vector *q_vector = container_of(napi, struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = 
q_vector->adapter; - int tx_cleaned = 0, work_done = 0; + int tx_cleaned, work_done = 0; #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { @@ -2307,7 +2305,6 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) if (!test_bit(__IXGBE_DOWN, &adapter->state)) ixgbe_irq_enable(adapter); } - return work_done; } -- cgit v1.2.3 From 9f8cdf4f06f81c7e21689b01bee0439fe9ae1966 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 11 Sep 2008 20:03:35 -0700 Subject: ixgbe: xmit frame refactor ixgbe_xmit_frame can be refactored to use fewer locals and better utilize common kernel macros. also fixed minor buglet with internal to driver vlan flag variable being passed incorrectly. Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 42 +++++++++++++++++------------------------- 1 file changed, 17 insertions(+), 25 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index a215350c196..41aede55233 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3213,8 +3213,8 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, unsigned int i; int err; struct ixgbe_tx_buffer *tx_buffer_info; - u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; - u32 mss_l4len_idx = 0, l4len; + u32 vlan_macip_lens = 0, type_tucmd_mlhl; + u32 mss_l4len_idx, l4len; if (skb_is_gso(skb)) { if (skb_header_cloned(skb)) { @@ -3263,7 +3263,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, context_desc->seqnum_seed = 0; /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ - type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | + type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT); if (skb->protocol == htons(ETH_P_IP)) @@ -3272,7 +3272,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); /* MSS L4LEN IDX */ - mss_l4len_idx |= + mss_l4len_idx = (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT); mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT); /* use index 1 for TSO */ @@ -3330,14 +3330,12 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; break; - case __constant_htons(ETH_P_IPV6): /* XXX what about other V6 headers?? 
*/ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; break; - default: if (unlikely(net_ratelimit())) { DPRINTK(PROBE, WARNING, @@ -3354,6 +3352,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, tx_buffer_info->time_stamp = jiffies; tx_buffer_info->next_to_watch = i; + adapter->hw_csum_tx_good++; i++; if (i == tx_ring->count) @@ -3362,6 +3361,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, return true; } + return false; } @@ -3533,43 +3533,35 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *tx_ring; - unsigned int len = skb->len; unsigned int first; unsigned int tx_flags = 0; u8 hdr_len = 0; int r_idx = 0, tso; - unsigned int mss = 0; int count = 0; unsigned int f; - unsigned int nr_frags = skb_shinfo(skb)->nr_frags; - len -= skb->data_len; + r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping; tx_ring = &adapter->tx_ring[r_idx]; - - if (skb->len <= 0) { - dev_kfree_skb(skb); - return NETDEV_TX_OK; + if (adapter->vlgrp && vlan_tx_tag_present(skb)) { + tx_flags |= vlan_tx_tag_get(skb); + tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_VLAN; } - mss = skb_shinfo(skb)->gso_size; - - if (mss) - count++; - else if (skb->ip_summed == CHECKSUM_PARTIAL) + /* three things can cause us to need a context descriptor */ + if (skb_is_gso(skb) || + (skb->ip_summed == CHECKSUM_PARTIAL) || + (tx_flags & IXGBE_TX_FLAGS_VLAN)) count++; - count += TXD_USE_COUNT(len); - for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_headlen(skb)); + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) { adapter->tx_busy++; return NETDEV_TX_BUSY; } - if (adapter->vlgrp && vlan_tx_tag_present(skb)) { - tx_flags |= IXGBE_TX_FLAGS_VLAN; - tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT); - } if (skb->protocol == htons(ETH_P_IP)) tx_flags |= IXGBE_TX_FLAGS_IPV4; -- cgit v1.2.3 From 51ac6445b108abab5e5ebeb5e68665d4509a6f29 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Thu, 11 Sep 2008 20:03:55 -0700 Subject: ixgbe: bump version Signed-off-by: Jeff Kirsher Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 41aede55233..b5139118090 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -48,7 +48,7 @@ char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver"; -#define DRV_VERSION "1.3.18-k4" +#define DRV_VERSION "1.3.30-k2" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation."; -- cgit v1.2.3 From b46172402f39719e97b921cc3ca85141f3e8b1c2 Mon Sep 17 00:00:00 2001 From: Peter P Waskiewicz Date: Thu, 11 Sep 2008 20:04:46 -0700 Subject: ixgbe: Whitespace, copyright update and version number change patch This patch cleans up a bit of whitespace issues with the driver, updates the copyright information, and bumps the version number up. 
Signed-off-by: Peter P Waskiewicz Jr Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe.h | 17 +- drivers/net/ixgbe/ixgbe_82598.c | 58 ++---- drivers/net/ixgbe/ixgbe_common.c | 57 +++--- drivers/net/ixgbe/ixgbe_common.h | 3 +- drivers/net/ixgbe/ixgbe_ethtool.c | 62 +++---- drivers/net/ixgbe/ixgbe_main.c | 360 +++++++++++++++++++------------------- drivers/net/ixgbe/ixgbe_phy.c | 3 +- drivers/net/ixgbe/ixgbe_phy.h | 3 +- drivers/net/ixgbe/ixgbe_type.h | 3 +- 9 files changed, 266 insertions(+), 300 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index d69215e1b4e..63f678931ad 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2007 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,7 +20,6 @@ the file called "COPYING". Contact Information: - Linux NICS e1000-devel Mailing List Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -321,15 +320,11 @@ extern int ixgbe_up(struct ixgbe_adapter *adapter); extern void ixgbe_down(struct ixgbe_adapter *adapter); extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); extern void ixgbe_reset(struct ixgbe_adapter *adapter); -extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); extern void ixgbe_set_ethtool_ops(struct net_device *netdev); -extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rxdr); -extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *txdr); -extern void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rxdr); -extern void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *txdr); +extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); +extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); +extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); +extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); +extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index a08a267f166..7cddcfba809 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2007 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,7 +20,6 @@ the file called "COPYING". Contact Information: - Linux NICS e1000-devel Mailing List Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -42,33 +41,11 @@ static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg); -static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); -static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num); -static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw); -static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up, bool link_up_wait_to_complete); -static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete); static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw); static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete); -static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); -static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); -static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); -static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, - u32 vind, bool vlan_on); -static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw); -static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index); -static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index); -static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val); -static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val); -static s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw); -static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw); /** */ @@ -112,8 +89,8 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) * Determines the link capabilities by reading the AUTOC register. **/ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) + ixgbe_link_speed *speed, + bool *autoneg) { s32 status = 0; s32 autoc_reg; @@ -180,8 +157,8 @@ s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, *autoneg = true; status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - &speed_ability); + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &speed_ability); if (status == 0) { if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) @@ -408,8 +385,9 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw) * * Reads the links register to determine if link is up and the current speed **/ -static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up, bool link_up_wait_to_complete) +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *link_up, + bool link_up_wait_to_complete) { u32 links_reg; u32 i; @@ -452,8 +430,8 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *spe * Set the link speed in the AUTOC register and restarts link. **/ static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, - bool autoneg_wait_to_complete) + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete) { s32 status = 0; @@ -525,15 +503,15 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw) * Sets the link speed in the AUTOC register in the MAC and restarts link. 
**/ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete) + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete) { s32 status; /* Setup the PHY according to input speed */ - status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, - autoneg_wait_to_complete); + status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, + autoneg_wait_to_complete); /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */ hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX); @@ -653,7 +631,7 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); } else { hw->mac.link_attach_type = - (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE); + (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE); hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK); hw->mac.link_settings_loaded = true; } @@ -715,7 +693,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) * Turn on/off specified VLAN in the VLAN filter table. **/ s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on) + bool vlan_on) { u32 regindex; u32 bitindex; @@ -770,7 +748,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), - 0); + 0); return 0; } diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index a11ff0db9d2..f67c68404bb 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2007 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,7 +20,6 @@ the file called "COPYING". Contact Information: - Linux NICS e1000-devel Mailing List Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -481,7 +480,7 @@ s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) if (status == 0) *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >> - IXGBE_EEPROM_READ_REG_DATA); + IXGBE_EEPROM_READ_REG_DATA); else hw_dbg(hw, "Eeprom read timed out\n"); @@ -620,7 +619,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) */ if (i >= timeout) { hw_dbg(hw, "Driver can't access the Eeprom - Semaphore " - "not granted.\n"); + "not granted.\n"); ixgbe_release_eeprom_semaphore(hw); status = IXGBE_ERR_EEPROM; } @@ -1018,14 +1017,14 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, /* Make sure we are using a valid rar index range */ if (index < rar_entries) { - /* + /* * HW expects these in little endian so we reverse the byte * order from network order (big endian) to little endian - */ - rar_low = ((u32)addr[0] | - ((u32)addr[1] << 8) | - ((u32)addr[2] << 16) | - ((u32)addr[3] << 24)); + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | + ((u32)addr[3] << 24)); /* * Some parts put the VMDq setting in the extra RAH bits, * so save everything except the lower 16 bits that hold part @@ -1035,11 +1034,11 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); - if (enable_addr != 0) - rar_high |= IXGBE_RAH_AV; + if (enable_addr != 0) + rar_high |= IXGBE_RAH_AV; - IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); - IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); } else { hw_dbg(hw, "RAR index %d is out of range.\n", index); } @@ -1137,18 +1136,18 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) hw->mac.ops.get_mac_addr(hw, hw->mac.addr); hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ", - hw->mac.addr[0], hw->mac.addr[1], - hw->mac.addr[2]); + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], - hw->mac.addr[4], hw->mac.addr[5]); + hw->mac.addr[4], hw->mac.addr[5]); } else { /* Setup the receive address. 
*/ hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ", - hw->mac.addr[0], hw->mac.addr[1], - hw->mac.addr[2]); + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], - hw->mac.addr[4], hw->mac.addr[5]); + hw->mac.addr[4], hw->mac.addr[5]); hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); } @@ -1296,19 +1295,19 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) u32 vector = 0; switch (hw->mac.mc_filter_type) { - case 0: /* use bits [47:36] of the address */ + case 0: /* use bits [47:36] of the address */ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); break; - case 1: /* use bits [46:35] of the address */ + case 1: /* use bits [46:35] of the address */ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); break; - case 2: /* use bits [45:34] of the address */ + case 2: /* use bits [45:34] of the address */ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); break; - case 3: /* use bits [43:32] of the address */ + case 3: /* use bits [43:32] of the address */ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); break; - default: /* Invalid mc_filter_type */ + default: /* Invalid mc_filter_type */ hw_dbg(hw, "MC filter type param set incorrectly\n"); break; } @@ -1366,8 +1365,8 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) u32 rar; hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", - mc_addr[0], mc_addr[1], mc_addr[2], - mc_addr[3], mc_addr[4], mc_addr[5]); + mc_addr[0], mc_addr[1], mc_addr[2], + mc_addr[3], mc_addr[4], mc_addr[5]); /* * Place this multicast address in the RAR if there is room, @@ -1400,7 +1399,7 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) * multicast table. **/ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, ixgbe_mc_addr_itr next) + u32 mc_addr_count, ixgbe_mc_addr_itr next) { u32 i; u32 rar_entries = hw->mac.num_rar_entries; @@ -1437,7 +1436,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, /* Enable mta */ if (hw->addr_ctrl.mta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, - IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); return 0; diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 13ed8d2ff4a..192f8d01291 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2007 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,7 +20,6 @@ the file called "COPYING". Contact Information: - Linux NICS e1000-devel Mailing List Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 020b0c7c195..81a9c4b8672 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2007 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,7 +20,6 @@ the file called "COPYING". Contact Information: - Linux NICS e1000-devel Mailing List Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -48,7 +47,7 @@ struct ixgbe_stats { }; #define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \ - offsetof(struct ixgbe_adapter, m) + offsetof(struct ixgbe_adapter, m) static struct ixgbe_stats ixgbe_gstrings_stats[] = { {"rx_packets", IXGBE_STAT(net_stats.rx_packets)}, {"tx_packets", IXGBE_STAT(net_stats.tx_packets)}, @@ -95,14 +94,15 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { }; #define IXGBE_QUEUE_STATS_LEN \ - ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \ - ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \ - (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) -#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) + ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \ + ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \ + (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) +#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) +#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) static int ixgbe_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) + struct ethtool_cmd *ecmd) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -114,7 +114,7 @@ static int ixgbe_get_settings(struct net_device *netdev, ecmd->transceiver = XCVR_EXTERNAL; if (hw->phy.media_type == ixgbe_media_type_copper) { ecmd->supported |= (SUPPORTED_1000baseT_Full | - SUPPORTED_TP | SUPPORTED_Autoneg); + SUPPORTED_TP | SUPPORTED_Autoneg); ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg); if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) @@ -126,7 +126,7 @@ static int ixgbe_get_settings(struct net_device *netdev, } else { ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising = (ADVERTISED_10000baseT_Full | - ADVERTISED_FIBRE); + ADVERTISED_FIBRE); ecmd->port = PORT_FIBRE; ecmd->autoneg = AUTONEG_DISABLE; } @@ -134,7 +134,7 @@ static int ixgbe_get_settings(struct net_device *netdev, hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (link_up) { ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 
- SPEED_10000 : SPEED_1000; + SPEED_10000 : SPEED_1000; ecmd->duplex = DUPLEX_FULL; } else { ecmd->speed = -1; @@ -145,7 +145,7 @@ static int ixgbe_get_settings(struct net_device *netdev, } static int ixgbe_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) + struct ethtool_cmd *ecmd) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -165,7 +165,7 @@ static int ixgbe_set_settings(struct net_device *netdev, } static void ixgbe_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) + struct ethtool_pauseparam *pause) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -183,7 +183,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev, } static int ixgbe_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) + struct ethtool_pauseparam *pause) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -282,7 +282,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev) #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ static void ixgbe_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *p) + struct ethtool_regs *regs, void *p) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -583,7 +583,7 @@ static int ixgbe_get_eeprom_len(struct net_device *netdev) } static int ixgbe_get_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) + struct ethtool_eeprom *eeprom, u8 *bytes) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -607,7 +607,7 @@ static int ixgbe_get_eeprom(struct net_device *netdev, for (i = 0; i < eeprom_len; i++) { if ((ret_val = hw->eeprom.ops.read(hw, first_word + i, - &eeprom_buff[i]))) + &eeprom_buff[i]))) break; } @@ -622,7 +622,7 @@ static int ixgbe_get_eeprom(struct net_device *netdev, } static void ixgbe_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) + struct ethtool_drvinfo *drvinfo) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -635,7 +635,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, } static void ixgbe_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *tx_ring = adapter->tx_ring; @@ -652,7 +652,7 @@ static void ixgbe_get_ringparam(struct net_device *netdev, } static int ixgbe_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *temp_ring; @@ -706,7 +706,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev, if (err) { while (i) { i--; - ixgbe_free_tx_resources(adapter, &temp_ring[i]); + ixgbe_free_tx_resources(adapter, + &temp_ring[i]); } goto err_setup; } @@ -731,7 +732,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev, if (err) { while (i) { i--; - ixgbe_free_rx_resources(adapter, &temp_ring[i]); + ixgbe_free_rx_resources(adapter, + &temp_ring[i]); } goto err_setup; } @@ -767,7 +769,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset) } static void ixgbe_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) + struct ethtool_stats *stats, u64 *data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); u64 *queue_stat; @@ -788,7 +790,7 @@ static void 
ixgbe_get_ethtool_stats(struct net_device *netdev, for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset; data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } for (j = 0; j < adapter->num_tx_queues; j++) { queue_stat = (u64 *)&adapter->tx_ring[j].stats; @@ -805,7 +807,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, } static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) + u8 *data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); char *p = (char *)data; @@ -830,14 +832,14 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, sprintf(p, "rx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; } -/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ + /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ break; } } static void ixgbe_get_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) + struct ethtool_wolinfo *wol) { wol->supported = 0; wol->wolopts = 0; @@ -879,7 +881,7 @@ static int ixgbe_phys_id(struct net_device *netdev, u32 data) } static int ixgbe_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -904,7 +906,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev, } static int ixgbe_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -974,7 +976,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .set_tso = ixgbe_set_tso, .get_strings = ixgbe_get_strings, .phys_id = ixgbe_phys_id, - .get_sset_count = ixgbe_get_sset_count, + .get_sset_count = ixgbe_get_sset_count, .get_ethtool_stats = ixgbe_get_ethtool_stats, .get_coalesce = ixgbe_get_coalesce, .set_coalesce = ixgbe_set_coalesce, diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index b5139118090..2980a373645 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2007 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,7 +20,6 @@ the file called "COPYING". Contact Information: - Linux NICS e1000-devel Mailing List Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -46,15 +45,14 @@ char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = - "Intel(R) 10 Gigabit PCI Express Network Driver"; + "Intel(R) 10 Gigabit PCI Express Network Driver"; #define DRV_VERSION "1.3.30-k2" const char ixgbe_driver_version[] = DRV_VERSION; -static const char ixgbe_copyright[] = - "Copyright (c) 1999-2007 Intel Corporation."; +static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation."; static const struct ixgbe_info *ixgbe_info_tbl[] = { - [board_82598] = &ixgbe_82598_info, + [board_82598] = &ixgbe_82598_info, }; /* ixgbe_pci_tbl - PCI Device ID Table @@ -84,7 +82,7 @@ MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, - void *p); + void *p); static struct notifier_block dca_notifier = { .notifier_call = ixgbe_notify_dca, .next = NULL, @@ -106,7 +104,7 @@ static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) /* Let firmware take over control of h/w */ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, - ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); + ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); } static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) @@ -116,7 +114,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) /* Let firmware know the driver has taken over */ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, - ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); + ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); } #ifdef DEBUG @@ -133,7 +131,7 @@ char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw) #endif static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry, - u8 msix_vector) + u8 msix_vector) { u32 ivar, index; @@ -146,12 +144,12 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry, } static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, - struct ixgbe_tx_buffer - *tx_buffer_info) + struct ixgbe_tx_buffer + *tx_buffer_info) { if (tx_buffer_info->dma) { pci_unmap_page(adapter->pdev, tx_buffer_info->dma, - tx_buffer_info->length, PCI_DMA_TODEVICE); + tx_buffer_info->length, PCI_DMA_TODEVICE); tx_buffer_info->dma = 0; } if (tx_buffer_info->skb) { @@ -162,8 +160,8 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, } static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring, - unsigned int eop) + struct ixgbe_ring *tx_ring, + unsigned int eop) { struct ixgbe_hw *hw = &adapter->hw; u32 head, tail; @@ -198,14 +196,14 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, return false; } -#define IXGBE_MAX_TXD_PWR 14 -#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) +#define IXGBE_MAX_TXD_PWR 14 +#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 
1 : 0)) #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ - MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ + MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ #define GET_TX_HEAD_FROM_RING(ring) (\ *(volatile u32 *) \ @@ -313,7 +311,7 @@ done_cleaning: #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring) + struct ixgbe_ring *rx_ring) { u32 rxctrl; int cpu = get_cpu(); @@ -332,7 +330,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, } static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) + struct ixgbe_ring *tx_ring) { u32 txctrl; int cpu = get_cpu(); @@ -408,8 +406,8 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) * @rx_desc: rx descriptor **/ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, - struct sk_buff *skb, u8 status, - struct ixgbe_ring *ring, + struct sk_buff *skb, u8 status, + struct ixgbe_ring *ring, union ixgbe_adv_rx_desc *rx_desc) { bool is_vlan = (status & IXGBE_RXD_STAT_VP); @@ -577,8 +575,8 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) } static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring, - int *work_done, int work_to_do) + struct ixgbe_ring *rx_ring, + int *work_done, int work_to_do) { struct pci_dev *pdev = adapter->pdev; union ixgbe_adv_rx_desc *rx_desc, *next_rxd; @@ -622,8 +620,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, if (len && !skb_shinfo(skb)->nr_frags) { pci_unmap_single(pdev, rx_buffer_info->dma, - rx_ring->rx_buf_len + NET_IP_ALIGN, - PCI_DMA_FROMDEVICE); + rx_ring->rx_buf_len + NET_IP_ALIGN, + PCI_DMA_FROMDEVICE); skb_put(skb, len); } @@ -741,24 +739,24 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) q_vector = &adapter->q_vector[v_idx]; /* XXX for_each_bit(...) 
*/ r_idx = find_first_bit(q_vector->rxr_idx, - adapter->num_rx_queues); + adapter->num_rx_queues); for (i = 0; i < q_vector->rxr_count; i++) { j = adapter->rx_ring[r_idx].reg_idx; ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx); r_idx = find_next_bit(q_vector->rxr_idx, - adapter->num_rx_queues, - r_idx + 1); + adapter->num_rx_queues, + r_idx + 1); } r_idx = find_first_bit(q_vector->txr_idx, - adapter->num_tx_queues); + adapter->num_tx_queues); for (i = 0; i < q_vector->txr_count; i++) { j = adapter->tx_ring[r_idx].reg_idx; ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx); r_idx = find_next_bit(q_vector->txr_idx, - adapter->num_tx_queues, - r_idx + 1); + adapter->num_tx_queues, + r_idx + 1); } /* if this is a tx only vector halve the interrupt rate */ @@ -769,7 +767,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) q_vector->eitr = adapter->eitr_param; IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), - EITR_INTS_PER_SEC_TO_REG(q_vector->eitr)); + EITR_INTS_PER_SEC_TO_REG(q_vector->eitr)); } ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx); @@ -807,8 +805,8 @@ enum latency_range { * parameter (see ixgbe_param.c) **/ static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter, - u32 eitr, u8 itr_setting, - int packets, int bytes) + u32 eitr, u8 itr_setting, + int packets, int bytes) { unsigned int retval = itr_setting; u32 timepassed_us; @@ -855,37 +853,37 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) u32 new_itr; u8 current_itr, ret_itr; int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) / - sizeof(struct ixgbe_q_vector); + sizeof(struct ixgbe_q_vector); struct ixgbe_ring *rx_ring, *tx_ring; r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); for (i = 0; i < q_vector->txr_count; i++) { tx_ring = &(adapter->tx_ring[r_idx]); ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, - q_vector->tx_itr, - tx_ring->total_packets, - tx_ring->total_bytes); + q_vector->tx_itr, + tx_ring->total_packets, + tx_ring->total_bytes); /* if the result for this queue would decrease interrupt * rate for this vector then use that result */ q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ? - q_vector->tx_itr - 1 : ret_itr); + q_vector->tx_itr - 1 : ret_itr); r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, - r_idx + 1); + r_idx + 1); } r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); for (i = 0; i < q_vector->rxr_count; i++) { rx_ring = &(adapter->rx_ring[r_idx]); ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, - q_vector->rx_itr, - rx_ring->total_packets, - rx_ring->total_bytes); + q_vector->rx_itr, + rx_ring->total_packets, + rx_ring->total_bytes); /* if the result for this queue would decrease interrupt * rate for this vector then use that result */ q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ? 
- q_vector->rx_itr - 1 : ret_itr); + q_vector->rx_itr - 1 : ret_itr); r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, - r_idx + 1); + r_idx + 1); } current_itr = max(q_vector->rx_itr, q_vector->tx_itr); @@ -912,7 +910,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); /* must write high and low 16 bits to reset counter */ DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx, - itr_reg); + itr_reg); IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16); } @@ -970,7 +968,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) tx_ring->total_packets = 0; ixgbe_clean_tx_irq(adapter, tx_ring); r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, - r_idx + 1); + r_idx + 1); } return IRQ_HANDLED; @@ -1029,7 +1027,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) { struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); + container_of(napi, struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_ring *rx_ring = NULL; int work_done = 0; @@ -1106,7 +1104,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) return work_done; } static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, - int r_idx) + int r_idx) { a->q_vector[v_idx].adapter = a; set_bit(r_idx, a->q_vector[v_idx].rxr_idx); @@ -1115,7 +1113,7 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, } static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, - int r_idx) + int r_idx) { a->q_vector[v_idx].adapter = a; set_bit(r_idx, a->q_vector[v_idx].txr_idx); @@ -1135,7 +1133,7 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, * mapping configurations in here. **/ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, - int vectors) + int vectors) { int v_start = 0; int rxr_idx = 0, txr_idx = 0; @@ -1212,28 +1210,28 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) goto out; #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ - (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ - &ixgbe_msix_clean_many) + (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ + &ixgbe_msix_clean_many) for (vector = 0; vector < q_vectors; vector++) { handler = SET_HANDLER(&adapter->q_vector[vector]); sprintf(adapter->name[vector], "%s:v%d-%s", - netdev->name, vector, - (handler == &ixgbe_msix_clean_rx) ? "Rx" : - ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx")); + netdev->name, vector, + (handler == &ixgbe_msix_clean_rx) ? "Rx" : + ((handler == &ixgbe_msix_clean_tx) ? 
"Tx" : "TxRx")); err = request_irq(adapter->msix_entries[vector].vector, - handler, 0, adapter->name[vector], - &(adapter->q_vector[vector])); + handler, 0, adapter->name[vector], + &(adapter->q_vector[vector])); if (err) { DPRINTK(PROBE, ERR, - "request_irq failed for MSIX interrupt " - "Error: %d\n", err); + "request_irq failed for MSIX interrupt " + "Error: %d\n", err); goto free_queue_irqs; } } sprintf(adapter->name[vector], "%s:lsc", netdev->name); err = request_irq(adapter->msix_entries[vector].vector, - &ixgbe_msix_lsc, 0, adapter->name[vector], netdev); + &ixgbe_msix_lsc, 0, adapter->name[vector], netdev); if (err) { DPRINTK(PROBE, ERR, "request_irq for msix_lsc failed: %d\n", err); @@ -1245,7 +1243,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) free_queue_irqs: for (i = vector - 1; i >= 0; i--) free_irq(adapter->msix_entries[--vector].vector, - &(adapter->q_vector[i])); + &(adapter->q_vector[i])); adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; pci_disable_msix(adapter->pdev); kfree(adapter->msix_entries); @@ -1264,13 +1262,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter) struct ixgbe_ring *tx_ring = &adapter->tx_ring[0]; q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, - q_vector->tx_itr, - tx_ring->total_packets, - tx_ring->total_bytes); + q_vector->tx_itr, + tx_ring->total_packets, + tx_ring->total_bytes); q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr, - q_vector->rx_itr, - rx_ring->total_packets, - rx_ring->total_bytes); + q_vector->rx_itr, + rx_ring->total_packets, + rx_ring->total_bytes); current_itr = max(q_vector->rx_itr, q_vector->tx_itr); @@ -1373,10 +1371,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter) err = ixgbe_request_msix_irqs(adapter); } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0, - netdev->name, netdev); + netdev->name, netdev); } else { err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED, - netdev->name, netdev); + netdev->name, netdev); } if (err) @@ -1400,7 +1398,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) i--; for (; i >= 0; i--) { free_irq(adapter->msix_entries[i].vector, - &(adapter->q_vector[i])); + &(adapter->q_vector[i])); } ixgbe_reset_q_vectors(adapter); @@ -1533,8 +1531,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; srrctl |= ((IXGBE_RX_HDR_SIZE << - IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & - IXGBE_SRRCTL_BSIZEHDR_MASK); + IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & + IXGBE_SRRCTL_BSIZEHDR_MASK); } else { srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; @@ -1551,7 +1549,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) /** * ixgbe_get_skb_hdr - helper function for LRO header processing * @skb: pointer to sk_buff to be added to LRO packet - * @iphdr: pointer to tcp header structure + * @iphdr: pointer to ip header structure * @tcph: pointer to tcp header structure * @hdr_flags: pointer to header flags * @priv: private data @@ -1576,7 +1574,7 @@ static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph, } #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ - (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) + (((S) & (PAGE_SIZE - 1)) ? 
1 : 0)) /** * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset @@ -1723,7 +1721,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) } static void ixgbe_vlan_rx_register(struct net_device *netdev, - struct vlan_group *grp) + struct vlan_group *grp) { struct ixgbe_adapter *adapter = netdev_priv(netdev); u32 ctrl; @@ -1909,7 +1907,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) ixgbe_configure_rx(adapter); for (i = 0; i < adapter->num_rx_queues; i++) ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i], - (adapter->rx_ring[i].count - 1)); + (adapter->rx_ring[i].count - 1)); } static int ixgbe_up_complete(struct ixgbe_adapter *adapter) @@ -1927,7 +1925,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) { if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME | - IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD); + IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD); } else { /* MSI only */ gpie = 0; @@ -2037,7 +2035,7 @@ static int ixgbe_resume(struct pci_dev *pdev) err = pci_enable_device(pdev); if (err) { printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \ - "suspend\n"); + "suspend\n"); return err; } pci_set_master(pdev); @@ -2068,7 +2066,7 @@ static int ixgbe_resume(struct pci_dev *pdev) * @rx_ring: ring to free buffers from **/ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring) + struct ixgbe_ring *rx_ring) { struct pci_dev *pdev = adapter->pdev; unsigned long size; @@ -2082,8 +2080,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, rx_buffer_info = &rx_ring->rx_buffer_info[i]; if (rx_buffer_info->dma) { pci_unmap_single(pdev, rx_buffer_info->dma, - rx_ring->rx_buf_len, - PCI_DMA_FROMDEVICE); + rx_ring->rx_buf_len, + PCI_DMA_FROMDEVICE); rx_buffer_info->dma = 0; } if (rx_buffer_info->skb) { @@ -2119,7 +2117,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, * @tx_ring: ring to be cleaned **/ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) + struct ixgbe_ring *tx_ring) { struct ixgbe_tx_buffer *tx_buffer_info; unsigned long size; @@ -2226,7 +2224,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) adapter->flags |= IXGBE_FLAG_DCA_ENABLED; /* always use CB2 mode, difference is masked * in the CB driver */ - IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2); + IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2); ixgbe_setup_dca(adapter); } #endif @@ -2280,7 +2278,7 @@ static void ixgbe_shutdown(struct pci_dev *pdev) static int ixgbe_poll(struct napi_struct *napi, int budget) { struct ixgbe_q_vector *q_vector = container_of(napi, - struct ixgbe_q_vector, napi); + struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = q_vector->adapter; int tx_cleaned, work_done = 0; @@ -2371,7 +2369,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) } static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, - int vectors) + int vectors) { int err, vector_threshold; @@ -2390,7 +2388,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, */ while (vectors >= vector_threshold) { err = pci_enable_msix(adapter->pdev, adapter->msix_entries, - vectors); + vectors); if (!err) /* Success in acquiring all requested vectors. 
*/ break; else if (err < 0) @@ -2425,9 +2423,6 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, **/ static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) { - /* TODO: Remove all uses of the indices in the cases where multiple - * features are OR'd together, if the feature set makes sense. - */ int feature_mask = 0, rss_i; int i, txr_idx, rxr_idx; @@ -2468,12 +2463,12 @@ static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter) int i; adapter->tx_ring = kcalloc(adapter->num_tx_queues, - sizeof(struct ixgbe_ring), GFP_KERNEL); + sizeof(struct ixgbe_ring), GFP_KERNEL); if (!adapter->tx_ring) goto err_tx_ring_allocation; adapter->rx_ring = kcalloc(adapter->num_rx_queues, - sizeof(struct ixgbe_ring), GFP_KERNEL); + sizeof(struct ixgbe_ring), GFP_KERNEL); if (!adapter->rx_ring) goto err_rx_ring_allocation; @@ -2505,7 +2500,7 @@ err_tx_ring_allocation: * capabilities of the hardware and the kernel. **/ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter - *adapter) + *adapter) { int err = 0; int vector, v_budget; @@ -2517,7 +2512,7 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter * (roughly) twice the number of vectors as there are CPU's. */ v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, - (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; + (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; /* * At the same time, hardware can only support a maximum of @@ -2531,7 +2526,7 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter /* A failure in MSI-X entry allocation isn't fatal, but it does * mean we disable MSI-X capabilities of the adapter. */ adapter->msix_entries = kcalloc(v_budget, - sizeof(struct msix_entry), GFP_KERNEL); + sizeof(struct msix_entry), GFP_KERNEL); if (!adapter->msix_entries) { adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; ixgbe_set_num_queues(adapter); @@ -2540,7 +2535,7 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter err = ixgbe_alloc_queues(adapter); if (err) { DPRINTK(PROBE, ERR, "Unable to allocate memory " - "for queues\n"); + "for queues\n"); goto out; } @@ -2561,7 +2556,7 @@ try_msi: adapter->flags |= IXGBE_FLAG_MSI_ENABLED; } else { DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, " - "falling back to legacy. Error: %d\n", err); + "falling back to legacy. Error: %d\n", err); /* reset err */ err = 0; } @@ -2617,9 +2612,9 @@ static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) } DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " - "Tx Queue count = %u\n", - (adapter->num_rx_queues > 1) ? "Enabled" : - "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); + "Tx Queue count = %u\n", + (adapter->num_rx_queues > 1) ? 
"Enabled" : + "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); set_bit(__IXGBE_DOWN, &adapter->state); @@ -2746,7 +2741,7 @@ err: * Returns 0 on success, negative on failure **/ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring) + struct ixgbe_ring *rx_ring) { struct pci_dev *pdev = adapter->pdev; int size; @@ -2761,7 +2756,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, rx_ring->rx_buffer_info = vmalloc(size); if (!rx_ring->rx_buffer_info) { DPRINTK(PROBE, ERR, - "vmalloc allocation failed for the rx desc ring\n"); + "vmalloc allocation failed for the rx desc ring\n"); goto alloc_failed; } memset(rx_ring->rx_buffer_info, 0, size); @@ -2774,7 +2769,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, if (!rx_ring->desc) { DPRINTK(PROBE, ERR, - "Memory allocation failed for the rx desc ring\n"); + "Memory allocation failed for the rx desc ring\n"); vfree(rx_ring->rx_buffer_info); goto alloc_failed; } @@ -2827,7 +2822,7 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) } /** - * ixgbe_ree_rx_resources - Free Rx Resources + * ixgbe_free_rx_resources - Free Rx Resources * @adapter: board private structure * @rx_ring: ring to clean the resources from * @@ -2881,11 +2876,10 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) { err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]); - if (err) { - DPRINTK(PROBE, ERR, - "Allocation for Tx Queue %u failed\n", i); - break; - } + if (!err) + continue; + DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i); + break; } return err; @@ -2908,11 +2902,10 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]); - if (err) { - DPRINTK(PROBE, ERR, - "Allocation for Rx Queue %u failed\n", i); - break; - } + if (!err) + continue; + DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i); + break; } return err; @@ -2935,7 +2928,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) return -EINVAL; DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", - netdev->mtu, new_mtu); + netdev->mtu, new_mtu); /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; @@ -3102,7 +3095,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) /* Rx Errors */ adapter->net_stats.rx_errors = adapter->stats.crcerrs + - adapter->stats.rlec; + adapter->stats.rlec; adapter->net_stats.rx_dropped = 0; adapter->net_stats.rx_length_errors = adapter->stats.rlec; adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; @@ -3206,8 +3199,8 @@ static void ixgbe_watchdog_task(struct work_struct *work) } static int ixgbe_tso(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, u8 *hdr_len) + struct ixgbe_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, u8 *hdr_len) { struct ixgbe_adv_tx_context_desc *context_desc; unsigned int i; @@ -3230,16 +3223,16 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, iph->tot_len = 0; iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); + iph->daddr, 0, + IPPROTO_TCP, + 0); adapter->hw_tso_ctxt++; } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + 
&ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); adapter->hw_tso6_ctxt++; } @@ -3253,7 +3246,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); vlan_macip_lens |= ((skb_network_offset(skb)) << - IXGBE_ADVTXD_MACLEN_SHIFT); + IXGBE_ADVTXD_MACLEN_SHIFT); *hdr_len += skb_network_offset(skb); vlan_macip_lens |= (skb_transport_header(skb) - skb_network_header(skb)); @@ -3264,7 +3257,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | - IXGBE_ADVTXD_DTYP_CTXT); + IXGBE_ADVTXD_DTYP_CTXT); if (skb->protocol == htons(ETH_P_IP)) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; @@ -3293,8 +3286,8 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, } static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags) + struct ixgbe_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags) { struct ixgbe_adv_tx_context_desc *context_desc; unsigned int i; @@ -3311,16 +3304,16 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); vlan_macip_lens |= (skb_network_offset(skb) << - IXGBE_ADVTXD_MACLEN_SHIFT); + IXGBE_ADVTXD_MACLEN_SHIFT); if (skb->ip_summed == CHECKSUM_PARTIAL) vlan_macip_lens |= (skb_transport_header(skb) - - skb_network_header(skb)); + skb_network_header(skb)); context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); context_desc->seqnum_seed = 0; type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | - IXGBE_ADVTXD_DTYP_CTXT); + IXGBE_ADVTXD_DTYP_CTXT); if (skb->ip_summed == CHECKSUM_PARTIAL) { switch (skb->protocol) { @@ -3328,13 +3321,13 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; if (ip_hdr(skb)->protocol == IPPROTO_TCP) type_tucmd_mlhl |= - IXGBE_ADVTXD_TUCMD_L4T_TCP; + IXGBE_ADVTXD_TUCMD_L4T_TCP; break; case __constant_htons(ETH_P_IPV6): /* XXX what about other V6 headers?? 
*/ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) type_tucmd_mlhl |= - IXGBE_ADVTXD_TUCMD_L4T_TCP; + IXGBE_ADVTXD_TUCMD_L4T_TCP; break; default: if (unlikely(net_ratelimit())) { @@ -3366,8 +3359,8 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, } static int ixgbe_tx_map(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring, - struct sk_buff *skb, unsigned int first) + struct ixgbe_ring *tx_ring, + struct sk_buff *skb, unsigned int first) { struct ixgbe_tx_buffer *tx_buffer_info; unsigned int len = skb->len; @@ -3385,8 +3378,8 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, tx_buffer_info->length = size; tx_buffer_info->dma = pci_map_single(adapter->pdev, - skb->data + offset, - size, PCI_DMA_TODEVICE); + skb->data + offset, + size, PCI_DMA_TODEVICE); tx_buffer_info->time_stamp = jiffies; tx_buffer_info->next_to_watch = i; @@ -3411,9 +3404,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, tx_buffer_info->length = size; tx_buffer_info->dma = pci_map_page(adapter->pdev, - frag->page, - offset, - size, PCI_DMA_TODEVICE); + frag->page, + offset, + size, + PCI_DMA_TODEVICE); tx_buffer_info->time_stamp = jiffies; tx_buffer_info->next_to_watch = i; @@ -3436,8 +3430,8 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, } static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring, - int tx_flags, int count, u32 paylen, u8 hdr_len) + struct ixgbe_ring *tx_ring, + int tx_flags, int count, u32 paylen, u8 hdr_len) { union ixgbe_adv_tx_desc *tx_desc = NULL; struct ixgbe_tx_buffer *tx_buffer_info; @@ -3456,17 +3450,17 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; olinfo_status |= IXGBE_TXD_POPTS_TXSM << - IXGBE_ADVTXD_POPTS_SHIFT; + IXGBE_ADVTXD_POPTS_SHIFT; /* use index 1 context for tso */ olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); if (tx_flags & IXGBE_TX_FLAGS_IPV4) olinfo_status |= IXGBE_TXD_POPTS_IXSM << - IXGBE_ADVTXD_POPTS_SHIFT; + IXGBE_ADVTXD_POPTS_SHIFT; } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) olinfo_status |= IXGBE_TXD_POPTS_TXSM << - IXGBE_ADVTXD_POPTS_SHIFT; + IXGBE_ADVTXD_POPTS_SHIFT; olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); @@ -3476,7 +3470,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); tx_desc->read.cmd_type_len = - cpu_to_le32(cmd_type_len | tx_buffer_info->length); + cpu_to_le32(cmd_type_len | tx_buffer_info->length); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); i++; @@ -3499,7 +3493,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, } static int __ixgbe_maybe_stop_tx(struct net_device *netdev, - struct ixgbe_ring *tx_ring, int size) + struct ixgbe_ring *tx_ring, int size) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -3521,7 +3515,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev, } static int ixgbe_maybe_stop_tx(struct net_device *netdev, - struct ixgbe_ring *tx_ring, int size) + struct ixgbe_ring *tx_ring, int size) { if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) return 0; @@ -3575,12 +3569,12 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if (tso) tx_flags |= IXGBE_TX_FLAGS_TSO; else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && - (skb->ip_summed == CHECKSUM_PARTIAL)) + (skb->ip_summed == CHECKSUM_PARTIAL)) tx_flags |= IXGBE_TX_FLAGS_CSUM; ixgbe_tx_queue(adapter, tx_ring, tx_flags, - ixgbe_tx_map(adapter, tx_ring, skb, 
first), - skb->len, hdr_len); + ixgbe_tx_map(adapter, tx_ring, skb, first), + skb->len, hdr_len); netdev->trans_start = jiffies; @@ -3614,15 +3608,16 @@ static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev) static int ixgbe_set_mac(struct net_device *netdev, void *p) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); - adapter->hw.mac.ops.set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); return 0; } @@ -3682,7 +3677,7 @@ static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) for (i = 0; i < q_vectors; i++) { struct ixgbe_q_vector *q_vector = &adapter->q_vector[i]; netif_napi_add(adapter->netdev, &q_vector->napi, - (*poll), 64); + (*poll), 64); } } @@ -3698,7 +3693,7 @@ static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) * and a hardware reset occur. **/ static int __devinit ixgbe_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct pci_device_id *ent) { struct net_device *netdev; struct ixgbe_adapter *adapter = NULL; @@ -3721,8 +3716,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (err) { err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); if (err) { - dev_err(&pdev->dev, "No usable DMA configuration, " - "aborting\n"); + dev_err(&pdev->dev, "No usable DMA " + "configuration, aborting\n"); goto err_dma; } } @@ -3820,10 +3815,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, } netdev->features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; + NETIF_F_IP_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER; netdev->features |= NETIF_F_IPV6_CSUM; netdev->features |= NETIF_F_TSO; @@ -3870,28 +3865,28 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, link_speed = link_status & IXGBE_PCI_LINK_SPEED; link_width = link_status & IXGBE_PCI_LINK_WIDTH; dev_info(&pdev->dev, "(PCI Express:%s:%s) " - "%02x:%02x:%02x:%02x:%02x:%02x\n", - ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : - (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : - "Unknown"), - ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" : - (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" : - (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" : - (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" : - "Unknown"), - netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], - netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); + "%02x:%02x:%02x:%02x:%02x:%02x\n", + ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : + (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : + "Unknown"), + ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" : + (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" : + (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" : + (link_width == IXGBE_PCI_LINK_WIDTH_1) ? 
"Width x1" : + "Unknown"), + netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], + netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); ixgbe_read_pba_num_generic(hw, &part_num); dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", - hw->mac.type, hw->phy.type, - (part_num >> 8), (part_num & 0xff)); + hw->mac.type, hw->phy.type, + (part_num >> 8), (part_num & 0xff)); if (link_width <= IXGBE_PCI_LINK_WIDTH_4) { dev_warn(&pdev->dev, "PCI-Express bandwidth available for " - "this card is not sufficient for optimal " - "performance.\n"); + "this card is not sufficient for optimal " + "performance.\n"); dev_warn(&pdev->dev, "For optimal performance a x8 " - "PCI-Express slot is required.\n"); + "PCI-Express slot is required.\n"); } /* reset the hardware with the new settings */ @@ -3999,7 +3994,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) * this device has been detected. */ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbe_adapter *adapter = netdev->priv; @@ -4010,7 +4005,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, ixgbe_down(adapter); pci_disable_device(pdev); - /* Request a slot slot reset. */ + /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } @@ -4027,7 +4022,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) if (pci_enable_device(pdev)) { DPRINTK(PROBE, ERR, - "Cannot re-enable PCI device after reset.\n"); + "Cannot re-enable PCI device after reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); @@ -4104,6 +4099,7 @@ static int __init ixgbe_init_module(void) ret = pci_register_driver(&ixgbe_driver); return ret; } + module_init(ixgbe_init_module); /** @@ -4122,12 +4118,12 @@ static void __exit ixgbe_exit_module(void) #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, - void *p) + void *p) { int ret_val; ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, - __ixgbe_notify_dca); + __ixgbe_notify_dca); return ret_val ? NOTIFY_BAD : NOTIFY_DONE; } diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 63a70176241..764035a8c9a 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2007 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,7 +20,6 @@ the file called "COPYING". Contact Information: - Linux NICS e1000-devel Mailing List Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h index f88c9131a01..9bfe3f2b1d8 100644 --- a/drivers/net/ixgbe/ixgbe_phy.h +++ b/drivers/net/ixgbe/ixgbe_phy.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2007 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,7 +20,6 @@ the file called "COPYING". Contact Information: - Linux NICS e1000-devel Mailing List Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index c76e30b94d8..c6f8fa1c4e5 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2007 Intel Corporation. + Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -20,7 +20,6 @@ the file called "COPYING". Contact Information: - Linux NICS e1000-devel Mailing List Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- cgit v1.2.3 From b3c8b4ba619f3d461e01c27cdda02adcd48f02d4 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 11 Sep 2008 20:04:56 -0700 Subject: ixgbe: correctly add and remove napi queues This patch corrects support for NAPI so that queues are correctly added and removed during suspend/resume in the event that the number of MSI-X vectors changes. Signed-off-by: Alexander Duyck Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 230 +++++++++++++++++++++++------------------ 1 file changed, 130 insertions(+), 100 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 2980a373645..a8edbad0335 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -2023,43 +2023,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) } -#ifdef CONFIG_PM -static int ixgbe_resume(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgbe_adapter *adapter = netdev_priv(netdev); - u32 err; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - err = pci_enable_device(pdev); - if (err) { - printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \ - "suspend\n"); - return err; - } - pci_set_master(pdev); - - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - - if (netif_running(netdev)) { - err = ixgbe_request_irq(adapter); - if (err) - return err; - } - - ixgbe_reset(adapter); - - if (netif_running(netdev)) - ixgbe_up(adapter); - - netif_device_attach(netdev); - - return 0; -} -#endif - /** * ixgbe_clean_rx_ring - Free Rx Buffers per Queue * @adapter: board private structure @@ -2230,44 +2193,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) #endif } -static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgbe_adapter *adapter = netdev_priv(netdev); -#ifdef CONFIG_PM - int retval = 0; -#endif - - netif_device_detach(netdev); - - if (netif_running(netdev)) { - ixgbe_down(adapter); - ixgbe_free_irq(adapter); - } - -#ifdef CONFIG_PM - retval = pci_save_state(pdev); - if (retval) - return retval; -#endif - - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - - ixgbe_release_hw_control(adapter); - - pci_disable_device(pdev); - - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - - return 0; -} - -static void ixgbe_shutdown(struct pci_dev *pdev) -{ - 
ixgbe_suspend(pdev, PMSG_SUSPEND); -} - /** * ixgbe_poll - NAPI Rx polling callback * @napi: structure for representing this polling device @@ -3022,6 +2947,135 @@ static int ixgbe_close(struct net_device *netdev) return 0; } +/** + * ixgbe_napi_add_all - prep napi structs for use + * @adapter: private struct + * helper function to napi_add each possible q_vector->napi + */ +static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) +{ + int q_idx, q_vectors; + int (*poll)(struct napi_struct *, int); + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + poll = &ixgbe_clean_rxonly; + /* Only enable as many vectors as we have rx queues. */ + q_vectors = adapter->num_rx_queues; + } else { + poll = &ixgbe_poll; + /* only one q_vector for legacy modes */ + q_vectors = 1; + } + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx]; + netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64); + } +} + +static void ixgbe_napi_del_all(struct ixgbe_adapter *adapter) +{ + int q_idx; + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + /* legacy and MSI only use one vector */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + q_vectors = 1; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx]; + if (!q_vector->rxr_count) + continue; + netif_napi_del(&q_vector->napi); + } +} + +#ifdef CONFIG_PM +static int ixgbe_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \ + "suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + err = ixgbe_init_interrupt_scheme(adapter); + if (err) { + printk(KERN_ERR "ixgbe: Cannot initialize interrupts for " + "device\n"); + return err; + } + + ixgbe_napi_add_all(adapter); + ixgbe_reset(adapter); + + if (netif_running(netdev)) { + err = ixgbe_open(adapter->netdev); + if (err) + return err; + } + + netif_device_attach(netdev); + + return 0; +} + +#endif /* CONFIG_PM */ +static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbe_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + ixgbe_down(adapter); + ixgbe_free_irq(adapter); + ixgbe_free_all_tx_resources(adapter); + ixgbe_free_all_rx_resources(adapter); + } + ixgbe_reset_interrupt_capability(adapter); + ixgbe_napi_del_all(adapter); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + ixgbe_release_hw_control(adapter); + + pci_disable_device(pdev); + + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static void ixgbe_shutdown(struct pci_dev *pdev) +{ + ixgbe_suspend(pdev, PMSG_SUSPEND); +} + /** * ixgbe_update_stats - Update the board statistics counters. 
* @adapter: board private structure @@ -3656,31 +3710,6 @@ static int ixgbe_link_config(struct ixgbe_hw *hw) return hw->mac.ops.setup_link_speed(hw, autoneg, true, true); } -/** - * ixgbe_napi_add_all - prep napi structs for use - * @adapter: private struct - * helper function to napi_add each possible q_vector->napi - */ -static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) -{ - int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - int (*poll)(struct napi_struct *, int); - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - poll = &ixgbe_clean_rxonly; - } else { - poll = &ixgbe_poll; - /* only one q_vector for legacy modes */ - q_vectors = 1; - } - - for (i = 0; i < q_vectors; i++) { - struct ixgbe_q_vector *q_vector = &adapter->q_vector[i]; - netif_napi_add(adapter->netdev, &q_vector->napi, - (*poll), 64); - } -} - /** * ixgbe_probe - Device Initialization Routine * @pdev: PCI device information struct @@ -3977,6 +4006,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) pci_release_regions(pdev); DPRINTK(PROBE, INFO, "complete\n"); + ixgbe_napi_del_all(adapter); kfree(adapter->tx_ring); kfree(adapter->rx_ring); -- cgit v1.2.3 From 69888674738db42dd73430ebc59a691010c8fa2e Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 11 Sep 2008 20:05:39 -0700 Subject: ixgbe: whitespace/formatting cleanup This patch cleans up some whitespace items, reorders a couple of functions, and removes some outdated comments. Signed-off-by: Alexander Duyck Signed-off-by: Jeff Kirsher Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe_main.c | 125 ++++++++++++++++++----------------------- 1 file changed, 54 insertions(+), 71 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index a8edbad0335..ca17af4349d 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -117,19 +117,6 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); } -#ifdef DEBUG -/** - * ixgbe_get_hw_dev_name - return device name string - * used by hardware layer to print debugging information - **/ -char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw) -{ - struct ixgbe_adapter *adapter = hw->back; - struct net_device *netdev = adapter->netdev; - return netdev->name; -} -#endif - static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry, u8 msix_vector) { @@ -1315,7 +1302,6 @@ static irqreturn_t ixgbe_intr(int irq, void *data) struct ixgbe_hw *hw = &adapter->hw; u32 eicr; - /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read * therefore no explict interrupt disable is necessary */ eicr = IXGBE_READ_REG(hw, IXGBE_EICR); @@ -2658,6 +2644,31 @@ err: return -ENOMEM; } +/** + * ixgbe_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
+ * + * Return 0 on success, negative on failure + **/ +static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (!err) + continue; + DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i); + break; + } + + return err; +} + /** * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) * @adapter: board private structure @@ -2710,6 +2721,32 @@ alloc_failed: return -ENOMEM; } +/** + * ixgbe_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ + +static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (!err) + continue; + DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i); + break; + } + + return err; +} + /** * ixgbe_free_tx_resources - Free Tx Resources per Queue * @adapter: board private structure @@ -2785,57 +2822,6 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]); } -/** - * ixgbe_setup_all_tx_resources - allocate all queues Tx resources - * @adapter: board private structure - * - * If this function returns with an error, then it's possible one or - * more of the rings is populated (while the rest are not). It is the - * callers duty to clean those orphaned rings. - * - * Return 0 on success, negative on failure - **/ -static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) -{ - int i, err = 0; - - for (i = 0; i < adapter->num_tx_queues; i++) { - err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]); - if (!err) - continue; - DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i); - break; - } - - return err; -} - -/** - * ixgbe_setup_all_rx_resources - allocate all queues Rx resources - * @adapter: board private structure - * - * If this function returns with an error, then it's possible one or - * more of the rings is populated (while the rest are not). It is the - * callers duty to clean those orphaned rings. 
- * - * Return 0 on success, negative on failure - **/ - -static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) -{ - int i, err = 0; - - for (i = 0; i < adapter->num_rx_queues; i++) { - err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]); - if (!err) - continue; - DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i); - break; - } - - return err; -} - /** * ixgbe_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure @@ -3001,7 +2987,7 @@ static int ixgbe_resume(struct pci_dev *pdev) pci_restore_state(pdev); err = pci_enable_device(pdev); if (err) { - printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \ + printk(KERN_ERR "ixgbe: Cannot enable PCI device from " "suspend\n"); return err; } @@ -3189,8 +3175,8 @@ static void ixgbe_watchdog(unsigned long data) } /** - * ixgbe_watchdog_task - worker thread to bring link up - * @work: pointer to work_struct containing our data + * ixgbe_watchdog_task - worker thread to bring link up + * @work: pointer to work_struct containing our data **/ static void ixgbe_watchdog_task(struct work_struct *work) { @@ -3526,7 +3512,6 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type_len | tx_buffer_info->length); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); - i++; if (i == tx_ring->count) i = 0; @@ -3576,7 +3561,6 @@ static int ixgbe_maybe_stop_tx(struct net_device *netdev, return __ixgbe_maybe_stop_tx(netdev, tx_ring, size); } - static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -4086,7 +4070,6 @@ static void ixgbe_io_resume(struct pci_dev *pdev) } netif_device_attach(netdev); - } static struct pci_error_handlers ixgbe_err_handler = { -- cgit v1.2.3 From b39d66a81fb4f5ab555f86a2e49f3714f8369a3d Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Wed, 20 Aug 2008 16:52:04 -0700 Subject: drivers/net: replace __FUNCTION__ with __func__ __FUNCTION__ is gcc-specific, use __func__ Signed-off-by: Harvey Harrison Signed-off-by: Andrew Morton Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 63f678931ad..2198b77c53e 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -44,7 +44,7 @@ #define DPRINTK(nlevel, klevel, fmt, args...) \ ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ - __FUNCTION__ , ## args))) + __func__ , ## args))) /* TX/RX descriptor defines */ #define IXGBE_DEFAULT_TXD 1024 -- cgit v1.2.3 From 5dd2d3322836036da169904afcb7d0f6dff5363f Mon Sep 17 00:00:00 2001 From: Jeff Garzik Date: Thu, 16 Oct 2008 05:09:31 -0400 Subject: [netdrvr] myri10ge, ixgbe: remove broken select INTEL_IOATDMA We cannot select INTEL_IOATDMA in Kconfig as soon as MYRI10GE or IXGBE is enabled since the former is not available on all architectures. Just use a Kconfig bool {IXGBE,MYRI10GE}_DCA set to =y when DCA support can actually be built. 
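For illustration only: the Kconfig hunk itself lives outside drivers/net/ixgbe and so is not part of the diff below, but the new symbol is assumed to be a bool entry named IXGBE_DCA that defaults to =y whenever both IXGBE and DCA can be built together. On the C side the effect is that the old compound preprocessor test collapses into a single #ifdef, because a bool symbol is either defined or absent and can never be =m. A minimal stand-alone sketch of that pattern follows (setup_dca() is a made-up helper for this sketch, not a function in the driver):

#include <stdio.h>

/* CONFIG_IXGBE_DCA is normally injected by Kbuild when the option is =y;
 * a bool symbol is never =m, so one #ifdef covers both outcomes. */
#ifdef CONFIG_IXGBE_DCA
static void setup_dca(void) { puts("DCA requester registered"); }
#else
static void setup_dca(void) { puts("DCA support compiled out"); }
#endif

int main(void)
{
	setup_dca();
	return 0;
}

Built as-is this prints the "compiled out" branch; compiling with -DCONFIG_IXGBE_DCA exercises the other path.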
[myri10ge portion written and signed-off-by] Brice Goglin Signed-off-by: Jeff Garzik --- drivers/net/ixgbe/ixgbe.h | 4 ++-- drivers/net/ixgbe/ixgbe_main.c | 32 ++++++++++++++++---------------- 2 files changed, 18 insertions(+), 18 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 2198b77c53e..e116d340dcc 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -36,7 +36,7 @@ #include "ixgbe_type.h" #include "ixgbe_common.h" -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#ifdef CONFIG_IXGBE_DCA #include #endif @@ -136,7 +136,7 @@ struct ixgbe_ring { * offset associated with this ring, which is different * for DCE and RSS modes */ -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#ifdef CONFIG_IXGBE_DCA /* cpu for tx queue */ int cpu; #endif diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index ca17af4349d..7548fb7360d 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -80,7 +80,7 @@ static struct pci_device_id ixgbe_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#ifdef CONFIG_IXGBE_DCA static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, void *p); static struct notifier_block dca_notifier = { @@ -296,7 +296,7 @@ done_cleaning: return (total_packets ? true : false); } -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#ifdef CONFIG_IXGBE_DCA static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring) { @@ -383,7 +383,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) return 0; } -#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */ +#endif /* CONFIG_IXGBE_DCA */ /** * ixgbe_receive_skb - Send a completed packet up the stack * @adapter: board private structure @@ -947,7 +947,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); for (i = 0; i < q_vector->txr_count; i++) { tx_ring = &(adapter->tx_ring[r_idx]); -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#ifdef CONFIG_IXGBE_DCA if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ixgbe_update_tx_dca(adapter, tx_ring); #endif @@ -1022,7 +1022,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); rx_ring = &(adapter->rx_ring[r_idx]); -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#ifdef CONFIG_IXGBE_DCA if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ixgbe_update_rx_dca(adapter, rx_ring); #endif @@ -1066,7 +1066,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); for (i = 0; i < q_vector->rxr_count; i++) { rx_ring = &(adapter->rx_ring[r_idx]); -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#ifdef CONFIG_IXGBE_DCA if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ixgbe_update_rx_dca(adapter, rx_ring); #endif @@ -2155,7 +2155,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) netif_carrier_off(netdev); -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#ifdef CONFIG_IXGBE_DCA if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; dca_remove_requester(&adapter->pdev->dev); @@ -2167,7 +2167,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_clean_all_tx_rings(adapter); ixgbe_clean_all_rx_rings(adapter); -#if defined(CONFIG_DCA) || 
defined(CONFIG_DCA_MODULE) +#ifdef CONFIG_IXGBE_DCA /* since we reset the hardware DCA settings were cleared */ if (dca_add_requester(&adapter->pdev->dev) == 0) { adapter->flags |= IXGBE_FLAG_DCA_ENABLED; @@ -2193,7 +2193,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) struct ixgbe_adapter *adapter = q_vector->adapter; int tx_cleaned, work_done = 0; -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#ifdef CONFIG_IXGBE_DCA if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { ixgbe_update_tx_dca(adapter, adapter->tx_ring); ixgbe_update_rx_dca(adapter, adapter->rx_ring); @@ -3922,7 +3922,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (err) goto err_register; -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#ifdef CONFIG_IXGBE_DCA if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IXGBE_FLAG_DCA_ENABLED; /* always use CB2 mode, difference is masked @@ -3972,7 +3972,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) flush_scheduled_work(); -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#ifdef CONFIG_IXGBE_DCA if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; dca_remove_requester(&pdev->dev); @@ -4105,10 +4105,10 @@ static int __init ixgbe_init_module(void) printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright); -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#ifdef CONFIG_IXGBE_DCA dca_register_notify(&dca_notifier); - #endif + ret = pci_register_driver(&ixgbe_driver); return ret; } @@ -4123,13 +4123,13 @@ module_init(ixgbe_init_module); **/ static void __exit ixgbe_exit_module(void) { -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#ifdef CONFIG_IXGBE_DCA dca_unregister_notify(&dca_notifier); #endif pci_unregister_driver(&ixgbe_driver); } -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) +#ifdef CONFIG_IXGBE_DCA static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, void *p) { @@ -4140,7 +4140,7 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, return ret_val ? NOTIFY_BAD : NOTIFY_DONE; } -#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */ +#endif /* CONFIG_IXGBE_DCA */ module_exit(ixgbe_exit_module); -- cgit v1.2.3
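A short aside on the __FUNCTION__ -> __func__ conversion earlier in this series: __func__ is defined by C99 itself (section 6.4.2.2), while __FUNCTION__ is a gcc extension, so a DPRINTK-style macro built on __func__ stays portable across conforming compilers. A tiny stand-alone sketch (DBG and probe_device are invented names for illustration, not driver code):

#include <stdio.h>

/* __func__ evaluates to the name of the enclosing function (C99 6.4.2.2),
 * so it resolves at the macro's expansion site, not its definition. */
#define DBG(msg) printf("%s: %s\n", __func__, (msg))

static void probe_device(void)
{
	DBG("probing");		/* prints "probe_device: probing" */
}

int main(void)
{
	probe_device();
	return 0;
}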