Diffstat (limited to 'drivers/net/sfc/efx.c')
-rw-r--r--  drivers/net/sfc/efx.c  29
1 file changed, 12 insertions(+), 17 deletions(-)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 7269a426051..343e8da1fa3 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -50,16 +50,6 @@ static struct workqueue_struct *reset_workqueue;
*************************************************************************/
/*
- * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
- *
- * This sets the default for new devices. It can be controlled later
- * using ethtool.
- */
-static int lro = true;
-module_param(lro, int, 0644);
-MODULE_PARM_DESC(lro, "Large receive offload acceleration");
-
-/*
* Use separate channels for TX and RX events
*
* Set this to 1 to use separate channels for TX and RX. It allows us
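
With the lro module parameter removed, GRO is enabled unconditionally (see the feature-flag hunk at the end of this diff) and remains controllable per device at runtime through ethtool. A minimal user-space sketch of that control path, assuming an interface named eth0 (the interface name and the choice to disable are illustrative only):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	/* ETHTOOL_SGRO sets the generic-receive-offload flag; data = 0 disables */
	struct ethtool_value edata = { .cmd = ETHTOOL_SGRO, .data = 0 };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&edata;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SGRO");
	close(fd);
	return 0;
}
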
@@ -894,9 +884,9 @@ static int efx_wanted_rx_queues(void)
int count;
int cpu;
- if (!alloc_cpumask_var(&core_mask, GFP_KERNEL)) {
+ if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) {
printk(KERN_WARNING
- "efx.c: allocation failure, irq balancing hobbled\n");
+ "sfc: RSS disabled due to allocation failure\n");
return 1;
}
@@ -1300,10 +1290,16 @@ out_requeue:
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
EFX_ASSERT_RESET_SERIALISED(efx);
- return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
+ /* Convert phy_id from older PRTAD/DEVAD format */
+ if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
+ (data->phy_id & 0xfc00) == 0x0400)
+ data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
+
+ return mdio_mii_ioctl(&efx->mdio, data, cmd);
}
/**************************************************************************
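
The XOR trick works because the old and new encodings keep PRTAD/DEVAD in the same low bits and differ only in their flag bit: the guard requires bits 15:11 clear and bit 10 set, so XOR-ing with MDIO_PHY_ID_C45 | 0x0400 simultaneously clears the old flag and sets the clause-45 flag. A standalone sketch, where the helper name and the old-format field layout are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>

#define MDIO_PHY_ID_C45   0x8000	/* clause-45 flag in the generic format */
#define EFX_OLD_C45_FLAG  0x0400	/* assumed: flag bit in the old sfc format */

/* Convert an old-style phy_id (bit 10 set, PRTAD/DEVAD in the low bits)
 * to the generic mdio45 encoding consumed by mdio_mii_ioctl(). */
static uint16_t efx_convert_phy_id(uint16_t phy_id)
{
	if ((phy_id & 0xfc00) == EFX_OLD_C45_FLAG)
		phy_id ^= MDIO_PHY_ID_C45 | EFX_OLD_C45_FLAG;
	return phy_id;
}

int main(void)
{
	uint16_t old_id = EFX_OLD_C45_FLAG | (1 << 5) | 5; /* PRTAD 1, DEVAD 5 */

	/* prints: old 0x0425 -> new 0x8025 */
	printf("old 0x%04x -> new 0x%04x\n", old_id, efx_convert_phy_id(old_id));
	return 0;
}
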
@@ -1945,7 +1941,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
mutex_init(&efx->mac_lock);
efx->mac_op = &efx_dummy_mac_operations;
efx->phy_op = &efx_dummy_phy_operations;
- efx->mii.dev = net_dev;
+ efx->mdio.dev = net_dev;
INIT_WORK(&efx->phy_work, efx_phy_work);
INIT_WORK(&efx->mac_work, efx_mac_work);
atomic_set(&efx->netif_stop_count, 1);
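
Replacing struct mii_if_info with struct mdio_if_info routes PHY access through the generic clause-45 helpers behind mdio_mii_ioctl(). A minimal sketch of how the rest of the structure might be filled in; only the .dev assignment appears in this patch, and the callback names are assumptions:

#include <linux/mdio.h>
#include <linux/netdevice.h>

/* Sketch only: struct efx_nic and the efx_mdio_* callbacks are the
 * driver's own; the callback names here are assumed, not quoted. */
static void efx_sketch_init_mdio(struct efx_nic *efx,
				 struct net_device *net_dev)
{
	efx->mdio.dev = net_dev;		/* as set in efx_init_struct() */
	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	efx->mdio.mdio_read = efx_mdio_read;	/* (dev, prtad, devad, addr) */
	efx->mdio.mdio_write = efx_mdio_write;	/* (dev, prtad, devad, addr, val) */
}
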
@@ -2161,9 +2157,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
if (!net_dev)
return -ENOMEM;
net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_HIGHDMA | NETIF_F_TSO);
- if (lro)
- net_dev->features |= NETIF_F_GRO;
+ NETIF_F_HIGHDMA | NETIF_F_TSO |
+ NETIF_F_GRO);
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_TSO);
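
With NETIF_F_GRO always set, aggregation is gated solely by the runtime ethtool flag rather than a load-time parameter. A minimal sketch of the delivery step that this flag gates, with an assumed function name since the sfc RX path is not part of this diff:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void efx_sketch_rx_deliver(struct napi_struct *napi,
				  struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, skb->dev);
	/* Falls back to the normal receive path when GRO is disabled */
	napi_gro_receive(napi, skb);
}
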