Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/benet/be_main.c           |  73
-rw-r--r--  drivers/net/bonding/bond_sysfs.c      |   1
-rw-r--r--  drivers/net/cxgb3/adapter.h           |   2
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c        |  78
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c     |  27
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.h     |   3
-rw-r--r--  drivers/net/mdio.c                    |  17
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c  |   9
-rw-r--r--  drivers/net/qlge/qlge.h               |  15
-rw-r--r--  drivers/net/qlge/qlge_main.c          |  30
-rw-r--r--  drivers/net/r8169.c                   |   2
-rw-r--r--  drivers/net/sfc/tenxpress.c           |  11
-rw-r--r--  drivers/net/tehuti.c                  |   9
-rw-r--r--  drivers/net/vxge/vxge-config.c        |  12
14 files changed, 228 insertions, 61 deletions
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 347d002fa42..66bb56874d9 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -168,6 +168,7 @@ static void netdev_stats_update(struct be_adapter *adapter)
struct be_port_rxf_stats *port_stats =
&rxf_stats->port[adapter->port_num];
struct net_device_stats *dev_stats = &adapter->stats.net_stats;
+ struct be_erx_stats *erx_stats = &hw_stats->erx;
dev_stats->rx_packets = port_stats->rx_total_frames;
dev_stats->tx_packets = port_stats->tx_unicastframes +
@@ -181,29 +182,33 @@ static void netdev_stats_update(struct be_adapter *adapter)
dev_stats->rx_errors = port_stats->rx_crc_errors +
port_stats->rx_alignment_symbol_errors +
port_stats->rx_in_range_errors +
- port_stats->rx_out_range_errors + port_stats->rx_frame_too_long;
-
- /* packet transmit problems */
- dev_stats->tx_errors = 0;
-
- /* no space in linux buffers */
- dev_stats->rx_dropped = 0;
-
- /* no space available in linux */
- dev_stats->tx_dropped = 0;
-
- dev_stats->multicast = port_stats->tx_multicastframes;
- dev_stats->collisions = 0;
+ port_stats->rx_out_range_errors +
+ port_stats->rx_frame_too_long +
+ port_stats->rx_dropped_too_small +
+ port_stats->rx_dropped_too_short +
+ port_stats->rx_dropped_header_too_small +
+ port_stats->rx_dropped_tcp_length +
+ port_stats->rx_dropped_runt +
+ port_stats->rx_tcp_checksum_errs +
+ port_stats->rx_ip_checksum_errs +
+ port_stats->rx_udp_checksum_errs;
+
+ /* no space in linux buffers: best possible approximation */
+ dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];
/* detailed rx errors */
dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
- port_stats->rx_out_range_errors + port_stats->rx_frame_too_long;
+ port_stats->rx_out_range_errors +
+ port_stats->rx_frame_too_long;
+
/* receive ring buffer overflow */
dev_stats->rx_over_errors = 0;
+
dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
/* frame alignment errors */
dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
+
/* receiver fifo overrun */
/* drops_no_pbuf is not per i/f, it's per BE card */
dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
@@ -211,6 +216,16 @@ static void netdev_stats_update(struct be_adapter *adapter)
rxf_stats->rx_drops_no_pbuf;
/* receiver missed packets */
dev_stats->rx_missed_errors = 0;
+
+ /* packet transmit problems */
+ dev_stats->tx_errors = 0;
+
+ /* no space available in linux */
+ dev_stats->tx_dropped = 0;
+
+ dev_stats->multicast = port_stats->tx_multicastframes;
+ dev_stats->collisions = 0;
+
/* detailed tx_errors */
dev_stats->tx_aborted_errors = 0;
dev_stats->tx_carrier_errors = 0;
@@ -728,7 +743,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
if (pktsize <= rx_frag_size) {
BUG_ON(num_rcvd != 1);
- return;
+ goto done;
}
/* More frags present for this completion */
@@ -750,6 +765,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
memset(page_info, 0, sizeof(*page_info));
}
+done:
be_rx_stats_update(adapter, pktsize, num_rcvd);
return;
}
@@ -860,12 +876,19 @@ static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
- rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
-
queue_tail_inc(&adapter->rx_obj.cq);
return rxcp;
}
+/* To reset the valid bit, we need to reset the whole word as
+ * when walking the queue the valid entries are little-endian
+ * and invalid entries are host endian
+ */
+static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
+{
+ rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
+}
+
static inline struct page *be_alloc_pages(u32 size)
{
gfp_t alloc_flags = GFP_ATOMIC;
@@ -997,6 +1020,7 @@ static void be_rx_q_clean(struct be_adapter *adapter)
/* First cleanup pending rx completions */
while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
be_rx_compl_discard(adapter, rxcp);
+ be_rx_compl_reset(rxcp);
be_cq_notify(&adapter->ctrl, rx_cq->id, true, 1);
}
@@ -1032,8 +1056,13 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
struct be_queue_info *q;
q = &adapter->tx_obj.q;
- if (q->created)
+ if (q->created) {
be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_TXQ);
+
+ /* No more tx completions can be rcvd now; clean up if there
+ * are any pending completions or pending tx requests */
+ be_tx_q_clean(adapter);
+ }
be_queue_free(adapter, q);
q = &adapter->tx_obj.cq;
@@ -1041,10 +1070,6 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ);
be_queue_free(adapter, q);
- /* No more tx completions can be rcvd now; clean up if there are
- * any pending completions or pending tx requests */
- be_tx_q_clean(adapter);
-
q = &adapter->tx_eq.q;
if (q->created)
be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ);
@@ -1278,6 +1303,8 @@ int be_poll_rx(struct napi_struct *napi, int budget)
be_rx_compl_process_lro(adapter, rxcp);
else
be_rx_compl_process(adapter, rxcp);
+
+ be_rx_compl_reset(rxcp);
}
lro_flush_all(&adapter->rx_obj.lro_mgr);
@@ -1533,7 +1560,7 @@ static int be_close(struct net_device *netdev)
struct be_eq_obj *tx_eq = &adapter->tx_eq;
int vec;
- cancel_delayed_work(&adapter->work);
+ cancel_delayed_work_sync(&adapter->work);
netif_stop_queue(netdev);
netif_carrier_off(netdev);
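
The point of splitting be_rx_compl_reset() out of be_rx_compl_get() is that the completion entry must stay marked valid until it has actually been consumed; be_poll_rx() and be_rx_q_clean() now both follow the same consume-then-reset pattern. A minimal sketch of that pattern, with process_completion() standing in for the LRO/non-LRO processing calls in the hunks above:

	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		process_completion(adapter, rxcp);	/* placeholder for the LRO or regular RX path */
		be_rx_compl_reset(rxcp);		/* clear the valid word only after the entry is consumed */
		be_cq_notify(&adapter->ctrl, rx_cq->id, true, 1);
	}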
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 3a1b7b04eb7..5fb861a0866 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1541,6 +1541,7 @@ int bond_create_sysfs(void)
printk(KERN_ERR
"network device named %s already exists in sysfs",
class_attr_bonding_masters.attr.name);
+ ret = 0;
}
return ret;
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index e48e508b963..1694fad3872 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -253,6 +253,8 @@ struct adapter {
struct mutex mdio_lock;
spinlock_t stats_lock;
spinlock_t work_lock;
+
+ struct sk_buff *nofail_skb;
};
static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index aef3ab21f5f..538dda4422d 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -433,40 +433,78 @@ static int init_tp_parity(struct adapter *adap)
for (i = 0; i < 16; i++) {
struct cpl_smt_write_req *req;
- skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ goto alloc_skb_fail;
+
req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
req->iff = i;
t3_mgmt_tx(adap, skb);
+ if (skb == adap->nofail_skb) {
+ await_mgmt_replies(adap, cnt, i + 1);
+ adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ if (!adap->nofail_skb)
+ goto alloc_skb_fail;
+ }
}
for (i = 0; i < 2048; i++) {
struct cpl_l2t_write_req *req;
- skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ goto alloc_skb_fail;
+
req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
req->params = htonl(V_L2T_W_IDX(i));
t3_mgmt_tx(adap, skb);
+ if (skb == adap->nofail_skb) {
+ await_mgmt_replies(adap, cnt, 16 + i + 1);
+ adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ if (!adap->nofail_skb)
+ goto alloc_skb_fail;
+ }
}
for (i = 0; i < 2048; i++) {
struct cpl_rte_write_req *req;
- skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ goto alloc_skb_fail;
+
req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
req->l2t_idx = htonl(V_L2T_W_IDX(i));
t3_mgmt_tx(adap, skb);
+ if (skb == adap->nofail_skb) {
+ await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
+ adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ if (!adap->nofail_skb)
+ goto alloc_skb_fail;
+ }
}
- skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ goto alloc_skb_fail;
+
greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
memset(greq, 0, sizeof(*greq));
greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
@@ -475,8 +513,17 @@ static int init_tp_parity(struct adapter *adap)
t3_mgmt_tx(adap, skb);
i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
+ if (skb == adap->nofail_skb) {
+ i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
+ adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ }
+
t3_tp_set_offload_mode(adap, 0);
return i;
+
+alloc_skb_fail:
+ t3_tp_set_offload_mode(adap, 0);
+ return -ENOMEM;
}
/**
@@ -871,7 +918,12 @@ static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
struct mngt_pktsched_wr *req;
int ret;
- skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ return -ENOMEM;
+
req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
@@ -881,6 +933,12 @@ static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
req->max = hi;
req->binding = port;
ret = t3_mgmt_tx(adap, skb);
+ if (skb == adap->nofail_skb) {
+ adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
+ GFP_KERNEL);
+ if (!adap->nofail_skb)
+ ret = -ENOMEM;
+ }
return ret;
}
@@ -3020,6 +3078,14 @@ static int __devinit init_one(struct pci_dev *pdev,
goto out_disable_device;
}
+ adapter->nofail_skb =
+ alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
+ if (!adapter->nofail_skb) {
+ dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+
adapter->regs = ioremap_nocache(mmio_start, mmio_len);
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
@@ -3176,6 +3242,8 @@ static void __devexit remove_one(struct pci_dev *pdev)
free_netdev(adapter->port[i]);
iounmap(adapter->regs);
+ if (adapter->nofail_skb)
+ kfree_skb(adapter->nofail_skb);
kfree(adapter);
pci_release_regions(pdev);
pci_disable_device(pdev);
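
Every former __GFP_NOFAIL call site above is converted to the same scheme: try a normal GFP_KERNEL allocation, fall back to the pre-allocated adap->nofail_skb reserve, and replenish the reserve only once the hardware is finished with it. A condensed sketch of the scheme, where build_and_send_request() and wait_for_reply() are placeholders for the request construction / t3_mgmt_tx() and await_mgmt_replies() calls above:

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;		/* fall back to the reserve skb */
	if (!skb)
		return -ENOMEM;			/* reserve already consumed and not yet replenished */

	build_and_send_request(adap, skb);	/* placeholder: fill in the request and t3_mgmt_tx() */

	if (skb == adap->nofail_skb) {
		wait_for_reply(adap);		/* placeholder: await_mgmt_replies() before reusing the slot */
		adap->nofail_skb = alloc_skb(len, GFP_KERNEL);
		if (!adap->nofail_skb)
			return -ENOMEM;
	}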
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 620d80be6aa..f9f54b57b28 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -566,13 +566,31 @@ static void t3_process_tid_release_list(struct work_struct *work)
spin_unlock_bh(&td->tid_release_lock);
skb = alloc_skb(sizeof(struct cpl_tid_release),
- GFP_KERNEL | __GFP_NOFAIL);
+ GFP_KERNEL);
+ if (!skb)
+ skb = td->nofail_skb;
+ if (!skb) {
+ spin_lock_bh(&td->tid_release_lock);
+ p->ctx = (void *)td->tid_release_list;
+ td->tid_release_list = (struct t3c_tid_entry *)p;
+ break;
+ }
mk_tid_release(skb, p - td->tid_maps.tid_tab);
cxgb3_ofld_send(tdev, skb);
p->ctx = NULL;
+ if (skb == td->nofail_skb)
+ td->nofail_skb =
+ alloc_skb(sizeof(struct cpl_tid_release),
+ GFP_KERNEL);
spin_lock_bh(&td->tid_release_lock);
}
+ td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
spin_unlock_bh(&td->tid_release_lock);
+
+ if (!td->nofail_skb)
+ td->nofail_skb =
+ alloc_skb(sizeof(struct cpl_tid_release),
+ GFP_KERNEL);
}
/* use ctx as a next pointer in the tid release list */
@@ -585,7 +603,7 @@ void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
p->ctx = (void *)td->tid_release_list;
p->client = NULL;
td->tid_release_list = p;
- if (!p->ctx)
+ if (!p->ctx || td->release_list_incomplete)
schedule_work(&td->tid_release_task);
spin_unlock_bh(&td->tid_release_lock);
}
@@ -1274,6 +1292,9 @@ int cxgb3_offload_activate(struct adapter *adapter)
if (list_empty(&adapter_list))
register_netevent_notifier(&nb);
+ t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
+ t->release_list_incomplete = 0;
+
add_adapter(adapter);
return 0;
@@ -1298,6 +1319,8 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
T3C_DATA(tdev) = NULL;
t3_free_l2t(L2DATA(tdev));
L2DATA(tdev) = NULL;
+ if (t->nofail_skb)
+ kfree_skb(t->nofail_skb);
kfree(t);
}
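
When even the reserve skb is gone, t3_process_tid_release_list() pushes the current entry back onto the list, stops, and records the fact in release_list_incomplete; cxgb3_queue_tid_release() then reschedules the worker even though the list is already non-empty. Condensed from the two hunks above:

	/* worker: no skb at all, push the entry back and give up for now */
	spin_lock_bh(&td->tid_release_lock);
	p->ctx = (void *)td->tid_release_list;
	td->tid_release_list = (struct t3c_tid_entry *)p;
	break;

	/* queueing side: kick the worker again if a previous pass gave up early */
	if (!p->ctx || td->release_list_incomplete)
		schedule_work(&td->tid_release_task);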
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index a8e8e5fcdf8..55945f422ae 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -191,6 +191,9 @@ struct t3c_data {
struct t3c_tid_entry *tid_release_list;
spinlock_t tid_release_lock;
struct work_struct tid_release_task;
+
+ struct sk_buff *nofail_skb;
+ unsigned int release_list_incomplete;
};
/*
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index 66483035f68..dc45e9856c3 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -296,6 +296,23 @@ void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
ecmd->duplex = (reg & MDIO_CTRL1_FULLDPLX ||
ecmd->speed == SPEED_10000);
}
+
+ /* 10GBASE-T MDI/MDI-X */
+ if (ecmd->port == PORT_TP && ecmd->speed == SPEED_10000) {
+ switch (mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+ MDIO_PMA_10GBT_SWAPPOL)) {
+ case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX:
+ ecmd->eth_tp_mdix = ETH_TP_MDI;
+ break;
+ case 0:
+ ecmd->eth_tp_mdix = ETH_TP_MDI_X;
+ break;
+ default:
+ /* It's complicated... */
+ ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ break;
+ }
+ }
}
EXPORT_SYMBOL(mdio45_ethtool_gset_npage);
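
Drivers that already go through the mdio45 helpers pick this up without further changes: a Clause 45 PHY driver's ethtool get_settings path just delegates to mdio45_ethtool_gset_npage() and now has ecmd->eth_tp_mdix filled in as well. A hedged usage sketch, where foo_get_settings()/foo_priv are hypothetical and the zero next-page masks simply mean no next-page data (the sfc hunk below passes real adv/lpa values):

	static int foo_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
	{
		struct foo_priv *priv = netdev_priv(dev);	/* hypothetical driver private data */

		mdio45_ethtool_gset_npage(&priv->mdio, ecmd, 0, 0);
		return 0;
	}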
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 4a51c31330d..6f77ad58e3b 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -178,10 +178,8 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter)
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
- if (rds_ring->rx_buf_arr) {
- vfree(rds_ring->rx_buf_arr);
- rds_ring->rx_buf_arr = NULL;
- }
+ vfree(rds_ring->rx_buf_arr);
+ rds_ring->rx_buf_arr = NULL;
}
kfree(recv_ctx->rds_rings);
@@ -190,8 +188,7 @@ skip_rds:
return;
tx_ring = adapter->tx_ring;
- if (tx_ring->cmd_buf_arr)
- vfree(tx_ring->cmd_buf_arr);
+ vfree(tx_ring->cmd_buf_arr);
}
int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
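
The netxen cleanup above, and the tehuti and vxge hunks further down, all rely on the same idiom: vfree(), like kfree(), is defined to do nothing when passed NULL, so the surrounding NULL checks are redundant. In short:

	vfree(ptr);	/* safe even when ptr == NULL */
	ptr = NULL;	/* keeps a later cleanup path from freeing it twice */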
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 5eb52ca0898..156e02e8905 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -27,6 +27,8 @@
"%s: " fmt, __func__, ##args); \
} while (0)
+#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
+
#define QLGE_VENDOR_ID 0x1077
#define QLGE_DEVICE_ID_8012 0x8012
#define QLGE_DEVICE_ID_8000 0x8000
@@ -39,7 +41,18 @@
#define NUM_SMALL_BUFFERS 512
#define NUM_LARGE_BUFFERS 512
+#define DB_PAGE_SIZE 4096
+
+/* Calculate the number of (4k) pages required to
+ * contain a buffer queue of the given length.
+ */
+#define MAX_DB_PAGES_PER_BQ(x) \
+ (((x * sizeof(u64)) / DB_PAGE_SIZE) + \
+ (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
+#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
+ MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
+ MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
#define SMALL_BUFFER_SIZE 256
#define LARGE_BUFFER_SIZE PAGE_SIZE
#define MAX_SPLIT_SIZE 1023
@@ -63,8 +76,6 @@
#define TX_DESC_PER_OAL 0
#endif
-#define DB_PAGE_SIZE 4096
-
/* MPI test register definitions. This register
* is used for determining alternate NIC function's
* PCI->func number.
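
With the default queue lengths above, the new macros work out to one doorbell page of indirect entries per buffer queue; a quick worked check (not driver code):

	/* NUM_SMALL_BUFFERS = NUM_LARGE_BUFFERS = 512, sizeof(u64) = 8 */
	MAX_DB_PAGES_PER_BQ(512) = (512 * 8) / 4096 + ((512 * 8) % 4096 ? 1 : 0)
	                         = 1 + 0 = 1 page
	RX_RING_SHADOW_SPACE     = 8		/* producer index shadow         */
	                         + 1 * 8	/* lbq indirect page address(es) */
	                         + 1 * 8	/* sbq indirect page address(es) */
	                         = 24 bytes of shadow area per rx ring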
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 024c7343ada..b9a5f59d6c9 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -2237,7 +2237,7 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
&tx_ring->wq_base_dma);
if ((tx_ring->wq_base == NULL)
- || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
+ || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
return -ENOMEM;
}
@@ -2552,14 +2552,16 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
struct cqicb *cqicb = &rx_ring->cqicb;
void *shadow_reg = qdev->rx_ring_shadow_reg_area +
- (rx_ring->cq_id * sizeof(u64) * 4);
+ (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
- (rx_ring->cq_id * sizeof(u64) * 4);
+ (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
void __iomem *doorbell_area =
qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
int err = 0;
u16 bq_len;
u64 tmp;
+ __le64 *base_indirect_ptr;
+ int page_entries;
/* Set up the shadow registers for this ring. */
rx_ring->prod_idx_sh_reg = shadow_reg;
@@ -2568,8 +2570,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
shadow_reg_dma += sizeof(u64);
rx_ring->lbq_base_indirect = shadow_reg;
rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
- shadow_reg += sizeof(u64);
- shadow_reg_dma += sizeof(u64);
+ shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
+ shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
rx_ring->sbq_base_indirect = shadow_reg;
rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
@@ -2606,7 +2608,14 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
if (rx_ring->lbq_len) {
cqicb->flags |= FLAGS_LL; /* Load lbq values */
tmp = (u64)rx_ring->lbq_base_dma;;
- *((__le64 *) rx_ring->lbq_base_indirect) = cpu_to_le64(tmp);
+ base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
+ page_entries = 0;
+ do {
+ *base_indirect_ptr = cpu_to_le64(tmp);
+ tmp += DB_PAGE_SIZE;
+ base_indirect_ptr++;
+ page_entries++;
+ } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
cqicb->lbq_addr =
cpu_to_le64(rx_ring->lbq_base_indirect_dma);
bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
@@ -2623,7 +2632,14 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
if (rx_ring->sbq_len) {
cqicb->flags |= FLAGS_LS; /* Load sbq values */
tmp = (u64)rx_ring->sbq_base_dma;;
- *((__le64 *) rx_ring->sbq_base_indirect) = cpu_to_le64(tmp);
+ base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
+ page_entries = 0;
+ do {
+ *base_indirect_ptr = cpu_to_le64(tmp);
+ tmp += DB_PAGE_SIZE;
+ base_indirect_ptr++;
+ page_entries++;
+ } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
cqicb->sbq_addr =
cpu_to_le64(rx_ring->sbq_base_indirect_dma);
cqicb->sbq_buf_size =
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index e94316b7868..007c881896d 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3379,7 +3379,7 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);
if (status & LastFrag) {
- dev_kfree_skb_irq(tx_skb->skb);
+ dev_kfree_skb(tx_skb->skb);
tx_skb->skb = NULL;
}
dirty_tx++;
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index db723c58f6f..f4d509015f7 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -63,6 +63,7 @@
/* extended status register */
#define PMA_PMD_XSTATUS_REG 49153
+#define PMA_PMD_XSTAT_MDIX_LBN 14
#define PMA_PMD_XSTAT_FLP_LBN (12)
/* LED control register */
@@ -741,9 +742,17 @@ tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
- if (efx->phy_type != PHY_TYPE_SFX7101)
+ if (efx->phy_type != PHY_TYPE_SFX7101) {
ecmd->supported |= (SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full);
+ if (ecmd->speed != SPEED_10000) {
+ ecmd->eth_tp_mdix =
+ (efx_mdio_read(efx, MDIO_MMD_PMAPMD,
+ PMA_PMD_XSTATUS_REG) &
+ (1 << PMA_PMD_XSTAT_MDIX_LBN))
+ ? ETH_TP_MDI_X : ETH_TP_MDI;
+ }
+ }
/* In loopback, the PHY automatically brings up the correct interface,
* but doesn't advertise the correct speed. So override it */
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 093807a182f..3c2679cd196 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -948,8 +948,7 @@ static void print_rxfd(struct rxf_desc *rxfd);
static void bdx_rxdb_destroy(struct rxdb *db)
{
- if (db)
- vfree(db);
+ vfree(db);
}
static struct rxdb *bdx_rxdb_create(int nelem)
@@ -1482,10 +1481,8 @@ static void bdx_tx_db_close(struct txdb *d)
{
BDX_ASSERT(d == NULL);
- if (d->start) {
- vfree(d->start);
- d->start = NULL;
- }
+ vfree(d->start);
+ d->start = NULL;
}
/*************************************************************************
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 6b41c884a33..26cde573af4 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -1884,17 +1884,13 @@ void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
mempool->memblock_size, dma_object);
}
- if (mempool->items_arr)
- vfree(mempool->items_arr);
+ vfree(mempool->items_arr);
- if (mempool->memblocks_dma_arr)
- vfree(mempool->memblocks_dma_arr);
+ vfree(mempool->memblocks_dma_arr);
- if (mempool->memblocks_priv_arr)
- vfree(mempool->memblocks_priv_arr);
+ vfree(mempool->memblocks_priv_arr);
- if (mempool->memblocks_arr)
- vfree(mempool->memblocks_arr);
+ vfree(mempool->memblocks_arr);
vfree(mempool);
}