Diffstat (limited to 'drivers/net/cxgb3')
-rw-r--r--  drivers/net/cxgb3/adapter.h           |    1
-rw-r--r--  drivers/net/cxgb3/common.h            |    4
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c        |  264
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c     |   19
-rw-r--r--  drivers/net/cxgb3/firmware_exports.h  |   20
-rw-r--r--  drivers/net/cxgb3/l2t.c               |    2
-rw-r--r--  drivers/net/cxgb3/regs.h              |  297
-rw-r--r--  drivers/net/cxgb3/sge.c               |  124
-rw-r--r--  drivers/net/cxgb3/t3_hw.c             |  204
-rw-r--r--  drivers/net/cxgb3/version.h           |    4
-rw-r--r--  drivers/net/cxgb3/xgmac.c             |   54
11 files changed, 814 insertions, 179 deletions
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 60a62f510db..eb305a0895f 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -71,6 +71,7 @@ enum { /* adapter flags */
USING_MSI = (1 << 1),
USING_MSIX = (1 << 2),
QUEUES_BOUND = (1 << 3),
+ TP_PARITY_INIT = (1 << 4),
};
struct fl_pg_chunk {
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 99c75d30f67..91ee7277b81 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -681,8 +681,8 @@ int t3_phy_intr_handler(struct adapter *adapter);
void t3_link_changed(struct adapter *adapter, int port_id);
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
-int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
-int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
+int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data);
+int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data);
int t3_seeprom_wp(struct adapter *adapter, int enable);
int t3_get_tp_version(struct adapter *adapter, u32 *vers);
int t3_check_tpsram_version(struct adapter *adapter, int *must_load);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 61ffc925eae..fd2e05bbb90 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -76,20 +76,20 @@ enum {
#define EEPROM_MAGIC 0x38E2F10C
-#define CH_DEVICE(devid, ssid, idx) \
- { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
+#define CH_DEVICE(devid, idx) \
+ { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
static const struct pci_device_id cxgb3_pci_tbl[] = {
- CH_DEVICE(0x20, 1, 0), /* PE9000 */
- CH_DEVICE(0x21, 1, 1), /* T302E */
- CH_DEVICE(0x22, 1, 2), /* T310E */
- CH_DEVICE(0x23, 1, 3), /* T320X */
- CH_DEVICE(0x24, 1, 1), /* T302X */
- CH_DEVICE(0x25, 1, 3), /* T320E */
- CH_DEVICE(0x26, 1, 2), /* T310X */
- CH_DEVICE(0x30, 1, 2), /* T3B10 */
- CH_DEVICE(0x31, 1, 3), /* T3B20 */
- CH_DEVICE(0x32, 1, 1), /* T3B02 */
+ CH_DEVICE(0x20, 0), /* PE9000 */
+ CH_DEVICE(0x21, 1), /* T302E */
+ CH_DEVICE(0x22, 2), /* T310E */
+ CH_DEVICE(0x23, 3), /* T320X */
+ CH_DEVICE(0x24, 1), /* T302X */
+ CH_DEVICE(0x25, 3), /* T320E */
+ CH_DEVICE(0x26, 2), /* T310X */
+ CH_DEVICE(0x30, 2), /* T3B10 */
+ CH_DEVICE(0x31, 3), /* T3B20 */
+ CH_DEVICE(0x32, 1), /* T3B02 */
{0,}
};
@@ -306,6 +306,77 @@ static int request_msix_data_irqs(struct adapter *adap)
return 0;
}
+static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
+ unsigned long n)
+{
+ int attempts = 5;
+
+ while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
+ if (!--attempts)
+ return -ETIMEDOUT;
+ msleep(10);
+ }
+ return 0;
+}
+
+static int init_tp_parity(struct adapter *adap)
+{
+ int i;
+ struct sk_buff *skb;
+ struct cpl_set_tcb_field *greq;
+ unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
+
+ t3_tp_set_offload_mode(adap, 1);
+
+ for (i = 0; i < 16; i++) {
+ struct cpl_smt_write_req *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+ req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
+ req->iff = i;
+ t3_mgmt_tx(adap, skb);
+ }
+
+ for (i = 0; i < 2048; i++) {
+ struct cpl_l2t_write_req *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+ req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
+ req->params = htonl(V_L2T_W_IDX(i));
+ t3_mgmt_tx(adap, skb);
+ }
+
+ for (i = 0; i < 2048; i++) {
+ struct cpl_rte_write_req *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
+ req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
+ req->l2t_idx = htonl(V_L2T_W_IDX(i));
+ t3_mgmt_tx(adap, skb);
+ }
+
+ skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
+ greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
+ memset(greq, 0, sizeof(*greq));
+ greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
+ greq->mask = cpu_to_be64(1);
+ t3_mgmt_tx(adap, skb);
+
+ i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
+ t3_tp_set_offload_mode(adap, 0);
+ return i;
+}
+
/**
* setup_rss - configure RSS
* @adap: the adapter
@@ -336,7 +407,7 @@ static void setup_rss(struct adapter *adap)
t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
- V_RRCPLCPUSIZE(6), cpus, rspq_map);
+ V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
static void init_napi(struct adapter *adap)
@@ -410,8 +481,7 @@ static int setup_sge_qsets(struct adapter *adap)
return 0;
}
-static ssize_t attr_show(struct device *d, struct device_attribute *attr,
- char *buf,
+static ssize_t attr_show(struct device *d, char *buf,
ssize_t(*format) (struct net_device *, char *))
{
ssize_t len;
@@ -423,7 +493,7 @@ static ssize_t attr_show(struct device *d, struct device_attribute *attr,
return len;
}
-static ssize_t attr_store(struct device *d, struct device_attribute *attr,
+static ssize_t attr_store(struct device *d,
const char *buf, size_t len,
ssize_t(*set) (struct net_device *, unsigned int),
unsigned int min_val, unsigned int max_val)
@@ -457,7 +527,7 @@ static ssize_t format_##name(struct net_device *dev, char *buf) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
char *buf) \
{ \
- return attr_show(d, attr, buf, format_##name); \
+ return attr_show(d, buf, format_##name); \
}
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
@@ -480,7 +550,7 @@ static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
const char *buf, size_t len)
{
- return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
+ return attr_store(d, buf, len, set_nfilters, 0, ~0);
}
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
@@ -500,7 +570,7 @@ static ssize_t set_nservers(struct net_device *dev, unsigned int val)
static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
const char *buf, size_t len)
{
- return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
+ return attr_store(d, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
@@ -524,7 +594,7 @@ static struct attribute *cxgb3_attrs[] = {
static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
-static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
+static ssize_t tm_attr_show(struct device *d,
char *buf, int sched)
{
struct port_info *pi = netdev_priv(to_net_dev(d));
@@ -550,7 +620,7 @@ static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
return len;
}
-static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
+static ssize_t tm_attr_store(struct device *d,
const char *buf, size_t len, int sched)
{
struct port_info *pi = netdev_priv(to_net_dev(d));
@@ -578,12 +648,12 @@ static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
char *buf) \
{ \
- return tm_attr_show(d, attr, buf, sched); \
+ return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
const char *buf, size_t len) \
{ \
- return tm_attr_store(d, attr, buf, len, sched); \
+ return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
@@ -720,7 +790,7 @@ static int upgrade_fw(struct adapter *adap)
else
dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
-
+
return ret;
}
@@ -747,7 +817,7 @@ static int update_tpsram(struct adapter *adap)
struct device *dev = &adap->pdev->dev;
int ret;
char rev;
-
+
rev = t3rev2char(adap);
if (!rev)
return 0;
@@ -761,10 +831,10 @@ static int update_tpsram(struct adapter *adap)
buf);
return ret;
}
-
+
ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
if (ret)
- goto release_tpsram;
+ goto release_tpsram;
ret = t3_set_proto_sram(adap, tpsram->data);
if (ret == 0)
@@ -780,7 +850,7 @@ static int update_tpsram(struct adapter *adap)
release_tpsram:
release_firmware(tpsram);
-
+
return ret;
}
@@ -818,6 +888,7 @@ static int cxgb_up(struct adapter *adap)
if (err)
goto out;
+ t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
err = setup_sge_qsets(adap);
@@ -839,7 +910,8 @@ static int cxgb_up(struct adapter *adap)
if (err)
goto irq_err;
- if (request_msix_data_irqs(adap)) {
+ err = request_msix_data_irqs(adap);
+ if (err) {
free_irq(adap->msix_info[0].vec, adap);
goto irq_err;
}
@@ -856,6 +928,16 @@ static int cxgb_up(struct adapter *adap)
t3_sge_start(adap);
t3_intr_enable(adap);
+ if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
+ is_offload(adap) && init_tp_parity(adap) == 0)
+ adap->flags |= TP_PARITY_INIT;
+
+ if (adap->flags & TP_PARITY_INIT) {
+ t3_write_reg(adap, A_TP_INT_CAUSE,
+ F_CMCACHEPERR | F_ARPLUTPERR);
+ t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
+ }
+
if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
bind_qsets(adap);
adap->flags |= QUEUES_BOUND;
@@ -1560,7 +1642,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
e->magic = EEPROM_MAGIC;
for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
- err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
+ err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
if (!err)
memcpy(data, buf + e->offset, e->len);
@@ -1573,7 +1655,8 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- u32 aligned_offset, aligned_len, *p;
+ u32 aligned_offset, aligned_len;
+ __le32 *p;
u8 *buf;
int err;
@@ -1587,11 +1670,11 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
buf = kmalloc(aligned_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
+ err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
if (!err && aligned_len > 4)
err = t3_seeprom_read(adapter,
aligned_offset + aligned_len - 4,
- (u32 *) & buf[aligned_len - 4]);
+ (__le32 *) & buf[aligned_len - 4]);
if (err)
goto out;
memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
@@ -1602,7 +1685,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
if (err)
goto out;
- for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
+ for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
err = t3_seeprom_write(adapter, aligned_offset, *p);
aligned_offset += 4;
}
@@ -2144,7 +2227,7 @@ static void cxgb_netpoll(struct net_device *dev)
for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
struct sge_qset *qs = &adapter->sge.qs[qidx];
void *source;
-
+
if (adapter->flags & USING_MSIX)
source = qs;
else
@@ -2315,6 +2398,106 @@ void t3_fatal_err(struct adapter *adapter)
}
+/**
+ * t3_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+ int i;
+
+ /* Stop all ports */
+ for_each_port(adapter, i) {
+ struct net_device *netdev = adapter->port[i];
+
+ if (netif_running(netdev))
+ cxgb_close(netdev);
+ }
+
+ if (is_offload(adapter) &&
+ test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
+ offload_close(&adapter->tdev);
+
+ /* Free sge resources */
+ t3_free_sge_resources(adapter);
+
+ adapter->flags &= ~FULL_INIT_DONE;
+
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * t3_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+
+ t3_prep_adapter(adapter, adapter->params.info, 1);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * t3_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void t3_io_resume(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+ int i;
+
+ /* Restart the ports */
+ for_each_port(adapter, i) {
+ struct net_device *netdev = adapter->port[i];
+
+ if (netif_running(netdev)) {
+ if (cxgb_open(netdev)) {
+ dev_err(&pdev->dev,
+ "can't bring device back up"
+ " after reset\n");
+ continue;
+ }
+ netif_device_attach(netdev);
+ }
+ }
+
+ if (is_offload(adapter)) {
+ __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
+ if (offload_open(adapter->port[0]))
+ printk(KERN_WARNING
+ "Could not bring back offload capabilities\n");
+ }
+}
+
+static struct pci_error_handlers t3_err_handler = {
+ .error_detected = t3_io_error_detected,
+ .slot_reset = t3_io_slot_reset,
+ .resume = t3_io_resume,
+};
+
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
struct msix_entry entries[SGE_QSETS + 1];
@@ -2507,7 +2690,7 @@ static int __devinit init_one(struct pci_dev *pdev,
err = -ENODEV;
goto out_free_dev;
}
-
+
/*
* The card is now ready to go. If any errors occur during device
* registration we do not fail the whole card but rather proceed only
@@ -2584,10 +2767,6 @@ static void __devexit remove_one(struct pci_dev *pdev)
sysfs_remove_group(&adapter->port[0]->dev.kobj,
&cxgb3_attr_group);
- for_each_port(adapter, i)
- if (test_bit(i, &adapter->registered_device_map))
- unregister_netdev(adapter->port[i]);
-
if (is_offload(adapter)) {
cxgb3_adapter_unofld(adapter);
if (test_bit(OFFLOAD_DEVMAP_BIT,
@@ -2595,6 +2774,10 @@ static void __devexit remove_one(struct pci_dev *pdev)
offload_close(&adapter->tdev);
}
+ for_each_port(adapter, i)
+ if (test_bit(i, &adapter->registered_device_map))
+ unregister_netdev(adapter->port[i]);
+
t3_free_sge_resources(adapter);
cxgb_disable_msi(adapter);
@@ -2615,6 +2798,7 @@ static struct pci_driver driver = {
.id_table = cxgb3_pci_tbl,
.probe = init_one,
.remove = __devexit_p(remove_one),
+ .err_handler = &t3_err_handler,
};
static int __init cxgb3_init_module(void)
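
The t3_io_error_detected/t3_io_slot_reset/t3_io_resume callbacks added above follow the standard PCI error-recovery sequence: the core reports the error, the driver quiesces and asks for a reset, and once the slot comes back it rebuilds state and reattaches the ports. A minimal sketch of that sequence, assuming the usual AER flow (the example_recover wrapper below is hypothetical and only illustrates the order in which the PCI core invokes the handlers registered in t3_err_handler):

/* Hypothetical illustration only: the order in which the PCI core is
 * expected to drive the callbacks registered in t3_err_handler above. */
static void example_recover(struct pci_dev *pdev,
			    const struct pci_error_handlers *eh,
			    pci_channel_state_t state)
{
	if (eh->error_detected(pdev, state) != PCI_ERS_RESULT_NEED_RESET)
		return;			/* driver chose to disconnect */
	/* ...PCI core resets the slot/link here... */
	if (eh->slot_reset(pdev) == PCI_ERS_RESULT_RECOVERED)
		eh->resume(pdev);	/* normal operation may continue */
}
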
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index bd25421bc12..d48c396bdab 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -403,8 +403,6 @@ static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
int n)
{
- CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data %u\n",
- n, ntohl(*(__be32 *)skbs[0]->data));
while (n--)
dev_kfree_skb_any(skbs[n]);
return 0;
@@ -488,7 +486,7 @@ static void t3_process_tid_release_list(struct work_struct *work)
tid_release_task);
struct sk_buff *skb;
struct t3cdev *tdev = td->dev;
-
+
spin_lock_bh(&td->tid_release_lock);
while (td->tid_release_list) {
@@ -634,6 +632,18 @@ static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
return CPL_RET_BUF_DONE;
}
+static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_rte_write_rpl *rpl = cplhdr(skb);
+
+ if (rpl->status != CPL_ERR_NONE)
+ printk(KERN_ERR
+ "Unexpected RTE_WRITE_RPL status %u for entry %u\n",
+ rpl->status, GET_TID(rpl));
+
+ return CPL_RET_BUF_DONE;
+}
+
static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
struct cpl_act_open_rpl *rpl = cplhdr(skb);
@@ -1004,7 +1014,7 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
if (!is_offloading(olddev))
return;
if (!is_offloading(newdev)) {
- printk(KERN_WARNING "%s: Redirect to non-offload"
+ printk(KERN_WARNING "%s: Redirect to non-offload "
"device ignored.\n", __FUNCTION__);
return;
}
@@ -1257,6 +1267,7 @@ void __init cxgb3_offload_init(void)
t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
+ t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
diff --git a/drivers/net/cxgb3/firmware_exports.h b/drivers/net/cxgb3/firmware_exports.h
index 6a835f6a262..b75ddd8777f 100644
--- a/drivers/net/cxgb3/firmware_exports.h
+++ b/drivers/net/cxgb3/firmware_exports.h
@@ -76,14 +76,14 @@
#define FW_WROPCODE_MNGT 0x1D
#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
-/* Maximum size of a WR sent from the host, limited by the SGE.
+/* Maximum size of a WR sent from the host, limited by the SGE.
*
- * Note: WR coming from ULP or TP are only limited by CIM.
+ * Note: WR coming from ULP or TP are only limited by CIM.
*/
#define FW_WR_SIZE 128
/* Maximum number of outstanding WRs sent from the host. Value must be
- * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
+ * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
* offload modules to limit the number of WRs per connection.
*/
#define FW_T3_WR_NUM 16
@@ -99,7 +99,7 @@
* queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
* start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
*
- * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
+ * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
* to RESP Queue[i].
*/
#define FW_TUNNEL_NUM 8
@@ -116,10 +116,10 @@
#define FW_CTRL_SGEEC_START 65528
#define FW_CTRL_TID_START 65536
-/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
- * queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
- *
- * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
+/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
+ * queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
+ *
+ * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
* OFFLOAD Queues, as the host is responsible for providing the correct TID in
* every WR.
*
@@ -129,14 +129,14 @@
#define FW_OFLD_SGEEC_START 0
/*
- *
+ *
*/
#define FW_RI_NUM 1
#define FW_RI_SGEEC_START 65527
#define FW_RI_TID_START 65552
/*
- * The RX_PKT_TID
+ * The RX_PKT_TID
*/
#define FW_RX_PKT_NUM 1
#define FW_RX_PKT_TID_START 65553
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index d660af74606..17ed4c3527b 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -337,7 +337,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
atomic_set(&e->refcnt, 1);
neigh_replace(e, neigh);
if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
- e->vlan = VLAN_DEV_INFO(neigh->dev)->vlan_id;
+ e->vlan = vlan_dev_info(neigh->dev)->vlan_id;
else
e->vlan = VLAN_NONE;
spin_unlock(&e->lock);
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 5e1bc0dec5f..02dbbb30092 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -1,5 +1,17 @@
#define A_SG_CONTROL 0x0
+#define S_CONGMODE 29
+#define V_CONGMODE(x) ((x) << S_CONGMODE)
+#define F_CONGMODE V_CONGMODE(1U)
+
+#define S_TNLFLMODE 28
+#define V_TNLFLMODE(x) ((x) << S_TNLFLMODE)
+#define F_TNLFLMODE V_TNLFLMODE(1U)
+
+#define S_FATLPERREN 27
+#define V_FATLPERREN(x) ((x) << S_FATLPERREN)
+#define F_FATLPERREN V_FATLPERREN(1U)
+
#define S_DROPPKT 20
#define V_DROPPKT(x) ((x) << S_DROPPKT)
#define F_DROPPKT V_DROPPKT(1U)
@@ -172,6 +184,64 @@
#define A_SG_INT_CAUSE 0x5c
+#define S_HIRCQPARITYERROR 31
+#define V_HIRCQPARITYERROR(x) ((x) << S_HIRCQPARITYERROR)
+#define F_HIRCQPARITYERROR V_HIRCQPARITYERROR(1U)
+
+#define S_LORCQPARITYERROR 30
+#define V_LORCQPARITYERROR(x) ((x) << S_LORCQPARITYERROR)
+#define F_LORCQPARITYERROR V_LORCQPARITYERROR(1U)
+
+#define S_HIDRBPARITYERROR 29
+#define V_HIDRBPARITYERROR(x) ((x) << S_HIDRBPARITYERROR)
+#define F_HIDRBPARITYERROR V_HIDRBPARITYERROR(1U)
+
+#define S_LODRBPARITYERROR 28
+#define V_LODRBPARITYERROR(x) ((x) << S_LODRBPARITYERROR)
+#define F_LODRBPARITYERROR V_LODRBPARITYERROR(1U)
+
+#define S_FLPARITYERROR 22
+#define M_FLPARITYERROR 0x3f
+#define V_FLPARITYERROR(x) ((x) << S_FLPARITYERROR)
+#define G_FLPARITYERROR(x) (((x) >> S_FLPARITYERROR) & M_FLPARITYERROR)
+
+#define S_ITPARITYERROR 20
+#define M_ITPARITYERROR 0x3
+#define V_ITPARITYERROR(x) ((x) << S_ITPARITYERROR)
+#define G_ITPARITYERROR(x) (((x) >> S_ITPARITYERROR) & M_ITPARITYERROR)
+
+#define S_IRPARITYERROR 19
+#define V_IRPARITYERROR(x) ((x) << S_IRPARITYERROR)
+#define F_IRPARITYERROR V_IRPARITYERROR(1U)
+
+#define S_RCPARITYERROR 18
+#define V_RCPARITYERROR(x) ((x) << S_RCPARITYERROR)
+#define F_RCPARITYERROR V_RCPARITYERROR(1U)
+
+#define S_OCPARITYERROR 17
+#define V_OCPARITYERROR(x) ((x) << S_OCPARITYERROR)
+#define F_OCPARITYERROR V_OCPARITYERROR(1U)
+
+#define S_CPPARITYERROR 16
+#define V_CPPARITYERROR(x) ((x) << S_CPPARITYERROR)
+#define F_CPPARITYERROR V_CPPARITYERROR(1U)
+
+#define S_R_REQ_FRAMINGERROR 15
+#define V_R_REQ_FRAMINGERROR(x) ((x) << S_R_REQ_FRAMINGERROR)
+#define F_R_REQ_FRAMINGERROR V_R_REQ_FRAMINGERROR(1U)
+
+#define S_UC_REQ_FRAMINGERROR 14
+#define V_UC_REQ_FRAMINGERROR(x) ((x) << S_UC_REQ_FRAMINGERROR)
+#define F_UC_REQ_FRAMINGERROR V_UC_REQ_FRAMINGERROR(1U)
+
+#define S_HICTLDRBDROPERR 13
+#define V_HICTLDRBDROPERR(x) ((x) << S_HICTLDRBDROPERR)
+#define F_HICTLDRBDROPERR V_HICTLDRBDROPERR(1U)
+
+#define S_LOCTLDRBDROPERR 12
+#define V_LOCTLDRBDROPERR(x) ((x) << S_LOCTLDRBDROPERR)
+#define F_LOCTLDRBDROPERR V_LOCTLDRBDROPERR(1U)
+
#define S_HIPIODRBDROPERR 11
#define V_HIPIODRBDROPERR(x) ((x) << S_HIPIODRBDROPERR)
#define F_HIPIODRBDROPERR V_HIPIODRBDROPERR(1U)
@@ -286,6 +356,10 @@
#define A_PCIX_CFG 0x88
+#define S_DMASTOPEN 19
+#define V_DMASTOPEN(x) ((x) << S_DMASTOPEN)
+#define F_DMASTOPEN V_DMASTOPEN(1U)
+
#define S_CLIDECEN 18
#define V_CLIDECEN(x) ((x) << S_CLIDECEN)
#define F_CLIDECEN V_CLIDECEN(1U)
@@ -313,6 +387,22 @@
#define V_BISTERR(x) ((x) << S_BISTERR)
+#define S_TXPARERR 18
+#define V_TXPARERR(x) ((x) << S_TXPARERR)
+#define F_TXPARERR V_TXPARERR(1U)
+
+#define S_RXPARERR 17
+#define V_RXPARERR(x) ((x) << S_RXPARERR)
+#define F_RXPARERR V_RXPARERR(1U)
+
+#define S_RETRYLUTPARERR 16
+#define V_RETRYLUTPARERR(x) ((x) << S_RETRYLUTPARERR)
+#define F_RETRYLUTPARERR V_RETRYLUTPARERR(1U)
+
+#define S_RETRYBUFPARERR 15
+#define V_RETRYBUFPARERR(x) ((x) << S_RETRYBUFPARERR)
+#define F_RETRYBUFPARERR V_RETRYBUFPARERR(1U)
+
#define S_PCIE_MSIXPARERR 12
#define M_PCIE_MSIXPARERR 0x7
@@ -348,6 +438,10 @@
#define A_PCIE_INT_CAUSE 0x84
+#define S_PCIE_DMASTOPEN 24
+#define V_PCIE_DMASTOPEN(x) ((x) << S_PCIE_DMASTOPEN)
+#define F_PCIE_DMASTOPEN V_PCIE_DMASTOPEN(1U)
+
#define A_PCIE_CFG 0x88
#define S_PCIE_CLIDECEN 16
@@ -741,6 +835,54 @@
#define A_CIM_HOST_INT_ENABLE 0x298
+#define S_DTAGPARERR 28
+#define V_DTAGPARERR(x) ((x) << S_DTAGPARERR)
+#define F_DTAGPARERR V_DTAGPARERR(1U)
+
+#define S_ITAGPARERR 27
+#define V_ITAGPARERR(x) ((x) << S_ITAGPARERR)
+#define F_ITAGPARERR V_ITAGPARERR(1U)
+
+#define S_IBQTPPARERR 26
+#define V_IBQTPPARERR(x) ((x) << S_IBQTPPARERR)
+#define F_IBQTPPARERR V_IBQTPPARERR(1U)
+
+#define S_IBQULPPARERR 25
+#define V_IBQULPPARERR(x) ((x) << S_IBQULPPARERR)
+#define F_IBQULPPARERR V_IBQULPPARERR(1U)
+
+#define S_IBQSGEHIPARERR 24
+#define V_IBQSGEHIPARERR(x) ((x) << S_IBQSGEHIPARERR)
+#define F_IBQSGEHIPARERR V_IBQSGEHIPARERR(1U)
+
+#define S_IBQSGELOPARERR 23
+#define V_IBQSGELOPARERR(x) ((x) << S_IBQSGELOPARERR)
+#define F_IBQSGELOPARERR V_IBQSGELOPARERR(1U)
+
+#define S_OBQULPLOPARERR 22
+#define V_OBQULPLOPARERR(x) ((x) << S_OBQULPLOPARERR)
+#define F_OBQULPLOPARERR V_OBQULPLOPARERR(1U)
+
+#define S_OBQULPHIPARERR 21
+#define V_OBQULPHIPARERR(x) ((x) << S_OBQULPHIPARERR)
+#define F_OBQULPHIPARERR V_OBQULPHIPARERR(1U)
+
+#define S_OBQSGEPARERR 20
+#define V_OBQSGEPARERR(x) ((x) << S_OBQSGEPARERR)
+#define F_OBQSGEPARERR V_OBQSGEPARERR(1U)
+
+#define S_DCACHEPARERR 19
+#define V_DCACHEPARERR(x) ((x) << S_DCACHEPARERR)
+#define F_DCACHEPARERR V_DCACHEPARERR(1U)
+
+#define S_ICACHEPARERR 18
+#define V_ICACHEPARERR(x) ((x) << S_ICACHEPARERR)
+#define F_ICACHEPARERR V_ICACHEPARERR(1U)
+
+#define S_DRAMPARERR 17
+#define V_DRAMPARERR(x) ((x) << S_DRAMPARERR)
+#define F_DRAMPARERR V_DRAMPARERR(1U)
+
#define A_CIM_HOST_INT_CAUSE 0x29c
#define S_BLKWRPLINT 12
@@ -799,8 +941,42 @@
#define A_CIM_HOST_ACC_DATA 0x2b4
+#define A_CIM_IBQ_DBG_CFG 0x2c0
+
+#define S_IBQDBGADDR 16
+#define M_IBQDBGADDR 0x1ff
+#define V_IBQDBGADDR(x) ((x) << S_IBQDBGADDR)
+#define G_IBQDBGADDR(x) (((x) >> S_IBQDBGADDR) & M_IBQDBGADDR)
+
+#define S_IBQDBGQID 3
+#define M_IBQDBGQID 0x3
+#define V_IBQDBGQID(x) ((x) << S_IBQDBGQID)
+#define G_IBQDBGQID(x) (((x) >> S_IBQDBGQID) & M_IBQDBGQID)
+
+#define S_IBQDBGWR 2
+#define V_IBQDBGWR(x) ((x) << S_IBQDBGWR)
+#define F_IBQDBGWR V_IBQDBGWR(1U)
+
+#define S_IBQDBGBUSY 1
+#define V_IBQDBGBUSY(x) ((x) << S_IBQDBGBUSY)
+#define F_IBQDBGBUSY V_IBQDBGBUSY(1U)
+
+#define S_IBQDBGEN 0
+#define V_IBQDBGEN(x) ((x) << S_IBQDBGEN)
+#define F_IBQDBGEN V_IBQDBGEN(1U)
+
+#define A_CIM_IBQ_DBG_DATA 0x2c8
+
#define A_TP_IN_CONFIG 0x300
+#define S_RXFBARBPRIO 25
+#define V_RXFBARBPRIO(x) ((x) << S_RXFBARBPRIO)
+#define F_RXFBARBPRIO V_RXFBARBPRIO(1U)
+
+#define S_TXFBARBPRIO 24
+#define V_TXFBARBPRIO(x) ((x) << S_TXFBARBPRIO)
+#define F_TXFBARBPRIO V_TXFBARBPRIO(1U)
+
#define S_NICMODE 14
#define V_NICMODE(x) ((x) << S_NICMODE)
#define F_NICMODE V_NICMODE(1U)
@@ -965,8 +1141,30 @@
#define V_LOCKTID(x) ((x) << S_LOCKTID)
#define F_LOCKTID V_LOCKTID(1U)
+#define S_TABLELATENCYDELTA 0
+#define M_TABLELATENCYDELTA 0xf
+#define V_TABLELATENCYDELTA(x) ((x) << S_TABLELATENCYDELTA)
+#define G_TABLELATENCYDELTA(x) \
+ (((x) >> S_TABLELATENCYDELTA) & M_TABLELATENCYDELTA)
+
#define A_TP_PC_CONFIG2 0x34c
+#define S_DISBLEDAPARBIT0 15
+#define V_DISBLEDAPARBIT0(x) ((x) << S_DISBLEDAPARBIT0)
+#define F_DISBLEDAPARBIT0 V_DISBLEDAPARBIT0(1U)
+
+#define S_ENABLEARPMISS 13
+#define V_ENABLEARPMISS(x) ((x) << S_ENABLEARPMISS)
+#define F_ENABLEARPMISS V_ENABLEARPMISS(1U)
+
+#define S_ENABLENONOFDTNLSYN 12
+#define V_ENABLENONOFDTNLSYN(x) ((x) << S_ENABLENONOFDTNLSYN)
+#define F_ENABLENONOFDTNLSYN V_ENABLENONOFDTNLSYN(1U)
+
+#define S_ENABLEIPV6RSS 11
+#define V_ENABLEIPV6RSS(x) ((x) << S_ENABLEIPV6RSS)
+#define F_ENABLEIPV6RSS V_ENABLEIPV6RSS(1U)
+
#define S_CHDRAFULL 4
#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL)
#define F_CHDRAFULL V_CHDRAFULL(1U)
@@ -1018,6 +1216,12 @@
#define A_TP_PARA_REG4 0x370
+#define A_TP_PARA_REG5 0x374
+
+#define S_RXDDPOFFINIT 3
+#define V_RXDDPOFFINIT(x) ((x) << S_RXDDPOFFINIT)
+#define F_RXDDPOFFINIT V_RXDDPOFFINIT(1U)
+
#define A_TP_PARA_REG6 0x378
#define S_T3A_ENABLEESND 13
@@ -1138,6 +1342,10 @@
#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN)
#define F_TNLLKPEN V_TNLLKPEN(1U)
+#define S_RRCPLMAPEN 7
+#define V_RRCPLMAPEN(x) ((x) << S_RRCPLMAPEN)
+#define F_RRCPLMAPEN V_RRCPLMAPEN(1U)
+
#define S_RRCPLCPUSIZE 4
#define M_RRCPLCPUSIZE 0x7
#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE)
@@ -1146,6 +1354,10 @@
#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE)
#define F_RQFEEDBACKENABLE V_RQFEEDBACKENABLE(1U)
+#define S_HASHTOEPLITZ 2
+#define V_HASHTOEPLITZ(x) ((x) << S_HASHTOEPLITZ)
+#define F_HASHTOEPLITZ V_HASHTOEPLITZ(1U)
+
#define S_DISABLE 0
#define A_TP_TM_PIO_ADDR 0x418
@@ -1198,6 +1410,22 @@
#define A_TP_INT_ENABLE 0x470
+#define S_FLMTXFLSTEMPTY 30
+#define V_FLMTXFLSTEMPTY(x) ((x) << S_FLMTXFLSTEMPTY)
+#define F_FLMTXFLSTEMPTY V_FLMTXFLSTEMPTY(1U)
+
+#define S_FLMRXFLSTEMPTY 29
+#define V_FLMRXFLSTEMPTY(x) ((x) << S_FLMRXFLSTEMPTY)
+#define F_FLMRXFLSTEMPTY V_FLMRXFLSTEMPTY(1U)
+
+#define S_ARPLUTPERR 26
+#define V_ARPLUTPERR(x) ((x) << S_ARPLUTPERR)
+#define F_ARPLUTPERR V_ARPLUTPERR(1U)
+
+#define S_CMCACHEPERR 24
+#define V_CMCACHEPERR(x) ((x) << S_CMCACHEPERR)
+#define F_CMCACHEPERR V_CMCACHEPERR(1U)
+
#define A_TP_INT_CAUSE 0x474
#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8
@@ -1241,9 +1469,37 @@
#define A_ULPRX_INT_ENABLE 0x504
-#define S_PARERR 0
-#define V_PARERR(x) ((x) << S_PARERR)
-#define F_PARERR V_PARERR(1U)
+#define S_DATASELFRAMEERR0 7
+#define V_DATASELFRAMEERR0(x) ((x) << S_DATASELFRAMEERR0)
+#define F_DATASELFRAMEERR0 V_DATASELFRAMEERR0(1U)
+
+#define S_DATASELFRAMEERR1 6
+#define V_DATASELFRAMEERR1(x) ((x) << S_DATASELFRAMEERR1)
+#define F_DATASELFRAMEERR1 V_DATASELFRAMEERR1(1U)
+
+#define S_PCMDMUXPERR 5
+#define V_PCMDMUXPERR(x) ((x) << S_PCMDMUXPERR)
+#define F_PCMDMUXPERR V_PCMDMUXPERR(1U)
+
+#define S_ARBFPERR 4
+#define V_ARBFPERR(x) ((x) << S_ARBFPERR)
+#define F_ARBFPERR V_ARBFPERR(1U)
+
+#define S_ARBPF0PERR 3
+#define V_ARBPF0PERR(x) ((x) << S_ARBPF0PERR)
+#define F_ARBPF0PERR V_ARBPF0PERR(1U)
+
+#define S_ARBPF1PERR 2
+#define V_ARBPF1PERR(x) ((x) << S_ARBPF1PERR)
+#define F_ARBPF1PERR V_ARBPF1PERR(1U)
+
+#define S_PARERRPCMD 1
+#define V_PARERRPCMD(x) ((x) << S_PARERRPCMD)
+#define F_PARERRPCMD V_PARERRPCMD(1U)
+
+#define S_PARERRDATA 0
+#define V_PARERRDATA(x) ((x) << S_PARERRDATA)
+#define F_PARERRDATA V_PARERRDATA(1U)
#define A_ULPRX_INT_CAUSE 0x508
@@ -1291,6 +1547,10 @@
#define A_ULPTX_CONFIG 0x580
+#define S_CFG_CQE_SOP_MASK 1
+#define V_CFG_CQE_SOP_MASK(x) ((x) << S_CFG_CQE_SOP_MASK)
+#define F_CFG_CQE_SOP_MASK V_CFG_CQE_SOP_MASK(1U)
+
#define S_CFG_RR_ARB 0
#define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB)
#define F_CFG_RR_ARB V_CFG_RR_ARB(1U)
@@ -1537,6 +1797,10 @@
#define A_CPL_INTR_ENABLE 0x650
+#define S_CIM_OP_MAP_PERR 5
+#define V_CIM_OP_MAP_PERR(x) ((x) << S_CIM_OP_MAP_PERR)
+#define F_CIM_OP_MAP_PERR V_CIM_OP_MAP_PERR(1U)
+
#define S_CIM_OVFL_ERROR 4
#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR)
#define F_CIM_OVFL_ERROR V_CIM_OVFL_ERROR(1U)
@@ -1937,6 +2201,10 @@
#define A_XGM_RXFIFO_CFG 0x884
+#define S_RXFIFO_EMPTY 31
+#define V_RXFIFO_EMPTY(x) ((x) << S_RXFIFO_EMPTY)
+#define F_RXFIFO_EMPTY V_RXFIFO_EMPTY(1U)
+
#define S_RXFIFOPAUSEHWM 17
#define M_RXFIFOPAUSEHWM 0xfff
@@ -1961,6 +2229,10 @@
#define A_XGM_TXFIFO_CFG 0x888
+#define S_UNDERUNFIX 22
+#define V_UNDERUNFIX(x) ((x) << S_UNDERUNFIX)
+#define F_UNDERUNFIX V_UNDERUNFIX(1U)
+
#define S_TXIPG 13
#define M_TXIPG 0xff
#define V_TXIPG(x) ((x) << S_TXIPG)
@@ -2034,10 +2306,27 @@
#define V_XAUIIMP(x) ((x) << S_XAUIIMP)
#define A_XGM_RX_MAX_PKT_SIZE 0x8a8
-#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
+
+#define S_RXMAXFRAMERSIZE 17
+#define M_RXMAXFRAMERSIZE 0x3fff
+#define V_RXMAXFRAMERSIZE(x) ((x) << S_RXMAXFRAMERSIZE)
+#define G_RXMAXFRAMERSIZE(x) (((x) >> S_RXMAXFRAMERSIZE) & M_RXMAXFRAMERSIZE)
+
+#define S_RXENFRAMER 14
+#define V_RXENFRAMER(x) ((x) << S_RXENFRAMER)
+#define F_RXENFRAMER V_RXENFRAMER(1U)
+
+#define S_RXMAXPKTSIZE 0
+#define M_RXMAXPKTSIZE 0x3fff
+#define V_RXMAXPKTSIZE(x) ((x) << S_RXMAXPKTSIZE)
+#define G_RXMAXPKTSIZE(x) (((x) >> S_RXMAXPKTSIZE) & M_RXMAXPKTSIZE)
#define A_XGM_RESET_CTRL 0x8ac
+#define S_XGMAC_STOP_EN 4
+#define V_XGMAC_STOP_EN(x) ((x) << S_XGMAC_STOP_EN)
+#define F_XGMAC_STOP_EN V_XGMAC_STOP_EN(1U)
+
#define S_XG2G_RESET_ 3
#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
#define F_XG2G_RESET_ V_XG2G_RESET_(1U)
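
The new regs.h definitions above all follow the driver's usual S_/M_/V_/F_/G_ naming: S_x is a field's bit offset, M_x its mask, V_x(v) shifts a value into position, F_x is the one-bit flag form, and G_x(reg) extracts the field back out. A minimal sketch using the CIM IBQ debug definitions from the hunk above (the two helper functions are hypothetical, added only to show the pattern):

/* Hypothetical helpers illustrating the S_/M_/V_/F_/G_ macro pattern. */
static inline unsigned int example_ibq_dbg_cfg(unsigned int addr, unsigned int qid)
{
	/* pack address and queue id for A_CIM_IBQ_DBG_CFG and set the enable bit */
	return V_IBQDBGADDR(addr) | V_IBQDBGQID(qid) | F_IBQDBGEN;
}

static inline unsigned int example_ibq_dbg_addr(unsigned int cfg)
{
	/* G_IBQDBGADDR(cfg) == (cfg >> S_IBQDBGADDR) & M_IBQDBGADDR */
	return G_IBQDBGADDR(cfg);
}
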
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index c15e43a8543..cb684d30831 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -91,6 +91,10 @@ struct rx_desc {
struct tx_sw_desc { /* SW state per Tx descriptor */
struct sk_buff *skb;
+ u8 eop; /* set if last descriptor for packet */
+ u8 addr_idx; /* buffer index of first SGL entry in descriptor */
+ u8 fragidx; /* first page fragment associated with descriptor */
+ s8 sflit; /* start flit of first SGL entry in descriptor */
};
struct rx_sw_desc { /* SW state per Rx descriptor */
@@ -109,13 +113,6 @@ struct rsp_desc { /* response queue descriptor */
u8 intr_gen;
};
-struct unmap_info { /* packet unmapping info, overlays skb->cb */
- int sflit; /* start flit of first SGL entry in Tx descriptor */
- u16 fragidx; /* first page fragment in current Tx descriptor */
- u16 addr_idx; /* buffer index of first SGL entry in descriptor */
- u32 len; /* mapped length of skb main body */
-};
-
/*
* Holds unmapping information for Tx packets that need deferred unmapping.
* This structure lives at skb->head and must be allocated by callers.
@@ -177,6 +174,7 @@ static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
static inline void refill_rspq(struct adapter *adapter,
const struct sge_rspq *q, unsigned int credits)
{
+ rmb();
t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}
@@ -209,32 +207,36 @@ static inline int need_skb_unmap(void)
*
* Unmap the main body of an sk_buff and its page fragments, if any.
* Because of the fairly complicated structure of our SGLs and the desire
- * to conserve space for metadata, we keep the information necessary to
- * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
- * in the Tx descriptors (the physical addresses of the various data
- * buffers). The send functions initialize the state in skb->cb so we
- * can unmap the buffers held in the first Tx descriptor here, and we
- * have enough information at this point to update the state for the next
- * Tx descriptor.
+ * to conserve space for metadata, the information necessary to unmap an
+ * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
+ * descriptors (the physical addresses of the various data buffers), and
+ * the SW descriptor state (assorted indices). The send functions
+ * initialize the indices for the first packet descriptor so we can unmap
+ * the buffers held in the first Tx descriptor here, and we have enough
+ * information at this point to set the state for the next Tx descriptor.
+ *
+ * Note that it is possible to clean up the first descriptor of a packet
+ * before the send routines have written the next descriptors, but this
+ * race does not cause any problem. We just end up writing the unmapping
+ * info for the descriptor first.
*/
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
unsigned int cidx, struct pci_dev *pdev)
{
const struct sg_ent *sgp;
- struct unmap_info *ui = (struct unmap_info *)skb->cb;
- int nfrags, frag_idx, curflit, j = ui->addr_idx;
+ struct tx_sw_desc *d = &q->sdesc[cidx];
+ int nfrags, frag_idx, curflit, j = d->addr_idx;
- sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
+ sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
+ frag_idx = d->fragidx;
- if (ui->len) {
- pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
- PCI_DMA_TODEVICE);
- ui->len = 0; /* so we know for next descriptor for this skb */
+ if (frag_idx == 0 && skb_headlen(skb)) {
+ pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
+ skb_headlen(skb), PCI_DMA_TODEVICE);
j = 1;
}
- frag_idx = ui->fragidx;
- curflit = ui->sflit + 1 + j;
+ curflit = d->sflit + 1 + j;
nfrags = skb_shinfo(skb)->nr_frags;
while (frag_idx < nfrags && curflit < WR_FLITS) {
@@ -250,10 +252,11 @@ static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
frag_idx++;
}
- if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
- ui->fragidx = frag_idx;
- ui->addr_idx = j;
- ui->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
+ if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
+ d = cidx + 1 == q->size ? q->sdesc : d + 1;
+ d->fragidx = frag_idx;
+ d->addr_idx = j;
+ d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
}
}
@@ -281,7 +284,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
if (d->skb) { /* an SGL is present */
if (need_unmap)
unmap_skb(d->skb, q, cidx, pdev);
- if (d->skb->priority == cidx)
+ if (d->eop)
kfree_skb(d->skb);
}
++d;
@@ -456,7 +459,7 @@ nomem: q->alloc_failed++;
}
q->credits++;
}
-
+ wmb();
t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}
@@ -912,15 +915,13 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
sd->skb = skb;
if (need_skb_unmap()) {
- struct unmap_info *ui = (struct unmap_info *)skb->cb;
-
- ui->fragidx = 0;
- ui->addr_idx = 0;
- ui->sflit = flits;
+ sd->fragidx = 0;
+ sd->addr_idx = 0;
+ sd->sflit = flits;
}
if (likely(ndesc == 1)) {
- skb->priority = pidx;
+ sd->eop = 1;
wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
V_WR_SGLSFLT(flits)) | wr_hi;
wmb();
@@ -948,6 +949,7 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
fp += avail;
d++;
+ sd->eop = 0;
sd++;
if (++pidx == q->size) {
pidx = 0;
@@ -966,7 +968,7 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
wr_gen2(d, gen);
flits = 1;
}
- skb->priority = pidx;
+ sd->eop = 1;
wrp->wr_hi |= htonl(F_WR_EOP);
wmb();
wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
@@ -1051,8 +1053,6 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
- if (need_skb_unmap())
- ((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1354,6 +1354,7 @@ static void restart_ctrlq(unsigned long data)
}
spin_unlock(&q->lock);
+ wmb();
t3_write_reg(qs->adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
@@ -1363,7 +1364,12 @@ static void restart_ctrlq(unsigned long data)
*/
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
- return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
+ int ret;
+ local_bh_disable();
+ ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
+ local_bh_enable();
+
+ return ret;
}
/**
@@ -1380,13 +1386,14 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
const dma_addr_t *p;
const struct skb_shared_info *si;
const struct deferred_unmap_info *dui;
- const struct unmap_info *ui = (struct unmap_info *)skb->cb;
dui = (struct deferred_unmap_info *)skb->head;
p = dui->addr;
- if (ui->len)
- pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);
+ if (skb->tail - skb->transport_header)
+ pci_unmap_single(dui->pdev, *p++,
+ skb->tail - skb->transport_header,
+ PCI_DMA_TODEVICE);
si = skb_shinfo(skb);
for (i = 0; i < si->nr_frags; i++)
@@ -1451,8 +1458,6 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
if (need_skb_unmap()) {
setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
skb->destructor = deferred_unmap_destructor;
- ((struct unmap_info *)skb->cb)->len = (skb->tail -
- skb->transport_header);
}
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
@@ -1574,6 +1579,7 @@ static void restart_offloadq(unsigned long data)
set_bit(TXQ_RUNNING, &q->flags);
set_bit(TXQ_LAST_PKT_DB, &q->flags);
#endif
+ wmb();
t3_write_reg(adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
@@ -1739,7 +1745,6 @@ static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
struct sk_buff *skb, struct sk_buff *rx_gather[],
unsigned int gather_idx)
{
- rq->offload_pkts++;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
@@ -1809,7 +1814,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
skb->dev->last_rx = jiffies;
pi = netdev_priv(skb->dev);
- if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
+ if (pi->rx_csum_offload && p->csum_valid && p->csum == htons(0xffff) &&
!p->fragment) {
rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1956,7 +1961,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
int eth, ethpad = 2;
struct sk_buff *skb = NULL;
u32 len, flags = ntohl(r->flags);
- u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
+ __be32 rss_hi = *(const __be32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
eth = r->rss_hdr.opcode == CPL_RX_PKT;
@@ -2033,6 +2038,7 @@ no_mem:
if (eth)
rx_eth(adap, q, skb, ethpad);
else {
+ q->offload_pkts++;
/* Preserve the RSS info in csum & priority */
skb->csum = rss_hi;
skb->priority = rss_lo;
@@ -2442,6 +2448,15 @@ irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
return t3_intr;
}
+#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
+ F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
+ V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
+ F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
+ F_HIRCQPARITYERROR)
+#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
+#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
+ F_RSPQDISABLED)
+
/**
* t3_sge_err_intr_handler - SGE async event interrupt handler
* @adapter: the adapter
@@ -2452,6 +2467,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
{
unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
+ if (status & SGE_PARERR)
+ CH_ALERT(adapter, "SGE parity error (0x%x)\n",
+ status & SGE_PARERR);
+ if (status & SGE_FRAMINGERR)
+ CH_ALERT(adapter, "SGE framing error (0x%x)\n",
+ status & SGE_FRAMINGERR);
+
if (status & F_RSPQCREDITOVERFOW)
CH_ALERT(adapter, "SGE response queue credit overflow\n");
@@ -2468,7 +2490,7 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
status & F_HIPIODRBDROPERR ? "high" : "lo");
t3_write_reg(adapter, A_SG_INT_CAUSE, status);
- if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
+ if (status & SGE_FATALERR)
t3_fatal_err(adapter);
}
@@ -2780,7 +2802,7 @@ void t3_sge_init(struct adapter *adap, struct sge_params *p)
unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
- F_CQCRDTCTRL |
+ F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
@@ -2789,7 +2811,6 @@ void t3_sge_init(struct adapter *adap, struct sge_params *p)
if (adap->params.rev > 0) {
if (!(adap->flags & (USING_MSIX | USING_MSI)))
ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
- ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
}
t3_write_reg(adap, A_SG_CONTROL, ctrl);
t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
@@ -2797,7 +2818,8 @@ void t3_sge_init(struct adapter *adap, struct sge_params *p)
t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
V_TIMEOUT(200 * core_ticks_per_usec(adap)));
- t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
+ t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
+ adap->params.rev < T3_REV_C ? 1000 : 500);
t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
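
Several sge.c hunks above add rmb()/wmb() calls before the register writes that return response-queue credits or ring egress doorbells: the descriptor and credit updates must be globally visible before the hardware is notified. A condensed sketch of the doorbell side of that ordering, reusing the driver's t3_write_reg and SG_KDOORBELL definitions (the wrapper function itself is hypothetical):

/* Hypothetical wrapper showing the write-barrier-before-doorbell ordering
 * that the hunks above add at each A_SG_KDOORBELL write. */
static inline void example_ring_tx_doorbell(struct adapter *adap, unsigned int cntxt_id)
{
	wmb();		/* make queued descriptor writes visible first */
	t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(cntxt_id));
}
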
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index d4ee00d3221..7469935877b 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -62,7 +62,7 @@ int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
return 0;
}
if (--attempts == 0)
- return -EAGAIN;
+ return -EAGAIN;
if (delay)
udelay(delay);
}
@@ -447,8 +447,8 @@ static const struct adapter_info t3_adap_info[] = {
&mi1_mdio_ops, "Chelsio T302"},
{1, 0, 0, 0,
F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
- F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
- SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+ F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
+ 0, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
&mi1_mdio_ext_ops, "Chelsio T310"},
{2, 0, 0, 0,
F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
@@ -537,10 +537,11 @@ struct t3_vpd {
* address is written to the control register. The hardware device will
* set the flag to 1 when 4 bytes have been read into the data register.
*/
-int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
+int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
u16 val;
int attempts = EEPROM_MAX_POLL;
+ u32 v;
unsigned int base = adapter->params.pci.vpd_cap_addr;
if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
@@ -556,8 +557,8 @@ int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
return -EIO;
}
- pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
- *data = le32_to_cpu(*data);
+ pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
+ *data = cpu_to_le32(v);
return 0;
}
@@ -570,7 +571,7 @@ int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
* Write a 32-bit word to a location in VPD EEPROM using the card's PCI
* VPD ROM capability.
*/
-int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
+int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
{
u16 val;
int attempts = EEPROM_MAX_POLL;
@@ -580,7 +581,7 @@ int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
return -EINVAL;
pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
- cpu_to_le32(data));
+ le32_to_cpu(data));
pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
addr | PCI_VPD_ADDR_F);
do {
@@ -631,14 +632,14 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
* Card information is normally at VPD_BASE but some early cards had
* it at 0.
*/
- ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
+ ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
if (ret)
return ret;
addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
for (i = 0; i < sizeof(vpd); i += 4) {
ret = t3_seeprom_read(adapter, addr + i,
- (u32 *)((u8 *)&vpd + i));
+ (__le32 *)((u8 *)&vpd + i));
if (ret)
return ret;
}
@@ -865,7 +866,7 @@ int t3_get_tp_version(struct adapter *adapter, u32 *vers)
1, 1, 5, 1);
if (ret)
return ret;
-
+
*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
return 0;
@@ -896,7 +897,7 @@ int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
major = G_TP_VERSION_MAJOR(vers);
minor = G_TP_VERSION_MINOR(vers);
- if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
+ if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
return 0;
if (major != TP_VERSION_MAJOR)
@@ -913,7 +914,7 @@ int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
}
/**
- * t3_check_tpsram - check if provided protocol SRAM
+ * t3_check_tpsram - check if provided protocol SRAM
* is compatible with this driver
* @adapter: the adapter
* @tp_sram: the firmware image to write
@@ -926,7 +927,7 @@ int t3_check_tpsram(struct adapter *adapter, u8 *tp_sram, unsigned int size)
{
u32 csum;
unsigned int i;
- const u32 *p = (const u32 *)tp_sram;
+ const __be32 *p = (const __be32 *)tp_sram;
/* Verify checksum */
for (csum = 0, i = 0; i < size / sizeof(csum); i++)
@@ -988,13 +989,17 @@ int t3_check_fw_version(struct adapter *adapter, int *must_load)
CH_ERR(adapter, "found wrong FW version(%u.%u), "
"driver needs version %u.%u\n", major, minor,
FW_VERSION_MAJOR, FW_VERSION_MINOR);
- else {
+ else if (minor < FW_VERSION_MINOR) {
*must_load = 0;
- CH_WARN(adapter, "found wrong FW minor version(%u.%u), "
+ CH_WARN(adapter, "found old FW minor version(%u.%u), "
"driver compiled for version %u.%u\n", major, minor,
FW_VERSION_MAJOR, FW_VERSION_MINOR);
+ } else {
+ CH_WARN(adapter, "found newer FW version(%u.%u), "
+ "driver compiled for version %u.%u\n", major, minor,
+ FW_VERSION_MAJOR, FW_VERSION_MINOR);
+ return 0;
}
-
return -EINVAL;
}
@@ -1036,7 +1041,7 @@ int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
u32 csum;
unsigned int i;
- const u32 *p = (const u32 *)fw_data;
+ const __be32 *p = (const __be32 *)fw_data;
int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
if ((size & 3) || size < FW_MIN_SIZE)
@@ -1259,7 +1264,13 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
return fatal;
}
-#define SGE_INTR_MASK (F_RSPQDISABLED)
+#define SGE_INTR_MASK (F_RSPQDISABLED | \
+ F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
+ F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
+ F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
+ V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
+ F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
+ F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
F_NFASRCHFAIL)
@@ -1276,16 +1287,23 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
- V_BISTERR(M_BISTERR) | F_PEXERR)
-#define ULPRX_INTR_MASK F_PARERR
-#define ULPTX_INTR_MASK 0
-#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
+ F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
+ F_TXPARERR | V_BISTERR(M_BISTERR))
+#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
+ F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
+ F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
+#define ULPTX_INTR_MASK 0xfc
+#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
- F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
+ F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
+ F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
+ F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
+ F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
+ F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
@@ -1354,6 +1372,10 @@ static void pcie_intr_handler(struct adapter *adapter)
{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
"PCI MSI-X table/PBA parity error", -1, 1},
+ {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
+ {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
+ {F_RXPARERR, "PCI Rx parity error", -1, 1},
+ {F_TXPARERR, "PCI Tx parity error", -1, 1},
{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
{0}
};
@@ -1379,8 +1401,16 @@ static void tp_intr_handler(struct adapter *adapter)
{0}
};
+ static struct intr_info tp_intr_info_t3c[] = {
+ {0x1fffffff, "TP parity error", -1, 1},
+ {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
+ {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
+ {0}
+ };
+
if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
- tp_intr_info, NULL))
+ adapter->params.rev < T3_REV_C ?
+ tp_intr_info : tp_intr_info_t3c, NULL))
t3_fatal_err(adapter);
}
@@ -1402,6 +1432,18 @@ static void cim_intr_handler(struct adapter *adapter)
{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
+ {F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
+ {F_ICACHEPARERR, "CIM icache parity error", -1, 1},
+ {F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
+ {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
+ {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
+ {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
+ {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
+ {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
+ {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
+ {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
+ {F_ITAGPARERR, "CIM itag parity error", -1, 1},
+ {F_DTAGPARERR, "CIM dtag parity error", -1, 1},
{0}
};
@@ -1416,7 +1458,14 @@ static void cim_intr_handler(struct adapter *adapter)
static void ulprx_intr_handler(struct adapter *adapter)
{
static const struct intr_info ulprx_intr_info[] = {
- {F_PARERR, "ULP RX parity error", -1, 1},
+ {F_PARERRDATA, "ULP RX data parity error", -1, 1},
+ {F_PARERRPCMD, "ULP RX command parity error", -1, 1},
+ {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
+ {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
+ {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
+ {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
+ {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
+ {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
{0}
};
@@ -1435,6 +1484,7 @@ static void ulptx_intr_handler(struct adapter *adapter)
STAT_ULP_CH0_PBL_OOB, 0},
{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
STAT_ULP_CH1_PBL_OOB, 0},
+ {0xfc, "ULP TX parity error", -1, 1},
{0}
};
@@ -1509,7 +1559,8 @@ static void pmrx_intr_handler(struct adapter *adapter)
static void cplsw_intr_handler(struct adapter *adapter)
{
static const struct intr_info cplsw_intr_info[] = {
-/* { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
+ {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
+ {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
@@ -1730,7 +1781,6 @@ void t3_intr_enable(struct adapter *adapter)
MC7_INTR_MASK},
{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
- {A_TP_INT_ENABLE, 0x3bfffff},
{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
@@ -1740,6 +1790,8 @@ void t3_intr_enable(struct adapter *adapter)
adapter->slow_intr_mask = PL_INTR_MASK;
t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
+ t3_write_reg(adapter, A_TP_INT_ENABLE,
+ adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
if (adapter->params.rev > 0) {
t3_write_reg(adapter, A_CPL_INTR_ENABLE,
@@ -1894,6 +1946,16 @@ static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
+static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
+ unsigned int type)
+{
+ t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
+ t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
+ t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
+ t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
+ return t3_sge_write_context(adap, id, type);
+}
+
/**
* t3_sge_init_ecntxt - initialize an SGE egress context
* @adapter: the adapter to configure
@@ -2395,7 +2457,7 @@ static inline unsigned int pm_num_pages(unsigned int mem_size,
t3_write_reg((adap), A_ ## reg, (start)); \
start += size
-/*
+/**
* partition_mem - partition memory and configure TP memory settings
* @adap: the adapter
* @p: the TP parameters
@@ -2480,7 +2542,7 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
- t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
+ t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
F_IPV6ENABLE | F_NICMODE);
t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
@@ -2492,10 +2554,12 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
F_ENABLEEPCMDAFULL,
F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
- t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
+ t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
+ F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
+ F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
-
+
if (adap->params.rev > 0) {
tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
@@ -2505,6 +2569,11 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
} else
t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
+ if (adap->params.rev == T3_REV_C)
+ t3_set_reg_field(adap, A_TP_PC_CONFIG,
+ V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
+ V_TABLELATENCYDELTA(4));
+
t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
@@ -2613,7 +2682,7 @@ static void __devinit init_mtus(unsigned short mtus[])
 * it can accommodate max size TCP/IP headers when SACK and timestamps
* are enabled and still have at least 8 bytes of payload.
*/
- mtus[1] = 88;
+ mtus[0] = 88;
mtus[1] = 88;
mtus[2] = 256;
mtus[3] = 512;
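
A quick sanity check of the 88-byte floor referenced in the comment above (hedged arithmetic, not driver code): a maximally optioned IPv4/TCP header is 20 + 20 + 40 = 80 bytes, so an 88-byte entry still leaves the required 8 bytes of payload. A hypothetical compile-time check of that reasoning:

static inline void check_min_mtu_entry(void)
{
	/* 20-byte IPv4 header + 20-byte TCP header + up to 40 bytes of TCP
	 * options (SACK blocks plus timestamps) = 80; 88 - 80 leaves 8. */
	BUILD_BUG_ON(88 - (20 + 20 + 40) < 8);
}
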
@@ -2809,15 +2878,15 @@ static void ulp_config(struct adapter *adap, const struct tp_params *p)
int t3_set_proto_sram(struct adapter *adap, u8 *data)
{
int i;
- u32 *buf = (u32 *)data;
+ __be32 *buf = (__be32 *)data;
for (i = 0; i < PROTO_SRAM_LINES; i++) {
- t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
- t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
- t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
- t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
- t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
-
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
+
t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
return -EIO;
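
The conversion change above is worth spelling out: the protocol-SRAM image is a stream of big-endian 32-bit words (hence the __be32 pointer), while t3_write_reg() takes a host-order value. be32_to_cpu() is therefore the correct direction on both little- and big-endian hosts, whereas the old cpu_to_be32() on already-big-endian data only happened to work where the two byte orders coincide. A minimal illustration, assuming only the driver's t3_write_reg() helper:

static inline void write_proto_sram_word(struct adapter *adap,
					 unsigned int reg, const __be32 *word)
{
	/* The image word is big-endian in memory; convert to host order
	 * before handing it to the register-write helper. */
	t3_write_reg(adap, reg, be32_to_cpu(*word));
}
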
@@ -3194,7 +3263,8 @@ static void config_pcie(struct adapter *adap)
V_REPLAYLMT(rpllmt));
t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
- t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
+ t3_set_reg_field(adap, A_PCIE_CFG, 0,
+ F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}
/*
@@ -3207,7 +3277,7 @@ static void config_pcie(struct adapter *adap)
*/
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
- int err = -EIO, attempts = 100;
+ int err = -EIO, attempts, i;
const struct vpd_params *vpd = &adapter->params.vpd;
if (adapter->params.rev > 0)
@@ -3225,6 +3295,10 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
adapter->params.mc5.nfilters,
adapter->params.mc5.nroutes))
goto out_err;
+
+ for (i = 0; i < 32; i++)
+ if (clear_sge_ctxt(adapter, i, F_CQ))
+ goto out_err;
}
if (tp_init(adapter, &adapter->params.tp))
@@ -3240,7 +3314,12 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
if (is_pcie(adapter))
config_pcie(adapter);
else
- t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
+ t3_set_reg_field(adapter, A_PCIX_CFG, 0,
+ F_DMASTOPEN | F_CLIDECEN);
+
+ if (adapter->params.rev == T3_REV_C)
+ t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
+ F_CFG_CQE_SOP_MASK);
t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
t3_write_reg(adapter, A_PM1_RX_MODE, 0);
@@ -3253,6 +3332,7 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
+ attempts = 100;
do { /* wait for uP to initialize */
msleep(20);
} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
@@ -3387,6 +3467,7 @@ void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
t3_write_reg(adapter, A_T3DBG_GPIO_EN,
ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
+ t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
if (adapter->params.rev == 0 || !uses_xaui(adapter))
val |= F_ENRGMII;
@@ -3403,13 +3484,13 @@ void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
}
/*
- * Reset the adapter.
+ * Reset the adapter.
* Older PCIe cards lose their config space during reset, PCI-X
* ones don't.
*/
static int t3_reset_adapter(struct adapter *adapter)
{
- int i, save_and_restore_pcie =
+ int i, save_and_restore_pcie =
adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
uint16_t devid = 0;
@@ -3436,6 +3517,36 @@ static int t3_reset_adapter(struct adapter *adapter)
return 0;
}
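
The comment above explains why save_and_restore_pcie is computed: pre-B2 PCIe parts lose their configuration space across the warm reset, PCI-X parts do not. The body of t3_reset_adapter() is not shown in this hunk; the sketch below is only the usual save/restore-around-reset pattern using the standard pci_save_state()/pci_restore_state() kernel APIs, and the A_PL_RST write is an assumption about the reset trigger rather than something confirmed by this diff.

static int reset_with_config_save(struct adapter *adapter, int save_restore)
{
	if (save_restore)
		pci_save_state(adapter->pdev);

	/* Assumed warm-reset trigger; older PCIe cards drop their config
	 * space across this, so it is restored afterwards. */
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
	msleep(100);

	if (save_restore)
		pci_restore_state(adapter->pdev);
	return 0;
}
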
+static int __devinit init_parity(struct adapter *adap)
+{
+ int i, err, addr;
+
+ if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ for (err = i = 0; !err && i < 16; i++)
+ err = clear_sge_ctxt(adap, i, F_EGRESS);
+ for (i = 0xfff0; !err && i <= 0xffff; i++)
+ err = clear_sge_ctxt(adap, i, F_EGRESS);
+ for (i = 0; !err && i < SGE_QSETS; i++)
+ err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
+ if (err)
+ return err;
+
+ t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
+ for (i = 0; i < 4; i++)
+ for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
+ t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
+ F_IBQDBGWR | V_IBQDBGQID(i) |
+ V_IBQDBGADDR(addr));
+ err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
+ F_IBQDBGBUSY, 0, 2, 1);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
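
init_parity() above drives the CIM IBQ debug interface with the same three-step pattern for every queue location: preload A_CIM_IBQ_DBG_DATA, issue a write command for the (queue, address) pair, then poll the busy bit. Factored out purely for clarity (an illustrative helper, not part of the patch):

static int ibq_dbg_write(struct adapter *adap, unsigned int qid,
			 unsigned int addr)
{
	/* Write the value latched in A_CIM_IBQ_DBG_DATA to location 'addr'
	 * of inbound queue 'qid', then wait for the debug engine to idle. */
	t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN | F_IBQDBGWR |
		     V_IBQDBGQID(qid) | V_IBQDBGADDR(addr));
	return t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0, 2, 1);
}
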
/*
* Initialize adapter SW state for the various HW modules, set initial values
* for some adapter tunables, take PHYs out of reset, and initialize the MDIO
@@ -3503,6 +3614,9 @@ int __devinit t3_prep_adapter(struct adapter *adapter,
}
early_hw_init(adapter, ai);
+ ret = init_parity(adapter);
+ if (ret)
+ return ret;
for_each_port(adapter, i) {
u8 hw_addr[6];
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index ef1c6339c80..229303ff6a3 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -38,7 +38,7 @@
#define DRV_VERSION "1.0-ko"
/* Firmware version */
-#define FW_VERSION_MAJOR 4
-#define FW_VERSION_MINOR 6
+#define FW_VERSION_MAJOR 5
+#define FW_VERSION_MINOR 0
#define FW_VERSION_MICRO 0
#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index eeb766aeced..ffdc0a1892b 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -106,6 +106,7 @@ int t3_mac_reset(struct cmac *mac)
t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
F_RXSTRFRWRD | F_DISERRFRAMES,
uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
+ t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX);
if (uses_xaui(adap)) {
if (adap->params.rev == 0) {
@@ -124,7 +125,11 @@ int t3_mac_reset(struct cmac *mac)
xaui_serdes_reset(mac);
}
- val = F_MAC_RESET_;
+ t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
+ V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE),
+ V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER);
+ val = F_MAC_RESET_ | F_XGMAC_STOP_EN;
+
if (is_10G(adap))
val |= F_PCS_RESET_;
else if (uses_xaui(adap))
@@ -148,7 +153,7 @@ static int t3b2_mac_reset(struct cmac *mac)
unsigned int oft = mac->offset;
u32 val;
- if (!macidx(mac))
+ if (!macidx(mac))
t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
else
t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
@@ -182,11 +187,11 @@ static int t3b2_mac_reset(struct cmac *mac)
msleep(1);
t3b_pcs_reset(mac);
}
- t3_write_reg(adap, A_XGM_RX_CFG + oft,
+ t3_write_reg(adap, A_XGM_RX_CFG + oft,
F_DISPAUSEFRAMES | F_EN1536BFRAMES |
F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);
- if (!macidx(mac))
+ if (!macidx(mac))
t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
else
t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);
@@ -313,8 +318,9 @@ static int rx_fifo_hwm(int mtu)
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
{
- int hwm, lwm;
- unsigned int thres, v;
+ int hwm, lwm, divisor;
+ int ipg;
+ unsigned int thres, v, reg;
struct adapter *adap = mac->adapter;
/*
@@ -330,32 +336,37 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
* Adjust the PAUSE frame watermarks. We always set the LWM, and the
* HWM only if flow-control is enabled.
*/
- hwm = max_t(unsigned int, MAC_RXFIFO_SIZE - 3 * mtu,
+ hwm = max_t(unsigned int, MAC_RXFIFO_SIZE - 3 * mtu,
MAC_RXFIFO_SIZE * 38 / 100);
hwm = min(hwm, MAC_RXFIFO_SIZE - 8192);
lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
- if (adap->params.rev == T3_REV_B2 &&
+ if (adap->params.rev >= T3_REV_B2 &&
(t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
disable_exact_filters(mac);
v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset);
t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset,
F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);
- /* drain rx FIFO */
- if (t3_wait_op_done(adap,
- A_XGM_RX_MAX_PKT_SIZE_ERR_CNT +
- mac->offset,
- 1 << 31, 1, 20, 5)) {
+ reg = adap->params.rev == T3_REV_B2 ?
+ A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG;
+
+ /* drain RX FIFO */
+ if (t3_wait_op_done(adap, reg + mac->offset,
+ F_RXFIFO_EMPTY, 1, 20, 5)) {
t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
enable_exact_filters(mac);
return -EIO;
}
- t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
+ t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
+ V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
+ V_RXMAXPKTSIZE(mtu));
t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
enable_exact_filters(mac);
} else
- t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
+ t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
+ V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
+ V_RXMAXPKTSIZE(mtu));
/*
* Adjust the PAUSE frame watermarks. We always set the LWM, and the
@@ -379,13 +390,16 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
thres /= 10;
thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
thres = max(thres, 8U); /* need at least 8 */
+ ipg = (adap->params.rev == T3_REV_C) ? 0 : 1;
t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
- V_TXFIFOTHRESH(thres) | V_TXIPG(1));
+ V_TXFIFOTHRESH(thres) | V_TXIPG(ipg));
- if (adap->params.rev > 0)
+ if (adap->params.rev > 0) {
+ divisor = (adap->params.rev == T3_REV_C) ? 64 : 8;
t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
- (hwm - lwm) * 4 / 8);
+ (hwm - lwm) * 4 / divisor);
+ }
t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
MAC_RXFIFO_SIZE * 4 * 8 / 512);
return 0;
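
As a worked example of the watermark and pause-timer arithmetic above (hedged: it assumes MAC_RXFIFO_SIZE is 32768 bytes, which is what the cxgb3 headers appear to define, and a 1500-byte MTU):

	/* hwm = max(32768 - 3 * 1500, 32768 * 38 / 100)
	 *     = max(28268, 12451)                = 28268
	 * hwm = min(28268, 32768 - 8192)         = 24576
	 * lwm = min(3 * 1500, 32768 / 4)         = 4500
	 *
	 * A_XGM_PAUSE_TIMER:
	 *   T3B (divisor 8):  (24576 - 4500) * 4 / 8  = 10038
	 *   T3C (divisor 64): (24576 - 4500) * 4 / 64 = 1254
	 */
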
@@ -435,7 +449,7 @@ int t3_mac_enable(struct cmac *mac, int which)
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
struct mac_stats *s = &mac->stats;
-
+
if (which & MAC_DIRECTION_TX) {
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
t3_write_reg(adap, A_TP_PIO_DATA, 0xc0ede401);
@@ -522,7 +536,7 @@ int t3b2_mac_watchdog_task(struct cmac *mac)
goto rxcheck;
}
- if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) {
+ if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) {
if (mac->toggle_cnt > 4) {
status = 2;
goto out;